mirror of https://github.com/chrislusf/seaweedfs synced 2025-07-27 14:02:47 +02:00

Compare commits


No commits in common. "master" and "3.43" have entirely different histories.
master ... 3.43

1157 changed files with 26535 additions and 142084 deletions
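As a point of reference, here is a minimal shell sketch for reproducing this comparison in a local clone. It assumes both refs have been fetched and that 3.43 is a release tag rather than a branch (an assumption; the compare view does not say):

git clone https://github.com/chrislusf/seaweedfs && cd seaweedfs
git fetch origin master
git fetch origin tag 3.43          # assumed to be a tag
# Two-dot forms are used because the refs share no merge base ("no commits in common").
git diff --stat master 3.43        # per-file additions/deletions, as summarized above
git log --oneline master..3.43     # all commits reachable from 3.43 but not from master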

View file

@ -38,13 +38,13 @@ jobs:
steps:
- name: Check out code into the Go module directory
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v2
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v2
- name: Set BUILD_TIME env
run: echo BUILD_TIME=$(date -u +%Y%m%d-%H%M) >> ${GITHUB_ENV}
- name: Go Release Binaries Large Disk
uses: wangyoucao577/go-release-action@481a2c1a0f1be199722e3e9b74d7199acafc30a8 # v1.22
uses: wangyoucao577/go-release-action@efb9406897e4eff1aa2c7142223b096aec90f267 # v1.22
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
goos: ${{ matrix.goos }}
@ -53,14 +53,14 @@ jobs:
overwrite: true
pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
build_flags: -tags 5BytesOffset # optional, default is
ldflags: -s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=${{github.sha}}
ldflags: -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util.COMMIT=${{github.sha}}
# Where to run `go build .`
project_path: weed
binary_name: weed-large-disk
asset_name: "weed-large-disk-${{ env.BUILD_TIME }}-${{ matrix.goos }}-${{ matrix.goarch }}"
- name: Go Release Binaries Normal Volume Size
uses: wangyoucao577/go-release-action@481a2c1a0f1be199722e3e9b74d7199acafc30a8 # v1.22
uses: wangyoucao577/go-release-action@efb9406897e4eff1aa2c7142223b096aec90f267 # v1.22
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
goos: ${{ matrix.goos }}
@ -68,7 +68,7 @@ jobs:
release_tag: dev
overwrite: true
pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
ldflags: -s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=${{github.sha}}
ldflags: -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util.COMMIT=${{github.sha}}
# Where to run `go build .`
project_path: weed
binary_name: weed-normal-disk
@ -87,13 +87,13 @@ jobs:
steps:
- name: Check out code into the Go module directory
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v2
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v2
- name: Set BUILD_TIME env
run: echo BUILD_TIME=$(date -u +%Y%m%d-%H%M) >> ${GITHUB_ENV}
- name: Go Release Binaries Large Disk
uses: wangyoucao577/go-release-action@481a2c1a0f1be199722e3e9b74d7199acafc30a8 # v1.22
uses: wangyoucao577/go-release-action@efb9406897e4eff1aa2c7142223b096aec90f267 # v1.22
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
goos: ${{ matrix.goos }}
@ -102,14 +102,14 @@ jobs:
overwrite: true
pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
build_flags: -tags 5BytesOffset # optional, default is
ldflags: -s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=${{github.sha}}
ldflags: -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util.COMMIT=${{github.sha}}
# Where to run `go build .`
project_path: weed
binary_name: weed-large-disk
asset_name: "weed-large-disk-${{ env.BUILD_TIME }}-${{ matrix.goos }}-${{ matrix.goarch }}"
- name: Go Release Binaries Normal Volume Size
uses: wangyoucao577/go-release-action@481a2c1a0f1be199722e3e9b74d7199acafc30a8 # v1.22
uses: wangyoucao577/go-release-action@efb9406897e4eff1aa2c7142223b096aec90f267 # v1.22
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
goos: ${{ matrix.goos }}
@ -117,7 +117,7 @@ jobs:
release_tag: dev
overwrite: true
pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
ldflags: -s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=${{github.sha}}
ldflags: -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util.COMMIT=${{github.sha}}
# Where to run `go build .`
project_path: weed
binary_name: weed-normal-disk
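A change repeated throughout these workflow files is the linker-flag line: the master side stamps the commit into github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT and adds -s -w to strip the symbol table and DWARF data, while 3.43 stamps weed/util.COMMIT without stripping. A rough local sketch of what one go-release-action step effectively runs (the output name and the use of HEAD are assumptions):

cd weed
COMMIT=$(git rev-parse HEAD)
export CGO_ENABLED=0 GODEBUG=http2client=0     # mirrors the step's pre_command
go build \
  -ldflags "-s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=${COMMIT}" \
  -o weed-normal-disk .
./weed-normal-disk version                     # should report the stamped commit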

View file

@ -28,9 +28,9 @@ jobs:
# Steps represent a sequence of tasks that will be executed as part of the job
steps:
# Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v2
- uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v2
- name: Go Release Binaries Normal Volume Size
uses: wangyoucao577/go-release-action@481a2c1a0f1be199722e3e9b74d7199acafc30a8 # v1.22
uses: wangyoucao577/go-release-action@efb9406897e4eff1aa2c7142223b096aec90f267 # v1.22
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
goos: ${{ matrix.goos }}
@ -38,13 +38,13 @@ jobs:
overwrite: true
pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
# build_flags: -tags 5BytesOffset # optional, default is
ldflags: -s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=${{github.sha}}
ldflags: -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util.COMMIT=${{github.sha}}
# Where to run `go build .`
project_path: weed
binary_name: weed
asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}"
- name: Go Release Large Disk Binaries
uses: wangyoucao577/go-release-action@481a2c1a0f1be199722e3e9b74d7199acafc30a8 # v1.22
uses: wangyoucao577/go-release-action@efb9406897e4eff1aa2c7142223b096aec90f267 # v1.22
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
goos: ${{ matrix.goos }}
@ -52,7 +52,7 @@ jobs:
overwrite: true
pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
build_flags: -tags 5BytesOffset # optional, default is
ldflags: -s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=${{github.sha}}
ldflags: -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util.COMMIT=${{github.sha}}
# Where to run `go build .`
project_path: weed
binary_name: weed

View file

@ -28,9 +28,9 @@ jobs:
# Steps represent a sequence of tasks that will be executed as part of the job
steps:
# Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v2
- uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v2
- name: Go Release Binaries Normal Volume Size
uses: wangyoucao577/go-release-action@481a2c1a0f1be199722e3e9b74d7199acafc30a8 # v1.22
uses: wangyoucao577/go-release-action@efb9406897e4eff1aa2c7142223b096aec90f267 # v1.22
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
goos: ${{ matrix.goos }}
@ -38,13 +38,13 @@ jobs:
overwrite: true
pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
# build_flags: -tags 5BytesOffset # optional, default is
ldflags: -s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=${{github.sha}}
ldflags: -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util.COMMIT=${{github.sha}}
# Where to run `go build .`
project_path: weed
binary_name: weed
asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}"
- name: Go Release Large Disk Binaries
uses: wangyoucao577/go-release-action@481a2c1a0f1be199722e3e9b74d7199acafc30a8 # v1.22
uses: wangyoucao577/go-release-action@efb9406897e4eff1aa2c7142223b096aec90f267 # v1.22
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
goos: ${{ matrix.goos }}
@ -52,7 +52,7 @@ jobs:
overwrite: true
pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
build_flags: -tags 5BytesOffset # optional, default is
ldflags: -s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=${{github.sha}}
ldflags: -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util.COMMIT=${{github.sha}}
# Where to run `go build .`
project_path: weed
binary_name: weed

View file

@ -28,9 +28,9 @@ jobs:
# Steps represent a sequence of tasks that will be executed as part of the job
steps:
# Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v2
- uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v2
- name: Go Release Binaries Normal Volume Size
uses: wangyoucao577/go-release-action@481a2c1a0f1be199722e3e9b74d7199acafc30a8 # v1.22
uses: wangyoucao577/go-release-action@efb9406897e4eff1aa2c7142223b096aec90f267 # v1.22
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
goos: ${{ matrix.goos }}
@ -38,13 +38,13 @@ jobs:
overwrite: true
pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
# build_flags: -tags 5BytesOffset # optional, default is
ldflags: -s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=${{github.sha}}
ldflags: -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util.COMMIT=${{github.sha}}
# Where to run `go build .`
project_path: weed
binary_name: weed
asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}"
- name: Go Release Large Disk Binaries
uses: wangyoucao577/go-release-action@481a2c1a0f1be199722e3e9b74d7199acafc30a8 # v1.22
uses: wangyoucao577/go-release-action@efb9406897e4eff1aa2c7142223b096aec90f267 # v1.22
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
goos: ${{ matrix.goos }}
@ -52,7 +52,7 @@ jobs:
overwrite: true
pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
build_flags: -tags 5BytesOffset # optional, default is
ldflags: -s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=${{github.sha}}
ldflags: -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util.COMMIT=${{github.sha}}
# Where to run `go build .`
project_path: weed
binary_name: weed

View file

@ -28,9 +28,9 @@ jobs:
# Steps represent a sequence of tasks that will be executed as part of the job
steps:
# Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v2
- uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v2
- name: Go Release Binaries Normal Volume Size
uses: wangyoucao577/go-release-action@481a2c1a0f1be199722e3e9b74d7199acafc30a8 # v1.22
uses: wangyoucao577/go-release-action@efb9406897e4eff1aa2c7142223b096aec90f267 # v1.22
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
goos: ${{ matrix.goos }}
@ -38,13 +38,13 @@ jobs:
overwrite: true
pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
# build_flags: -tags 5BytesOffset # optional, default is
ldflags: -s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=${{github.sha}}
ldflags: -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util.COMMIT=${{github.sha}}
# Where to run `go build .`
project_path: weed
binary_name: weed
asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}"
- name: Go Release Large Disk Binaries
uses: wangyoucao577/go-release-action@481a2c1a0f1be199722e3e9b74d7199acafc30a8 # v1.22
uses: wangyoucao577/go-release-action@efb9406897e4eff1aa2c7142223b096aec90f267 # v1.22
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
goos: ${{ matrix.goos }}
@ -52,7 +52,7 @@ jobs:
overwrite: true
pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
build_flags: -tags 5BytesOffset # optional, default is
ldflags: -s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=${{github.sha}}
ldflags: -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util.COMMIT=${{github.sha}}
# Where to run `go build .`
project_path: weed
binary_name: weed

View file

@ -28,32 +28,32 @@ jobs:
# Steps represent a sequence of tasks that will be executed as part of the job
steps:
# Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v2
- uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v2
- name: Go Release Binaries Normal Volume Size
uses: wangyoucao577/go-release-action@481a2c1a0f1be199722e3e9b74d7199acafc30a8 # v1.22
uses: wangyoucao577/go-release-action@efb9406897e4eff1aa2c7142223b096aec90f267 # v1.22
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
goos: ${{ matrix.goos }}
goarch: ${{ matrix.goarch }}
overwrite: true
build_flags: -tags elastic,gocdk,rclone,sqlite,tarantool,tikv,ydb
build_flags: -tags elastic,ydb,gocdk,tikv
pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
# build_flags: -tags 5BytesOffset # optional, default is
ldflags: -s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=${{github.sha}}
ldflags: -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util.COMMIT=${{github.sha}}
# Where to run `go build .`
project_path: weed
binary_name: weed
asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}_full"
- name: Go Release Large Disk Binaries
uses: wangyoucao577/go-release-action@481a2c1a0f1be199722e3e9b74d7199acafc30a8 # v1.22
uses: wangyoucao577/go-release-action@efb9406897e4eff1aa2c7142223b096aec90f267 # v1.22
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
goos: ${{ matrix.goos }}
goarch: ${{ matrix.goarch }}
overwrite: true
pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
build_flags: -tags 5BytesOffset,elastic,gocdk,rclone,sqlite,tarantool,tikv,ydb
ldflags: -s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=${{github.sha}}
build_flags: -tags 5BytesOffset,elastic,ydb,gocdk,tikv
ldflags: -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util.COMMIT=${{github.sha}}
# Where to run `go build .`
project_path: weed
binary_name: weed
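The two steps above differ only in their build tags: the first builds the "full" binary with the optional backends enabled (elastic, gocdk, rclone, sqlite, tarantool, tikv, ydb on the master side; a shorter list on 3.43), and the second adds 5BytesOffset, which enables 5-byte needle offsets so individual volumes can grow past the default size limit. A hedged local equivalent (output names are illustrative):

cd weed
go build -tags "elastic,gocdk,rclone,sqlite,tarantool,tikv,ydb" -o weed-full .
go build -tags "5BytesOffset,elastic,gocdk,rclone,sqlite,tarantool,tikv,ydb" -o weed-full-large-disk .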

View file

@ -1,59 +0,0 @@
# This is a basic workflow to help you get started with Actions
name: "go: build versioned binaries for openbsd"
on:
push:
tags:
- '*'
# Allows you to run this workflow manually from the Actions tab
workflow_dispatch:
# A workflow run is made up of one or more jobs that can run sequentially or in parallel
permissions:
contents: read
jobs:
build-release-binaries_openbsd:
permissions:
contents: write # for wangyoucao577/go-release-action to upload release assets
runs-on: ubuntu-latest
strategy:
matrix:
goos: [openbsd]
goarch: [amd64, arm, arm64]
# Steps represent a sequence of tasks that will be executed as part of the job
steps:
# Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v2
- name: Go Release Binaries Normal Volume Size
uses: wangyoucao577/go-release-action@481a2c1a0f1be199722e3e9b74d7199acafc30a8 # v1.22
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
goos: ${{ matrix.goos }}
goarch: ${{ matrix.goarch }}
overwrite: true
pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
# build_flags: -tags 5BytesOffset # optional, default is
ldflags: -s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=${{github.sha}}
# Where to run `go build .`
project_path: weed
binary_name: weed
asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}"
- name: Go Release Large Disk Binaries
uses: wangyoucao577/go-release-action@481a2c1a0f1be199722e3e9b74d7199acafc30a8 # v1.22
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
goos: ${{ matrix.goos }}
goarch: ${{ matrix.goarch }}
overwrite: true
pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
build_flags: -tags 5BytesOffset # optional, default is
ldflags: -s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=${{github.sha}}
# Where to run `go build .`
project_path: weed
binary_name: weed
asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}_large_disk"

View file

@ -18,11 +18,11 @@ jobs:
steps:
- name: Checkout repository
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@v3
uses: github/codeql-action/init@v2
# Override language selection by uncommenting this and choosing your languages
with:
languages: go
@ -30,7 +30,7 @@ jobs:
# Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
# If this step fails, then you should remove it and run the build manually (see below).
- name: Autobuild
uses: github/codeql-action/autobuild@v3
uses: github/codeql-action/autobuild@v2
# Command-line programs to run using the OS shell.
# 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun
@ -44,4 +44,4 @@ jobs:
# make release
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v3
uses: github/codeql-action/analyze@v2

View file

@ -16,11 +16,11 @@ jobs:
steps:
-
name: Checkout
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v2
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v2
-
name: Docker meta
id: docker_meta
uses: docker/metadata-action@902fa8ec7d6ecbf8d84d538b9b233a880e428804 # v3
uses: docker/metadata-action@507c2f2dc502c992ad446e3d7a5dfbe311567a96 # v3
with:
images: |
chrislusf/seaweedfs
@ -33,30 +33,30 @@ jobs:
org.opencontainers.image.vendor=Chris Lu
-
name: Set up QEMU
uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v1
uses: docker/setup-qemu-action@e81a89b1732b9c48d79cd809d8d81d79c4647a18 # v1
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v1
uses: docker/setup-buildx-action@f03ac48505955848960e80bbb68046aa35c7b9e7 # v1
with:
buildkitd-flags: "--debug"
-
name: Login to Docker Hub
if: github.event_name != 'pull_request'
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v1
uses: docker/login-action@f4ef78c080cd8ba55a85445d5b36e214a81df20a # v1
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
-
name: Login to GHCR
if: github.event_name != 'pull_request'
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v1
uses: docker/login-action@f4ef78c080cd8ba55a85445d5b36e214a81df20a # v1
with:
registry: ghcr.io
username: ${{ secrets.GHCR_USERNAME }}
password: ${{ secrets.GHCR_TOKEN }}
-
name: Build
uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v2
uses: docker/build-push-action@3b5e8027fcad23fda98b2e3ac259d8d67585f671 # v2
with:
context: ./docker
push: ${{ github.event_name != 'pull_request' }}

View file

@ -17,11 +17,11 @@ jobs:
steps:
-
name: Checkout
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v2
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v2
-
name: Docker meta
id: docker_meta
uses: docker/metadata-action@902fa8ec7d6ecbf8d84d538b9b233a880e428804 # v3
uses: docker/metadata-action@507c2f2dc502c992ad446e3d7a5dfbe311567a96 # v3
with:
images: |
chrislusf/seaweedfs
@ -34,30 +34,30 @@ jobs:
org.opencontainers.image.vendor=Chris Lu
-
name: Set up QEMU
uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v1
uses: docker/setup-qemu-action@e81a89b1732b9c48d79cd809d8d81d79c4647a18 # v1
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v1
uses: docker/setup-buildx-action@f03ac48505955848960e80bbb68046aa35c7b9e7 # v1
with:
buildkitd-flags: "--debug"
-
name: Login to Docker Hub
if: github.event_name != 'pull_request'
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v1
uses: docker/login-action@f4ef78c080cd8ba55a85445d5b36e214a81df20a # v1
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
-
name: Login to GHCR
if: github.event_name != 'pull_request'
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v1
uses: docker/login-action@f4ef78c080cd8ba55a85445d5b36e214a81df20a # v1
with:
registry: ghcr.io
username: ${{ secrets.GHCR_USERNAME }}
password: ${{ secrets.GHCR_TOKEN }}
-
name: Build
uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v2
uses: docker/build-push-action@3b5e8027fcad23fda98b2e3ac259d8d67585f671 # v2
with:
context: ./docker
push: ${{ github.event_name != 'pull_request' }}

View file

@ -16,11 +16,11 @@ jobs:
steps:
-
name: Checkout
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v2
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v2
-
name: Docker meta
id: docker_meta
uses: docker/metadata-action@902fa8ec7d6ecbf8d84d538b9b233a880e428804 # v3
uses: docker/metadata-action@507c2f2dc502c992ad446e3d7a5dfbe311567a96 # v3
with:
images: |
chrislusf/seaweedfs
@ -34,20 +34,20 @@ jobs:
org.opencontainers.image.vendor=Chris Lu
-
name: Set up QEMU
uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v1
uses: docker/setup-qemu-action@e81a89b1732b9c48d79cd809d8d81d79c4647a18 # v1
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v1
uses: docker/setup-buildx-action@f03ac48505955848960e80bbb68046aa35c7b9e7 # v1
-
name: Login to Docker Hub
if: github.event_name != 'pull_request'
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v1
uses: docker/login-action@f4ef78c080cd8ba55a85445d5b36e214a81df20a # v1
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
-
name: Build
uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v2
uses: docker/build-push-action@3b5e8027fcad23fda98b2e3ac259d8d67585f671 # v2
with:
context: ./docker
push: ${{ github.event_name != 'pull_request' }}

View file

@ -17,11 +17,11 @@ jobs:
steps:
-
name: Checkout
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v2
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v2
-
name: Docker meta
id: docker_meta
uses: docker/metadata-action@902fa8ec7d6ecbf8d84d538b9b233a880e428804 # v3
uses: docker/metadata-action@507c2f2dc502c992ad446e3d7a5dfbe311567a96 # v3
with:
images: |
chrislusf/seaweedfs
@ -35,20 +35,20 @@ jobs:
org.opencontainers.image.vendor=Chris Lu
-
name: Set up QEMU
uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v1
uses: docker/setup-qemu-action@e81a89b1732b9c48d79cd809d8d81d79c4647a18 # v1
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v1
uses: docker/setup-buildx-action@f03ac48505955848960e80bbb68046aa35c7b9e7 # v1
-
name: Login to Docker Hub
if: github.event_name != 'pull_request'
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v1
uses: docker/login-action@f4ef78c080cd8ba55a85445d5b36e214a81df20a # v1
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
-
name: Build
uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v2
uses: docker/build-push-action@3b5e8027fcad23fda98b2e3ac259d8d67585f671 # v2
with:
context: ./docker
push: ${{ github.event_name != 'pull_request' }}

View file

@ -17,11 +17,11 @@ jobs:
steps:
-
name: Checkout
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v2
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v2
-
name: Docker meta
id: docker_meta
uses: docker/metadata-action@902fa8ec7d6ecbf8d84d538b9b233a880e428804 # v3
uses: docker/metadata-action@507c2f2dc502c992ad446e3d7a5dfbe311567a96 # v3
with:
images: |
chrislusf/seaweedfs
@ -35,20 +35,20 @@ jobs:
org.opencontainers.image.vendor=Chris Lu
-
name: Set up QEMU
uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v1
uses: docker/setup-qemu-action@e81a89b1732b9c48d79cd809d8d81d79c4647a18 # v1
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v1
uses: docker/setup-buildx-action@f03ac48505955848960e80bbb68046aa35c7b9e7 # v1
-
name: Login to Docker Hub
if: github.event_name != 'pull_request'
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v1
uses: docker/login-action@f4ef78c080cd8ba55a85445d5b36e214a81df20a # v1
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
-
name: Build
uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v2
uses: docker/build-push-action@3b5e8027fcad23fda98b2e3ac259d8d67585f671 # v2
with:
context: ./docker
push: ${{ github.event_name != 'pull_request' }}

View file

@ -16,11 +16,11 @@ jobs:
steps:
-
name: Checkout
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v2
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v2
-
name: Docker meta
id: docker_meta
uses: docker/metadata-action@902fa8ec7d6ecbf8d84d538b9b233a880e428804 # v3
uses: docker/metadata-action@507c2f2dc502c992ad446e3d7a5dfbe311567a96 # v3
with:
images: |
chrislusf/seaweedfs
@ -34,25 +34,25 @@ jobs:
org.opencontainers.image.vendor=Chris Lu
-
name: Set up QEMU
uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v1
uses: docker/setup-qemu-action@e81a89b1732b9c48d79cd809d8d81d79c4647a18 # v1
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v1
uses: docker/setup-buildx-action@f03ac48505955848960e80bbb68046aa35c7b9e7 # v1
-
name: Login to Docker Hub
if: github.event_name != 'pull_request'
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v1
uses: docker/login-action@f4ef78c080cd8ba55a85445d5b36e214a81df20a # v1
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
-
name: Build
uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v2
uses: docker/build-push-action@3b5e8027fcad23fda98b2e3ac259d8d67585f671 # v2
with:
context: ./docker
push: ${{ github.event_name != 'pull_request' }}
file: ./docker/Dockerfile.go_build
build-args: TAGS=elastic,gocdk,rclone,sqlite,tarantool,tikv,ydb
build-args: TAGS=elastic,ydb,gocdk,tikv
platforms: linux/amd64
tags: ${{ steps.docker_meta.outputs.tags }}
labels: ${{ steps.docker_meta.outputs.labels }}
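The Build step above forwards the tag list to the image build as a Docker build argument. A sketch of the equivalent local invocation from the repository root, using the context, Dockerfile, and platform named in the step (the image tag is illustrative, and the TAGS value is the master-side list):

docker build \
  --platform linux/amd64 \
  --build-arg TAGS=elastic,gocdk,rclone,sqlite,tarantool,tikv,ydb \
  -f docker/Dockerfile.go_build \
  -t seaweedfs:dev-full \
  ./docker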

View file

@ -16,11 +16,11 @@ jobs:
steps:
-
name: Checkout
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v2
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v2
-
name: Docker meta
id: docker_meta
uses: docker/metadata-action@902fa8ec7d6ecbf8d84d538b9b233a880e428804 # v3
uses: docker/metadata-action@507c2f2dc502c992ad446e3d7a5dfbe311567a96 # v3
with:
images: |
chrislusf/seaweedfs
@ -34,25 +34,25 @@ jobs:
org.opencontainers.image.vendor=Chris Lu
-
name: Set up QEMU
uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v1
uses: docker/setup-qemu-action@e81a89b1732b9c48d79cd809d8d81d79c4647a18 # v1
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v1
uses: docker/setup-buildx-action@f03ac48505955848960e80bbb68046aa35c7b9e7 # v1
-
name: Login to Docker Hub
if: github.event_name != 'pull_request'
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v1
uses: docker/login-action@f4ef78c080cd8ba55a85445d5b36e214a81df20a # v1
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
-
name: Build
uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v2
uses: docker/build-push-action@3b5e8027fcad23fda98b2e3ac259d8d67585f671 # v2
with:
context: ./docker
push: ${{ github.event_name != 'pull_request' }}
file: ./docker/Dockerfile.go_build
build-args: TAGS=5BytesOffset,elastic,gocdk,rclone,sqlite,tarantool,tikv,ydb
build-args: TAGS=5BytesOffset,elastic,ydb,gocdk,tikv
platforms: linux/amd64
tags: ${{ steps.docker_meta.outputs.tags }}
labels: ${{ steps.docker_meta.outputs.labels }}

View file

@ -1,171 +0,0 @@
# This workflow will build and deploy the SeaweedFS telemetry server
# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-go
name: Deploy Telemetry Server
on:
workflow_dispatch:
inputs:
setup:
description: 'Run first-time server setup'
required: true
type: boolean
default: false
deploy:
description: 'Deploy telemetry server to remote server'
required: true
type: boolean
default: false
jobs:
deploy:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Set up Go
uses: actions/setup-go@v5.5.0
with:
go-version: '1.24'
- name: Build Telemetry Server
if: github.event_name == 'workflow_dispatch' && inputs.deploy
run: |
go mod tidy
echo "Building telemetry server..."
GOOS=linux GOARCH=amd64 go build -o telemetry-server ./telemetry/server/main.go
ls -la telemetry-server
echo "Build completed successfully"
- name: First-time Server Setup
if: github.event_name == 'workflow_dispatch' && inputs.setup
env:
SSH_PRIVATE_KEY: ${{ secrets.TELEMETRY_SSH_PRIVATE_KEY }}
REMOTE_HOST: ${{ secrets.TELEMETRY_HOST }}
REMOTE_USER: ${{ secrets.TELEMETRY_USER }}
run: |
mkdir -p ~/.ssh
echo "$SSH_PRIVATE_KEY" > ~/.ssh/deploy_key
chmod 600 ~/.ssh/deploy_key
echo "Host *" > ~/.ssh/config
echo " StrictHostKeyChecking no" >> ~/.ssh/config
# Create all required directories with proper permissions
ssh -i ~/.ssh/deploy_key $REMOTE_USER@$REMOTE_HOST "
mkdir -p ~/seaweedfs-telemetry/bin ~/seaweedfs-telemetry/logs ~/seaweedfs-telemetry/data ~/seaweedfs-telemetry/tmp && \
chmod 755 ~/seaweedfs-telemetry/logs && \
chmod 755 ~/seaweedfs-telemetry/data && \
touch ~/seaweedfs-telemetry/logs/telemetry.log ~/seaweedfs-telemetry/logs/telemetry.error.log && \
chmod 644 ~/seaweedfs-telemetry/logs/*.log"
# Create systemd service file
echo "
[Unit]
Description=SeaweedFS Telemetry Server
After=network.target
[Service]
Type=simple
User=$REMOTE_USER
WorkingDirectory=/home/$REMOTE_USER/seaweedfs-telemetry
ExecStart=/home/$REMOTE_USER/seaweedfs-telemetry/bin/telemetry-server -port=8353
Restart=always
RestartSec=5
StandardOutput=append:/home/$REMOTE_USER/seaweedfs-telemetry/logs/telemetry.log
StandardError=append:/home/$REMOTE_USER/seaweedfs-telemetry/logs/telemetry.error.log
[Install]
WantedBy=multi-user.target" > telemetry.service
# Setup logrotate configuration
echo "# SeaweedFS Telemetry service log rotation
/home/$REMOTE_USER/seaweedfs-telemetry/logs/*.log {
daily
rotate 30
compress
delaycompress
missingok
notifempty
create 644 $REMOTE_USER $REMOTE_USER
postrotate
systemctl restart telemetry.service
endscript
}" > telemetry_logrotate
# Copy configuration files
scp -i ~/.ssh/deploy_key telemetry/grafana-dashboard.json $REMOTE_USER@$REMOTE_HOST:~/seaweedfs-telemetry/
scp -i ~/.ssh/deploy_key telemetry/prometheus.yml $REMOTE_USER@$REMOTE_HOST:~/seaweedfs-telemetry/
# Copy and install service and logrotate files
scp -i ~/.ssh/deploy_key telemetry.service telemetry_logrotate $REMOTE_USER@$REMOTE_HOST:~/seaweedfs-telemetry/
ssh -i ~/.ssh/deploy_key $REMOTE_USER@$REMOTE_HOST "
sudo mv ~/seaweedfs-telemetry/telemetry.service /etc/systemd/system/ && \
sudo mv ~/seaweedfs-telemetry/telemetry_logrotate /etc/logrotate.d/seaweedfs-telemetry && \
sudo systemctl daemon-reload && \
sudo systemctl enable telemetry.service"
echo "✅ First-time setup completed successfully!"
echo "📋 Next step: Run the deployment to install the telemetry server binary"
echo " 1. Go to GitHub Actions → Deploy Telemetry Server"
echo " 2. Click 'Run workflow'"
echo " 3. Check 'Deploy telemetry server to remote server'"
echo " 4. Click 'Run workflow'"
rm -f ~/.ssh/deploy_key
- name: Deploy Telemetry Server to Remote Server
if: github.event_name == 'workflow_dispatch' && inputs.deploy
env:
SSH_PRIVATE_KEY: ${{ secrets.TELEMETRY_SSH_PRIVATE_KEY }}
REMOTE_HOST: ${{ secrets.TELEMETRY_HOST }}
REMOTE_USER: ${{ secrets.TELEMETRY_USER }}
run: |
mkdir -p ~/.ssh
echo "$SSH_PRIVATE_KEY" > ~/.ssh/deploy_key
chmod 600 ~/.ssh/deploy_key
echo "Host *" > ~/.ssh/config
echo " StrictHostKeyChecking no" >> ~/.ssh/config
# Create temp directory and copy binary
ssh -i ~/.ssh/deploy_key $REMOTE_USER@$REMOTE_HOST "mkdir -p ~/seaweedfs-telemetry/tmp"
scp -i ~/.ssh/deploy_key telemetry-server $REMOTE_USER@$REMOTE_HOST:~/seaweedfs-telemetry/tmp/
# Copy updated configuration files
scp -i ~/.ssh/deploy_key telemetry/grafana-dashboard.json $REMOTE_USER@$REMOTE_HOST:~/seaweedfs-telemetry/
scp -i ~/.ssh/deploy_key telemetry/prometheus.yml $REMOTE_USER@$REMOTE_HOST:~/seaweedfs-telemetry/
# Check if service exists and deploy accordingly
ssh -i ~/.ssh/deploy_key $REMOTE_USER@$REMOTE_HOST "
if systemctl list-unit-files telemetry.service >/dev/null 2>&1; then
echo 'Service exists, performing update...'
sudo systemctl stop telemetry.service
mkdir -p ~/seaweedfs-telemetry/bin
mv ~/seaweedfs-telemetry/tmp/telemetry-server ~/seaweedfs-telemetry/bin/
chmod +x ~/seaweedfs-telemetry/bin/telemetry-server
sudo systemctl start telemetry.service
sudo systemctl status telemetry.service
else
echo 'ERROR: telemetry.service not found!'
echo 'Please run the first-time setup before deploying.'
echo 'Go to GitHub Actions → Deploy Telemetry Server → Run workflow → Check \"Run first-time server setup\"'
exit 1
fi"
# Verify deployment
ssh -i ~/.ssh/deploy_key $REMOTE_USER@$REMOTE_HOST "
echo 'Waiting for service to start...'
sleep 5
curl -f http://localhost:8353/health || echo 'Health check failed'"
rm -f ~/.ssh/deploy_key
- name: Notify Deployment Status
if: always()
run: |
if [ "${{ job.status }}" == "success" ]; then
echo "✅ Telemetry server deployment successful"
echo "Dashboard: http://${{ secrets.TELEMETRY_HOST }}:8353"
echo "Metrics: http://${{ secrets.TELEMETRY_HOST }}:8353/metrics"
else
echo "❌ Telemetry server deployment failed"
fi

View file

@ -9,6 +9,6 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: 'Checkout Repository'
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c
- name: 'Dependency Review'
uses: actions/dependency-review-action@da24556b548a50705dd671f47852072ea4c105d9
uses: actions/dependency-review-action@c090f4e553673e6e505ea70d6a95362ee12adb94

View file

@ -21,16 +21,16 @@ jobs:
e2e:
name: FUSE Mount
runs-on: ubuntu-22.04
timeout-minutes: 30
timeout-minutes: 15
steps:
- name: Set up Go 1.x
uses: actions/setup-go@fa96338abe5531f6e34c5cc0bbe28c1a533d5505 # v2
uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # v2
with:
go-version: ^1.13
id: go
- name: Check out code into the Go module directory
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v2
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v2
- name: Install dependencies
run: |
@ -41,20 +41,20 @@ jobs:
timeout-minutes: 5
run: make build_e2e && docker compose -f ./compose/e2e-mount.yml up --wait
- name: Run FIO 4k
- name: Run FIO
timeout-minutes: 15
run: |
echo "Starting FIO at: $(date)"
# Concurrent r/w
echo 'Run randrw with size=16M bs=4k'
docker compose -f ./compose/e2e-mount.yml exec mount timeout -k5 60 fio --name=fiotest --filename=/mnt/seaweedfs/fiotest --size=16M --rw=randrw --bs=4k --direct=1 --numjobs=8 --ioengine=libaio --group_reporting --runtime=30 --time_based=1
docker compose -f ./compose/e2e-mount.yml exec mount timeout -k5 60 fio --name=fiotest --filename=/mnt/seaweedfs/fiotest --size=16M --rw=randrw --bs=4k --direct=1 --numjobs=8 --ioengine=libaio --iodepth=32 --group_reporting --runtime=30 --time_based=1
echo "Verify FIO at: $(date)"
# Verified write
echo 'Run randwrite with size=16M bs=4k'
docker compose -f ./compose/e2e-mount.yml exec mount timeout -k5 60 fio --name=fiotest --filename=/mnt/seaweedfs/fiotest --size=16M --rw=randwrite --bs=4k --direct=1 --numjobs=8 --ioengine=libaio --iodepth=32 --group_reporting --runtime=30 --time_based=1 --do_verify=0 --verify=crc32c --verify_backlog=1
- name: Run FIO 128k
- name: Run FIO
timeout-minutes: 15
run: |
echo "Starting FIO at: $(date)"
@ -67,7 +67,7 @@ jobs:
echo 'Run randwrite with size=16M bs=128k'
docker compose -f ./compose/e2e-mount.yml exec mount timeout -k5 60 fio --name=fiotest --filename=/mnt/seaweedfs/fiotest --size=16M --rw=randwrite --bs=128k --direct=1 --numjobs=8 --ioengine=libaio --iodepth=32 --group_reporting --runtime=30 --time_based=1 --do_verify=0 --verify=crc32c --verify_backlog=1
- name: Run FIO 1MB
- name: Run FIO
timeout-minutes: 15
run: |
echo "Starting FIO at: $(date)"
@ -94,7 +94,7 @@ jobs:
- name: Archive logs
if: always()
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@v3
with:
name: output-logs
path: docker/output.log

View file

@ -1,234 +0,0 @@
name: "FUSE Integration Tests"
on:
push:
branches: [ master, main ]
paths:
- 'weed/**'
- 'test/fuse_integration/**'
- '.github/workflows/fuse-integration.yml'
pull_request:
branches: [ master, main ]
paths:
- 'weed/**'
- 'test/fuse_integration/**'
- '.github/workflows/fuse-integration.yml'
concurrency:
group: ${{ github.head_ref }}/fuse-integration
cancel-in-progress: true
permissions:
contents: read
env:
GO_VERSION: '1.21'
TEST_TIMEOUT: '45m'
jobs:
fuse-integration:
name: FUSE Integration Testing
runs-on: ubuntu-22.04
timeout-minutes: 50
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up Go ${{ env.GO_VERSION }}
uses: actions/setup-go@v4
with:
go-version: ${{ env.GO_VERSION }}
- name: Install FUSE and dependencies
run: |
sudo apt-get update
sudo apt-get install -y fuse libfuse-dev
# Verify FUSE installation
fusermount --version || true
ls -la /dev/fuse || true
- name: Build SeaweedFS
run: |
cd weed
go build -tags "elastic gocdk sqlite ydb tarantool tikv rclone" -v .
chmod +x weed
# Verify binary
./weed version
- name: Prepare FUSE Integration Tests
run: |
# Create isolated test directory to avoid Go module conflicts
mkdir -p /tmp/seaweedfs-fuse-tests
# Copy only the working test files to avoid Go module conflicts
# These are the files we've verified work without package name issues
cp test/fuse_integration/simple_test.go /tmp/seaweedfs-fuse-tests/ 2>/dev/null || echo "⚠️ simple_test.go not found"
cp test/fuse_integration/working_demo_test.go /tmp/seaweedfs-fuse-tests/ 2>/dev/null || echo "⚠️ working_demo_test.go not found"
# Note: Other test files (framework.go, basic_operations_test.go, etc.)
# have Go module conflicts and are skipped until resolved
echo "📁 Working test files copied:"
ls -la /tmp/seaweedfs-fuse-tests/*.go 2>/dev/null || echo " No test files found"
# Initialize Go module in isolated directory
cd /tmp/seaweedfs-fuse-tests
go mod init seaweedfs-fuse-tests
go mod tidy
# Verify setup
echo "✅ FUSE integration test environment prepared"
ls -la /tmp/seaweedfs-fuse-tests/
echo ""
echo " Current Status: Running working subset of FUSE tests"
echo " • simple_test.go: Package structure verification"
echo " • working_demo_test.go: Framework capability demonstration"
echo " • Full framework: Available in test/fuse_integration/ (module conflicts pending resolution)"
- name: Run FUSE Integration Tests
run: |
cd /tmp/seaweedfs-fuse-tests
echo "🧪 Running FUSE integration tests..."
echo "============================================"
# Run available working test files
TESTS_RUN=0
if [ -f "simple_test.go" ]; then
echo "📋 Running simple_test.go..."
go test -v -timeout=${{ env.TEST_TIMEOUT }} simple_test.go
TESTS_RUN=$((TESTS_RUN + 1))
fi
if [ -f "working_demo_test.go" ]; then
echo "📋 Running working_demo_test.go..."
go test -v -timeout=${{ env.TEST_TIMEOUT }} working_demo_test.go
TESTS_RUN=$((TESTS_RUN + 1))
fi
# Run combined test if multiple files exist
if [ -f "simple_test.go" ] && [ -f "working_demo_test.go" ]; then
echo "📋 Running combined tests..."
go test -v -timeout=${{ env.TEST_TIMEOUT }} simple_test.go working_demo_test.go
fi
if [ $TESTS_RUN -eq 0 ]; then
echo "⚠️ No working test files found, running module verification only"
go version
go mod verify
else
echo "✅ Successfully ran $TESTS_RUN test file(s)"
fi
echo "============================================"
echo "✅ FUSE integration tests completed"
- name: Run Extended Framework Validation
run: |
cd /tmp/seaweedfs-fuse-tests
echo "🔍 Running extended framework validation..."
echo "============================================"
# Test individual components (only run tests that exist)
if [ -f "simple_test.go" ]; then
echo "Testing simple verification..."
go test -v simple_test.go
fi
if [ -f "working_demo_test.go" ]; then
echo "Testing framework demo..."
go test -v working_demo_test.go
fi
# Test combined execution if both files exist
if [ -f "simple_test.go" ] && [ -f "working_demo_test.go" ]; then
echo "Testing combined execution..."
go test -v simple_test.go working_demo_test.go
elif [ -f "simple_test.go" ] || [ -f "working_demo_test.go" ]; then
echo "✅ Individual tests already validated above"
else
echo "⚠️ No working test files found for combined testing"
fi
echo "============================================"
echo "✅ Extended validation completed"
- name: Generate Test Coverage Report
run: |
cd /tmp/seaweedfs-fuse-tests
echo "📊 Generating test coverage report..."
go test -v -coverprofile=coverage.out .
go tool cover -html=coverage.out -o coverage.html
echo "Coverage report generated: coverage.html"
- name: Verify SeaweedFS Binary Integration
run: |
# Test that SeaweedFS binary is accessible from test environment
WEED_BINARY=$(pwd)/weed/weed
if [ -f "$WEED_BINARY" ]; then
echo "✅ SeaweedFS binary found at: $WEED_BINARY"
$WEED_BINARY version
echo "Binary is ready for full integration testing"
else
echo "❌ SeaweedFS binary not found"
exit 1
fi
- name: Upload Test Artifacts
if: always()
uses: actions/upload-artifact@v4
with:
name: fuse-integration-test-results
path: |
/tmp/seaweedfs-fuse-tests/coverage.out
/tmp/seaweedfs-fuse-tests/coverage.html
/tmp/seaweedfs-fuse-tests/*.log
retention-days: 7
- name: Test Summary
if: always()
run: |
echo "## 🚀 FUSE Integration Test Summary" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "### Framework Status" >> $GITHUB_STEP_SUMMARY
echo "- ✅ **Framework Design**: Complete and validated" >> $GITHUB_STEP_SUMMARY
echo "- ✅ **Working Tests**: Core framework demonstration functional" >> $GITHUB_STEP_SUMMARY
echo "- ⚠️ **Full Framework**: Available but requires Go module resolution" >> $GITHUB_STEP_SUMMARY
echo "- ✅ **CI/CD Integration**: Automated testing pipeline established" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "### Test Capabilities" >> $GITHUB_STEP_SUMMARY
echo "- 📁 **File Operations**: Create, read, write, delete, permissions" >> $GITHUB_STEP_SUMMARY
echo "- 📂 **Directory Operations**: Create, list, delete, nested structures" >> $GITHUB_STEP_SUMMARY
echo "- 📊 **Large Files**: Multi-megabyte file handling" >> $GITHUB_STEP_SUMMARY
echo "- 🔄 **Concurrent Operations**: Multi-threaded stress testing" >> $GITHUB_STEP_SUMMARY
echo "- ⚠️ **Error Scenarios**: Comprehensive error handling validation" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "### Comparison with Current Tests" >> $GITHUB_STEP_SUMMARY
echo "| Aspect | Current (FIO) | This Framework |" >> $GITHUB_STEP_SUMMARY
echo "|--------|---------------|----------------|" >> $GITHUB_STEP_SUMMARY
echo "| **Scope** | Performance only | Functional + Performance |" >> $GITHUB_STEP_SUMMARY
echo "| **Operations** | Read/Write only | All FUSE operations |" >> $GITHUB_STEP_SUMMARY
echo "| **Concurrency** | Single-threaded | Multi-threaded stress tests |" >> $GITHUB_STEP_SUMMARY
echo "| **Automation** | Manual setup | Fully automated |" >> $GITHUB_STEP_SUMMARY
echo "| **Validation** | Speed metrics | Correctness + Performance |" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "### Current Working Tests" >> $GITHUB_STEP_SUMMARY
echo "- ✅ **Framework Structure**: Package and module verification" >> $GITHUB_STEP_SUMMARY
echo "- ✅ **Configuration Management**: Test config validation" >> $GITHUB_STEP_SUMMARY
echo "- ✅ **File Operations Demo**: Basic file create/read/write simulation" >> $GITHUB_STEP_SUMMARY
echo "- ✅ **Large File Handling**: 1MB+ file processing demonstration" >> $GITHUB_STEP_SUMMARY
echo "- ✅ **Concurrency Simulation**: Multi-file operation testing" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "### Next Steps" >> $GITHUB_STEP_SUMMARY
echo "1. **Module Resolution**: Fix Go package conflicts for full framework" >> $GITHUB_STEP_SUMMARY
echo "2. **SeaweedFS Integration**: Connect with real cluster for end-to-end testing" >> $GITHUB_STEP_SUMMARY
echo "3. **Performance Benchmarks**: Add performance regression testing" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "📈 **Total Framework Size**: ~1,500 lines of comprehensive testing infrastructure" >> $GITHUB_STEP_SUMMARY

View file

@ -21,20 +21,20 @@ jobs:
steps:
- name: Set up Go 1.x
uses: actions/setup-go@fa96338abe5531f6e34c5cc0bbe28c1a533d5505 # v2
uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # v2
with:
go-version: ^1.13
id: go
- name: Check out code into the Go module directory
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v2
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v2
- name: Get dependencies
run: |
cd weed; go get -v -t -d ./...
- name: Build
run: cd weed; go build -tags "elastic gocdk sqlite ydb tarantool tikv rclone" -v .
run: cd weed; go build -tags "elastic gocdk sqlite ydb tikv" -v .
- name: Test
run: cd weed; go test -tags "elastic gocdk sqlite ydb tarantool tikv rclone" -v ./...
run: cd weed; go test -tags "elastic gocdk sqlite ydb tikv" -v ./...

View file

@ -12,12 +12,11 @@ jobs:
release:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
- uses: actions/checkout@v3
- name: Publish Helm charts
uses: stefanprodan/helm-gh-pages@master
with:
token: ${{ secrets.GITHUB_TOKEN }}
charts_dir: k8s/charts
target_dir: helm
branch: gh-pages
helm_version: v3.18.4
branch: master
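The step above publishes the charts under k8s/charts to a Helm repository served from the repository's pages branch (gh-pages with target_dir helm on the master side). A hedged sketch of consuming the published charts; the repository URL and chart name are assumptions inferred from those settings rather than stated in this diff:

helm repo add seaweedfs https://seaweedfs.github.io/seaweedfs/helm
helm repo update
helm install seaweedfs seaweedfs/seaweedfs    # chart name assumed to be 'seaweedfs'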

View file

@ -1,51 +0,0 @@
name: "helm: lint and test charts"
on:
push:
branches: [ master ]
paths: ['k8s/**']
pull_request:
branches: [ master ]
paths: ['k8s/**']
permissions:
contents: read
jobs:
lint-test:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
with:
fetch-depth: 0
- name: Set up Helm
uses: azure/setup-helm@v4
with:
version: v3.18.4
- uses: actions/setup-python@v5
with:
python-version: '3.9'
check-latest: true
- name: Set up chart-testing
uses: helm/chart-testing-action@v2.7.0
- name: Run chart-testing (list-changed)
id: list-changed
run: |
changed=$(ct list-changed --target-branch ${{ github.event.repository.default_branch }} --chart-dirs k8s/charts)
if [[ -n "$changed" ]]; then
echo "::set-output name=changed::true"
fi
- name: Run chart-testing (lint)
run: ct lint --target-branch ${{ github.event.repository.default_branch }} --all --validate-maintainers=false --chart-dirs k8s/charts
- name: Create kind cluster
uses: helm/kind-action@v1.12.0
- name: Run chart-testing (install)
run: ct install --target-branch ${{ github.event.repository.default_branch }} --all --chart-dirs k8s/charts

View file

@ -1,412 +0,0 @@
name: "S3 Go Tests"
on:
pull_request:
concurrency:
group: ${{ github.head_ref }}/s3-go-tests
cancel-in-progress: true
permissions:
contents: read
defaults:
run:
working-directory: weed
jobs:
s3-versioning-tests:
name: S3 Versioning Tests
runs-on: ubuntu-22.04
timeout-minutes: 30
strategy:
matrix:
test-type: ["quick", "comprehensive"]
steps:
- name: Check out code
uses: actions/checkout@v4
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version-file: 'go.mod'
id: go
- name: Install SeaweedFS
run: |
go install -buildvcs=false
- name: Run S3 Versioning Tests - ${{ matrix.test-type }}
timeout-minutes: 25
working-directory: test/s3/versioning
run: |
set -x
echo "=== System Information ==="
uname -a
free -h
df -h
echo "=== Starting Tests ==="
# Run tests with automatic server management
# The test-with-server target handles server startup/shutdown automatically
if [ "${{ matrix.test-type }}" = "quick" ]; then
# Override TEST_PATTERN for quick tests only
make test-with-server TEST_PATTERN="TestBucketListReturnDataVersioning|TestVersioningBasicWorkflow|TestVersioningDeleteMarkers"
else
# Run all versioning tests
make test-with-server
fi
- name: Show server logs on failure
if: failure()
working-directory: test/s3/versioning
run: |
echo "=== Server Logs ==="
if [ -f weed-test.log ]; then
echo "Last 100 lines of server logs:"
tail -100 weed-test.log
else
echo "No server log file found"
fi
echo "=== Test Environment ==="
ps aux | grep -E "(weed|test)" || true
netstat -tlnp | grep -E "(8333|9333|8080)" || true
- name: Upload test logs on failure
if: failure()
uses: actions/upload-artifact@v4
with:
name: s3-versioning-test-logs-${{ matrix.test-type }}
path: test/s3/versioning/weed-test*.log
retention-days: 3
s3-versioning-compatibility:
name: S3 Versioning Compatibility Test
runs-on: ubuntu-22.04
timeout-minutes: 20
steps:
- name: Check out code
uses: actions/checkout@v4
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version-file: 'go.mod'
id: go
- name: Install SeaweedFS
run: |
go install -buildvcs=false
- name: Run Core Versioning Test (Python s3tests equivalent)
timeout-minutes: 15
working-directory: test/s3/versioning
run: |
set -x
echo "=== System Information ==="
uname -a
free -h
# Run the specific test that is equivalent to the Python s3tests
make test-with-server || {
echo "❌ Test failed, checking logs..."
if [ -f weed-test.log ]; then
echo "=== Server logs ==="
tail -100 weed-test.log
fi
echo "=== Process information ==="
ps aux | grep -E "(weed|test)" || true
exit 1
}
- name: Upload server logs on failure
if: failure()
uses: actions/upload-artifact@v4
with:
name: s3-versioning-compatibility-logs
path: test/s3/versioning/weed-test*.log
retention-days: 3
s3-cors-compatibility:
name: S3 CORS Compatibility Test
runs-on: ubuntu-22.04
timeout-minutes: 20
steps:
- name: Check out code
uses: actions/checkout@v4
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version-file: 'go.mod'
id: go
- name: Install SeaweedFS
run: |
go install -buildvcs=false
- name: Run Core CORS Test (AWS S3 compatible)
timeout-minutes: 15
working-directory: test/s3/cors
run: |
set -x
echo "=== System Information ==="
uname -a
free -h
# Run the specific test that is equivalent to AWS S3 CORS behavior
make test-with-server || {
echo "❌ Test failed, checking logs..."
if [ -f weed-test.log ]; then
echo "=== Server logs ==="
tail -100 weed-test.log
fi
echo "=== Process information ==="
ps aux | grep -E "(weed|test)" || true
exit 1
}
- name: Upload server logs on failure
if: failure()
uses: actions/upload-artifact@v4
with:
name: s3-cors-compatibility-logs
path: test/s3/cors/weed-test*.log
retention-days: 3
s3-retention-tests:
name: S3 Retention Tests
runs-on: ubuntu-22.04
timeout-minutes: 30
strategy:
matrix:
test-type: ["quick", "comprehensive"]
steps:
- name: Check out code
uses: actions/checkout@v4
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version-file: 'go.mod'
id: go
- name: Install SeaweedFS
run: |
go install -buildvcs=false
- name: Run S3 Retention Tests - ${{ matrix.test-type }}
timeout-minutes: 25
working-directory: test/s3/retention
run: |
set -x
echo "=== System Information ==="
uname -a
free -h
df -h
echo "=== Starting Tests ==="
# Run tests with automatic server management
# The test-with-server target handles server startup/shutdown automatically
if [ "${{ matrix.test-type }}" = "quick" ]; then
# Override TEST_PATTERN for quick tests only
make test-with-server TEST_PATTERN="TestBasicRetentionWorkflow|TestRetentionModeCompliance|TestLegalHoldWorkflow"
else
# Run all retention tests
make test-with-server
fi
- name: Show server logs on failure
if: failure()
working-directory: test/s3/retention
run: |
echo "=== Server Logs ==="
if [ -f weed-test.log ]; then
echo "Last 100 lines of server logs:"
tail -100 weed-test.log
else
echo "No server log file found"
fi
echo "=== Test Environment ==="
ps aux | grep -E "(weed|test)" || true
netstat -tlnp | grep -E "(8333|9333|8080)" || true
- name: Upload test logs on failure
if: failure()
uses: actions/upload-artifact@v4
with:
name: s3-retention-test-logs-${{ matrix.test-type }}
path: test/s3/retention/weed-test*.log
retention-days: 3
s3-cors-tests:
name: S3 CORS Tests
runs-on: ubuntu-22.04
timeout-minutes: 30
strategy:
matrix:
test-type: ["quick", "comprehensive"]
steps:
- name: Check out code
uses: actions/checkout@v4
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version-file: 'go.mod'
id: go
- name: Install SeaweedFS
run: |
go install -buildvcs=false
- name: Run S3 CORS Tests - ${{ matrix.test-type }}
timeout-minutes: 25
working-directory: test/s3/cors
run: |
set -x
echo "=== System Information ==="
uname -a
free -h
df -h
echo "=== Starting Tests ==="
# Run tests with automatic server management
# The test-with-server target handles server startup/shutdown automatically
if [ "${{ matrix.test-type }}" = "quick" ]; then
# Override TEST_PATTERN for quick tests only
make test-with-server TEST_PATTERN="TestCORSConfigurationManagement|TestServiceLevelCORS|TestCORSBasicWorkflow"
else
# Run all CORS tests
make test-with-server
fi
- name: Show server logs on failure
if: failure()
working-directory: test/s3/cors
run: |
echo "=== Server Logs ==="
if [ -f weed-test.log ]; then
echo "Last 100 lines of server logs:"
tail -100 weed-test.log
else
echo "No server log file found"
fi
echo "=== Test Environment ==="
ps aux | grep -E "(weed|test)" || true
netstat -tlnp | grep -E "(8333|9333|8080)" || true
- name: Upload test logs on failure
if: failure()
uses: actions/upload-artifact@v4
with:
name: s3-cors-test-logs-${{ matrix.test-type }}
path: test/s3/cors/weed-test*.log
retention-days: 3
s3-retention-worm:
name: S3 Retention WORM Integration Test
runs-on: ubuntu-22.04
timeout-minutes: 20
steps:
- name: Check out code
uses: actions/checkout@v4
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version-file: 'go.mod'
id: go
- name: Install SeaweedFS
run: |
go install -buildvcs=false
- name: Run WORM Integration Tests
timeout-minutes: 15
working-directory: test/s3/retention
run: |
set -x
echo "=== System Information ==="
uname -a
free -h
# Run the WORM integration tests with automatic server management
# The test-with-server target handles server startup/shutdown automatically
make test-with-server TEST_PATTERN="TestWORM|TestRetentionExtendedAttributes|TestRetentionConcurrentOperations" || {
echo "❌ WORM integration test failed, checking logs..."
if [ -f weed-test.log ]; then
echo "=== Server logs ==="
tail -100 weed-test.log
fi
echo "=== Process information ==="
ps aux | grep -E "(weed|test)" || true
exit 1
}
- name: Upload server logs on failure
if: failure()
uses: actions/upload-artifact@v4
with:
name: s3-retention-worm-logs
path: test/s3/retention/weed-test*.log
retention-days: 3
s3-versioning-stress:
name: S3 Versioning Stress Test
runs-on: ubuntu-22.04
timeout-minutes: 35
# Only run stress tests on master branch pushes to avoid overloading PR testing
if: github.event_name == 'push' && github.ref == 'refs/heads/master'
steps:
- name: Check out code
uses: actions/checkout@v4
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version-file: 'go.mod'
id: go
- name: Install SeaweedFS
run: |
go install -buildvcs=false
- name: Run S3 Versioning Stress Tests
timeout-minutes: 30
working-directory: test/s3/versioning
run: |
set -x
echo "=== System Information ==="
uname -a
free -h
# Run stress tests (concurrent operations)
make test-versioning-stress || {
echo "❌ Stress test failed, checking logs..."
if [ -f weed-test.log ]; then
echo "=== Server logs ==="
tail -200 weed-test.log
fi
make clean
exit 1
}
make clean
- name: Upload stress test logs
if: always()
uses: actions/upload-artifact@v4
with:
name: s3-versioning-stress-logs
path: test/s3/versioning/weed-test*.log
retention-days: 7

File diff suppressed because it is too large.

View file

@ -1,79 +0,0 @@
name: "test s3 over https using aws-cli"
on:
push:
branches: [master, test-https-s3-awscli]
pull_request:
branches: [master, test-https-s3-awscli]
env:
AWS_ACCESS_KEY_ID: some_access_key1
AWS_SECRET_ACCESS_KEY: some_secret_key1
AWS_ENDPOINT_URL: https://localhost:8443
defaults:
run:
working-directory: weed
jobs:
awscli-tests:
runs-on: ubuntu-latest
timeout-minutes: 5
steps:
- uses: actions/checkout@v4
- uses: actions/setup-go@v5.5.0
with:
go-version: ^1.24
- name: Build SeaweedFS
run: |
go build
- name: Start SeaweedFS
run: |
set -e
mkdir -p /tmp/data
./weed server -s3 -dir=/tmp/data -s3.config=../docker/compose/s3.json &
until curl -s http://localhost:8333/ > /dev/null; do sleep 1; done
- name: Setup Caddy
run: |
curl -fsSL "https://caddyserver.com/api/download?os=linux&arch=amd64" -o caddy
chmod +x caddy
./caddy version
echo "{
auto_https disable_redirects
local_certs
}
localhost:8443 {
tls internal
reverse_proxy localhost:8333
}" > Caddyfile
- name: Start Caddy
run: |
./caddy start
until curl -fsS --insecure https://localhost:8443 > /dev/null; do sleep 1; done
- name: Create Bucket
run: |
aws --no-verify-ssl s3api create-bucket --bucket bucket
- name: Test PutObject
run: |
set -e
dd if=/dev/urandom of=generated bs=1M count=2
aws --no-verify-ssl s3api put-object --bucket bucket --key test-putobject --body generated
aws --no-verify-ssl s3api get-object --bucket bucket --key test-putobject downloaded
diff -q generated downloaded
rm -f generated downloaded
- name: Test Multi-part Upload
run: |
set -e
dd if=/dev/urandom of=generated bs=1M count=32
aws --no-verify-ssl s3 cp --no-progress generated s3://bucket/test-multipart
aws --no-verify-ssl s3 cp --no-progress s3://bucket/test-multipart downloaded
diff -q generated downloaded
rm -f generated downloaded
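The workflow above trusts Caddy's locally generated certificate by passing `--no-verify-ssl` to the AWS CLI. A Go client talking to the same `https://localhost:8443` endpoint needs the equivalent relaxation on its HTTP transport. The sketch below is a minimal illustration only; the endpoint and credentials come from this workflow's env, while everything else (region, the ListBuckets smoke test) is an assumption.

```go
package main

import (
	"crypto/tls"
	"log"
	"net/http"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Equivalent of `aws --no-verify-ssl`: skip verification of the self-signed cert.
	// For anything beyond local testing, load Caddy's CA into tls.Config.RootCAs instead.
	httpClient := &http.Client{
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		},
	}

	sess := session.Must(session.NewSession(&aws.Config{
		Region:           aws.String("us-east-1"),
		Endpoint:         aws.String("https://localhost:8443"),
		Credentials:      credentials.NewStaticCredentials("some_access_key1", "some_secret_key1", ""),
		S3ForcePathStyle: aws.Bool(true),
		HTTPClient:       httpClient,
	}))

	svc := s3.New(sess)
	if _, err := svc.ListBuckets(&s3.ListBucketsInput{}); err != nil {
		log.Fatal(err)
	}
	log.Println("TLS handshake and request through the Caddy proxy succeeded")
}
```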

.gitignore

@ -87,28 +87,3 @@ other/java/hdfs/dependency-reduced-pom.xml
# binary file
weed/weed
docker/weed
# test generated files
weed/*/*.jpg
docker/weed_sub
docker/weed_pub
weed/mq/schema/example.parquet
docker/agent_sub_record
test/mq/bin/consumer
test/mq/bin/producer
test/producer
bin/weed
weed_binary
/test/s3/copying/filerldb2
/filerldb2
/test/s3/retention/test-volume-data
test/s3/cors/weed-test.log
test/s3/cors/weed-server.pid
/test/s3/cors/test-volume-data
test/s3/cors/cors.test
/test/s3/retention/filerldb2
test/s3/retention/weed-server.pid
test/s3/retention/weed-test.log
/test/s3/versioning/test-volume-data
test/s3/versioning/weed-test.log


@ -186,7 +186,7 @@
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2025 Chris Lu
Copyright 2016 Chris Lu
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.


@ -1,71 +1,14 @@
.PHONY: test admin-generate admin-build admin-clean admin-dev admin-run admin-test admin-fmt admin-help
BINARY = weed
ADMIN_DIR = weed/admin
SOURCE_DIR = .
debug ?= 0
all: install
install: admin-generate
install:
cd weed; go install
warp_install:
go install github.com/minio/warp@v0.7.6
full_install:
cd weed; go install -tags "elastic gocdk sqlite ydb tikv"
full_install: admin-generate
cd weed; go install -tags "elastic gocdk sqlite ydb tarantool tikv rclone"
server: install
weed -v 0 server -s3 -filer -filer.maxMB=64 -volume.max=0 -master.volumeSizeLimitMB=100 -volume.preStopSeconds=1 -s3.port=8000 -s3.allowEmptyFolder=false -s3.allowDeleteBucketNotEmpty=true -s3.config=./docker/compose/s3.json -metricsPort=9324
benchmark: install warp_install
pkill weed || true
pkill warp || true
weed server -debug=$(debug) -s3 -filer -volume.max=0 -master.volumeSizeLimitMB=100 -volume.preStopSeconds=1 -s3.port=8000 -s3.allowEmptyFolder=false -s3.allowDeleteBucketNotEmpty=false -s3.config=./docker/compose/s3.json &
warp client &
while ! nc -z localhost 8000 ; do sleep 1 ; done
warp mixed --host=127.0.0.1:8000 --access-key=some_access_key1 --secret-key=some_secret_key1 --autoterm
pkill warp
pkill weed
# curl -o profile "http://127.0.0.1:6060/debug/pprof/profile?debug=1"
benchmark_with_pprof: debug = 1
benchmark_with_pprof: benchmark
test: admin-generate
cd weed; go test -tags "elastic gocdk sqlite ydb tarantool tikv rclone" -v ./...
# Admin component targets
admin-generate:
@echo "Generating admin component templates..."
@cd $(ADMIN_DIR) && $(MAKE) generate
admin-build: admin-generate
@echo "Building admin component..."
@cd $(ADMIN_DIR) && $(MAKE) build
admin-clean:
@echo "Cleaning admin component..."
@cd $(ADMIN_DIR) && $(MAKE) clean
admin-dev:
@echo "Starting admin development server..."
@cd $(ADMIN_DIR) && $(MAKE) dev
admin-run:
@echo "Running admin server..."
@cd $(ADMIN_DIR) && $(MAKE) run
admin-test:
@echo "Testing admin component..."
@cd $(ADMIN_DIR) && $(MAKE) test
admin-fmt:
@echo "Formatting admin component..."
@cd $(ADMIN_DIR) && $(MAKE) fmt
admin-help:
@echo "Admin component help..."
@cd $(ADMIN_DIR) && $(MAKE) help
tests:
cd weed; go test -tags "elastic gocdk sqlite ydb tikv" -v ./...


@ -8,7 +8,7 @@
[![Wiki](https://img.shields.io/badge/docs-wiki-blue.svg)](https://github.com/seaweedfs/seaweedfs/wiki)
[![Docker Pulls](https://img.shields.io/docker/pulls/chrislusf/seaweedfs?maxAge=4800)](https://hub.docker.com/r/chrislusf/seaweedfs/)
[![SeaweedFS on Maven Central](https://img.shields.io/maven-central/v/com.github.chrislusf/seaweedfs-client)](https://search.maven.org/search?q=g:com.github.chrislusf)
[![Artifact Hub](https://img.shields.io/endpoint?url=https://artifacthub.io/badge/repository/seaweedfs)](https://artifacthub.io/packages/search?repo=seaweedfs)
![SeaweedFS Logo](https://raw.githubusercontent.com/seaweedfs/seaweedfs/master/note/seaweedfs.png)
@ -32,9 +32,8 @@ Your support will be really appreciated by me and other supporters!
-->
### Gold Sponsors
[![nodion](https://raw.githubusercontent.com/seaweedfs/seaweedfs/master/note/sponsor_nodion.png)](https://www.nodion.com)
[![nodion](https://www.nodion.com/img/logo.svg)](https://www.nodion.com)
[![piknik](https://raw.githubusercontent.com/seaweedfs/seaweedfs/master/note/piknik.png)](https://www.piknik.com)
[![keepsec](https://raw.githubusercontent.com/seaweedfs/seaweedfs/master/note/keepsec.png)](https://www.keepsec.ca)
---
@ -46,7 +45,6 @@ Your support will be really appreciated by me and other supporters!
- [SeaweedFS Mailing List](https://groups.google.com/d/forum/seaweedfs)
- [Wiki Documentation](https://github.com/seaweedfs/seaweedfs/wiki)
- [SeaweedFS White Paper](https://github.com/seaweedfs/seaweedfs/wiki/SeaweedFS_Architecture.pdf)
- [SeaweedFS Introduction Slides 2025.5](https://docs.google.com/presentation/d/1tdkp45J01oRV68dIm4yoTXKJDof-EhainlA0LMXexQE/edit?usp=sharing)
- [SeaweedFS Introduction Slides 2021.5](https://docs.google.com/presentation/d/1DcxKWlINc-HNCjhYeERkpGXXm6nTCES8mi2W5G0Z4Ts/edit?usp=sharing)
- [SeaweedFS Introduction Slides 2019.3](https://www.slideshare.net/chrislusf/seaweedfs-introduction)
@ -73,7 +71,6 @@ Table of Contents
* [Installation Guide](#installation-guide)
* [Disk Related Topics](#disk-related-topics)
* [Benchmark](#benchmark)
* [Enterprise](#enterprise)
* [License](#license)
# Quick Start #
@ -84,7 +81,6 @@ Table of Contents
## Quick Start with Single Binary ##
* Download the latest binary from https://github.com/seaweedfs/seaweedfs/releases and unzip a single binary file `weed` or `weed.exe`. Or run `go install github.com/seaweedfs/seaweedfs/weed@latest`.
* `export AWS_ACCESS_KEY_ID=admin ; export AWS_SECRET_ACCESS_KEY=key` as the admin credentials to access the object store.
* Run `weed server -dir=/some/data/dir -s3` to start one master, one volume server, one filer, and one S3 gateway.
Also, to increase capacity, just add more volume servers by running `weed volume -dir="/some/data/dir2" -mserver="<master_host>:9333" -port=8081` locally, or on a different machine, or on thousands of machines. That is it!
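As a quick sanity check after the steps above, the object store can be exercised from any S3 client. Below is a minimal Go sketch using the aws-sdk-go dependency already listed in go.mod, reading the `admin`/`key` credentials exported above from the environment; the bucket name, region, and the assumption that the S3 gateway listens on its default port 8333 are illustrative, not prescriptive.

```go
package main

import (
	"bytes"
	"io"
	"log"
	"os"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Credentials come from the exports in the quick start; 8333 is the default S3 gateway port.
	sess := session.Must(session.NewSession(&aws.Config{
		Region:           aws.String("us-east-1"),
		Endpoint:         aws.String("http://localhost:8333"),
		Credentials:      credentials.NewStaticCredentials(os.Getenv("AWS_ACCESS_KEY_ID"), os.Getenv("AWS_SECRET_ACCESS_KEY"), ""),
		S3ForcePathStyle: aws.Bool(true),
	}))
	svc := s3.New(sess)

	bucket, key := aws.String("quickstart"), aws.String("hello.txt")
	if _, err := svc.CreateBucket(&s3.CreateBucketInput{Bucket: bucket}); err != nil {
		log.Fatal(err)
	}
	if _, err := svc.PutObject(&s3.PutObjectInput{
		Bucket: bucket, Key: key, Body: bytes.NewReader([]byte("hello seaweedfs")),
	}); err != nil {
		log.Fatal(err)
	}

	out, err := svc.GetObject(&s3.GetObjectInput{Bucket: bucket, Key: key})
	if err != nil {
		log.Fatal(err)
	}
	defer out.Body.Close()
	body, _ := io.ReadAll(out.Body)
	log.Printf("read back: %s", body)
}
```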
@ -125,7 +121,7 @@ SeaweedFS can transparently integrate with the cloud.
With hot data on local cluster, and warm data on the cloud with O(1) access time,
SeaweedFS can achieve both fast local access time and elastic cloud storage capacity.
What's more, the cloud storage access API cost is minimized.
Faster and cheaper than direct cloud storage!
Faster and Cheaper than direct cloud storage!
[Back to TOC](#table-of-contents)
@ -136,7 +132,7 @@ Faster and cheaper than direct cloud storage!
* Automatic Gzip compression depending on file MIME type.
* Automatic compaction to reclaim disk space after deletion or update.
* [Automatic entry TTL expiration][VolumeServerTTL].
* Any server with some disk space can add to the total storage space.
* Any server with some disk spaces can add to the total storage space.
* Adding/Removing servers does **not** cause any data re-balancing unless triggered by admin commands.
* Optional picture resizing.
* Support ETag, Accept-Range, Last-Modified, etc.
@ -149,7 +145,7 @@ Faster and cheaper than direct cloud storage!
[Back to TOC](#table-of-contents)
## Filer Features ##
* [Filer server][Filer] provides "normal" directories and files via HTTP.
* [Filer server][Filer] provides "normal" directories and files via http.
* [File TTL][FilerTTL] automatically expires file metadata and actual file data.
* [Mount filer][Mount] reads and writes files directly as a local directory via FUSE.
* [Filer Store Replication][FilerStoreReplication] enables HA for filer meta data stores.
@ -378,7 +374,7 @@ Each individual file size is limited to the volume size.
### Saving memory ###
All file meta information stored on a volume server is readable from memory without disk access. Each file takes just a 16-byte map entry of <64bit key, 32bit offset, 32bit size>. Of course, each map entry has its own space cost for the map. But usually the disk space runs out before the memory does.
All file meta information stored on an volume server is readable from memory without disk access. Each file takes just a 16-byte map entry of <64bit key, 32bit offset, 32bit size>. Of course, each map entry has its own space cost for the map. But usually the disk space runs out before the memory does.
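To make that arithmetic concrete, here is an illustrative sketch of the 16-byte entry described above and what it implies for memory sizing. The struct layout mirrors the <64bit key, 32bit offset, 32bit size> triple from the paragraph; the type and field names are placeholders, not the actual SeaweedFS in-memory index types.

```go
package main

import (
	"fmt"
	"unsafe"
)

// Illustrative layout of the per-file entry described above:
// a 64-bit key plus a 32-bit offset and a 32-bit size, 16 bytes in total.
type needleEntry struct {
	Key    uint64 // file id within the volume
	Offset uint32 // where the needle starts on disk
	Size   uint32 // needle size
}

func main() {
	fmt.Printf("entry size: %d bytes\n", unsafe.Sizeof(needleEntry{})) // 16

	const bytesPerEntry = 16
	files := 100_000_000 // e.g. one hundred million small files on one volume server
	fmt.Printf("raw index size: %.1f GB (plus map overhead)\n",
		float64(files)*bytesPerEntry/1e9) // 1.6 GB
}
```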
### Tiered Storage to the cloud ###
@ -586,78 +582,6 @@ Percentage of the requests served within a certain time (ms)
100% 54.1 ms
```
### Run WARP and launch a mixed benchmark. ###
```
make benchmark
warp: Benchmark data written to "warp-mixed-2023-10-16[102354]-l70a.csv.zst"
Mixed operations.
Operation: DELETE, 10%, Concurrency: 20, Ran 4m59s.
* Throughput: 6.19 obj/s
Operation: GET, 45%, Concurrency: 20, Ran 5m0s.
* Throughput: 279.85 MiB/s, 27.99 obj/s
Operation: PUT, 15%, Concurrency: 20, Ran 5m0s.
* Throughput: 89.86 MiB/s, 8.99 obj/s
Operation: STAT, 30%, Concurrency: 20, Ran 5m0s.
* Throughput: 18.63 obj/s
Cluster Total: 369.74 MiB/s, 61.79 obj/s, 0 errors over 5m0s.
```
To see segmented request statistics, use the --analyze.v parameter.
```
warp analyze --analyze.v warp-mixed-2023-10-16[102354]-l70a.csv.zst
18642 operations loaded... Done!
Mixed operations.
----------------------------------------
Operation: DELETE - total: 1854, 10.0%, Concurrency: 20, Ran 5m0s, starting 2023-10-16 10:23:57.115 +0500 +05
* Throughput: 6.19 obj/s
Requests considered: 1855:
* Avg: 104ms, 50%: 30ms, 90%: 207ms, 99%: 1.355s, Fastest: 1ms, Slowest: 4.613s, StdDev: 320ms
----------------------------------------
Operation: GET - total: 8388, 45.3%, Size: 10485760 bytes. Concurrency: 20, Ran 5m0s, starting 2023-10-16 10:23:57.12 +0500 +05
* Throughput: 279.77 MiB/s, 27.98 obj/s
Requests considered: 8389:
* Avg: 221ms, 50%: 106ms, 90%: 492ms, 99%: 1.739s, Fastest: 8ms, Slowest: 8.633s, StdDev: 383ms
* TTFB: Avg: 81ms, Best: 2ms, 25th: 24ms, Median: 39ms, 75th: 65ms, 90th: 171ms, 99th: 669ms, Worst: 4.783s StdDev: 163ms
* First Access: Avg: 240ms, 50%: 105ms, 90%: 511ms, 99%: 2.08s, Fastest: 12ms, Slowest: 8.633s, StdDev: 480ms
* First Access TTFB: Avg: 88ms, Best: 2ms, 25th: 24ms, Median: 38ms, 75th: 64ms, 90th: 179ms, 99th: 919ms, Worst: 4.783s StdDev: 199ms
* Last Access: Avg: 219ms, 50%: 106ms, 90%: 463ms, 99%: 1.782s, Fastest: 9ms, Slowest: 8.633s, StdDev: 416ms
* Last Access TTFB: Avg: 81ms, Best: 2ms, 25th: 24ms, Median: 39ms, 75th: 65ms, 90th: 161ms, 99th: 657ms, Worst: 4.783s StdDev: 176ms
----------------------------------------
Operation: PUT - total: 2688, 14.5%, Size: 10485760 bytes. Concurrency: 20, Ran 5m0s, starting 2023-10-16 10:23:57.115 +0500 +05
* Throughput: 89.83 MiB/s, 8.98 obj/s
Requests considered: 2689:
* Avg: 1.165s, 50%: 878ms, 90%: 2.015s, 99%: 5.74s, Fastest: 99ms, Slowest: 8.264s, StdDev: 968ms
----------------------------------------
Operation: STAT - total: 5586, 30.2%, Concurrency: 20, Ran 5m0s, starting 2023-10-16 10:23:57.113 +0500 +05
* Throughput: 18.63 obj/s
Requests considered: 5587:
* Avg: 15ms, 50%: 11ms, 90%: 34ms, 99%: 80ms, Fastest: 0s, Slowest: 245ms, StdDev: 17ms
* First Access: Avg: 14ms, 50%: 10ms, 90%: 33ms, 99%: 69ms, Fastest: 0s, Slowest: 203ms, StdDev: 16ms
* Last Access: Avg: 15ms, 50%: 11ms, 90%: 34ms, 99%: 74ms, Fastest: 0s, Slowest: 203ms, StdDev: 17ms
Cluster Total: 369.64 MiB/s, 61.77 obj/s, 0 errors over 5m0s.
Total Errors:0.
```
[Back to TOC](#table-of-contents)
## Enterprise ##
For enterprise users, please visit [seaweedfs.com](https://seaweedfs.com) for the SeaweedFS Enterprise Edition,
which has a self-healing storage format with better data protection.
[Back to TOC](#table-of-contents)
## License ##


@ -8,7 +8,6 @@
- [Evercam Camera Management Software](https://evercam.io/)
- [Spherical Elephant GmbH](https://www.sphericalelephant.com)
- [WizardTales GmbH](https://www.wizardtales.com)
- [Nimbus Web Services](https://nimbusws.com)
- <h2 align="center">Backers</h2>


@ -0,0 +1,44 @@
FROM gcc:11 as builder
RUN mkdir -p /go/src/github.com/seaweedfs/
RUN git clone https://github.com/seaweedfs/seaweedfs /go/src/github.com/seaweedfs/seaweedfs
ARG BRANCH=${BRANCH:-master}
RUN cd /go/src/github.com/seaweedfs/seaweedfs && git checkout $BRANCH
RUN cd /go/src/github.com/seaweedfs/seaweedfs/weed \
&& apt-get update \
&& apt-get install -y golang-src \
&& export LDFLAGS="-X github.com/seaweedfs/seaweedfs/weed/util.COMMIT=$(git rev-parse --short HEAD)" \
&& CGO_ENABLED=0 go install -ldflags "-extldflags -static ${LDFLAGS}" -compiler=gccgo -tags gccgo,noasm
FROM alpine AS final
LABEL author="Chris Lu"
COPY --from=builder /go/bin/weed /usr/bin/
RUN mkdir -p /etc/seaweedfs
COPY --from=builder /go/src/github.com/seaweedfs/seaweedfs/docker/filer.toml /etc/seaweedfs/filer.toml
COPY --from=builder /go/src/github.com/seaweedfs/seaweedfs/docker/entrypoint.sh /entrypoint.sh
RUN apk add fuse # for weed mount
# volume server grpc port
EXPOSE 18080
# volume server http port
EXPOSE 8080
# filer server grpc port
EXPOSE 18888
# filer server http port
EXPOSE 8888
# master server shared grpc port
EXPOSE 19333
# master server shared http port
EXPOSE 9333
# s3 server http port
EXPOSE 8333
# webdav server http port
EXPOSE 7333
RUN mkdir -p /data/filerldb2
VOLUME /data
WORKDIR /data
RUN chmod +x /entrypoint.sh
ENTRYPOINT ["/entrypoint.sh"]


@ -1,4 +1,4 @@
FROM golang:1.24-alpine as builder
FROM golang:1.20-alpine as builder
RUN apk add git g++ fuse
RUN mkdir -p /go/src/github.com/seaweedfs/
RUN git clone https://github.com/seaweedfs/seaweedfs /go/src/github.com/seaweedfs/seaweedfs
@ -6,7 +6,7 @@ ARG BRANCH=${BRANCH:-master}
ARG TAGS
RUN cd /go/src/github.com/seaweedfs/seaweedfs && git checkout $BRANCH
RUN cd /go/src/github.com/seaweedfs/seaweedfs/weed \
&& export LDFLAGS="-X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=$(git rev-parse --short HEAD)" \
&& export LDFLAGS="-X github.com/seaweedfs/seaweedfs/weed/util.COMMIT=$(git rev-parse --short HEAD)" \
&& CGO_ENABLED=0 go install -tags "$TAGS" -ldflags "-extldflags -static ${LDFLAGS}"
FROM alpine AS final


@ -1,8 +1,6 @@
FROM alpine AS final
LABEL author="Chris Lu"
COPY ./weed /usr/bin/
COPY ./weed_pub* /usr/bin/
COPY ./weed_sub* /usr/bin/
RUN mkdir -p /etc/seaweedfs
COPY ./filer.toml /etc/seaweedfs/filer.toml
COPY ./entrypoint.sh /entrypoint.sh


@ -1,9 +1,9 @@
FROM golang:1.24 as builder
FROM golang:1.20-buster as builder
RUN apt-get update
RUN apt-get install -y build-essential libsnappy-dev zlib1g-dev libbz2-dev libgflags-dev liblz4-dev libzstd-dev
ENV ROCKSDB_VERSION v10.2.1
ENV ROCKSDB_VERSION v7.5.3
# build RocksDB
RUN cd /tmp && \


@ -1,9 +1,9 @@
FROM golang:1.24 as builder
FROM golang:1.20-buster as builder
RUN apt-get update
RUN apt-get install -y build-essential libsnappy-dev zlib1g-dev libbz2-dev libgflags-dev liblz4-dev libzstd-dev
ENV ROCKSDB_VERSION v10.2.1
ENV ROCKSDB_VERSION v7.5.3
# build RocksDB
RUN cd /tmp && \
@ -21,7 +21,7 @@ RUN git clone https://github.com/seaweedfs/seaweedfs /go/src/github.com/seaweedf
ARG BRANCH=${BRANCH:-master}
RUN cd /go/src/github.com/seaweedfs/seaweedfs && git checkout $BRANCH
RUN cd /go/src/github.com/seaweedfs/seaweedfs/weed \
&& export LDFLAGS="-X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=$(git rev-parse --short HEAD)" \
&& export LDFLAGS="-X github.com/seaweedfs/seaweedfs/weed/util.COMMIT=$(git rev-parse --short HEAD)" \
&& go install -tags "5BytesOffset rocksdb" -ldflags "-extldflags -static ${LDFLAGS}"


@ -5,7 +5,7 @@ RUN mkdir -p /go/src/github.com/seaweedfs/
ADD . /go/src/github.com/seaweedfs/seaweedfs
RUN ls -al /go/src/github.com/seaweedfs/ && \
cd /go/src/github.com/seaweedfs/seaweedfs/weed \
&& export LDFLAGS="-X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=$(git rev-parse --short HEAD)" \
&& export LDFLAGS="-X github.com/seaweedfs/seaweedfs/weed/util.COMMIT=$(git rev-parse --short HEAD)" \
&& go install -tags "5BytesOffset rocksdb" -ldflags "-extldflags -static ${LDFLAGS}"


@ -25,7 +25,7 @@ ENV \
NOSETESTS_EXCLUDE="" \
NOSETESTS_ATTR="" \
NOSETESTS_OPTIONS="" \
S3TEST_CONF="/s3tests.conf"
S3TEST_CONF="/s3test.conf"
ENTRYPOINT ["/bin/bash", "-c"]
CMD ["sleep 30 && exec ./virtualenv/bin/nosetests ${NOSETESTS_OPTIONS-} ${NOSETESTS_ATTR:+-a $NOSETESTS_ATTR} ${NOSETESTS_EXCLUDE:+-e $NOSETESTS_EXCLUDE}"]


@ -1,17 +0,0 @@
FROM tarantool/tarantool:3.3.1 AS builder
# install dependencies
RUN apt update && \
apt install -y git unzip cmake tt=2.7.0
# init tt dir structure, create dir for app, create symlink
RUN tt init && \
mkdir app && \
ln -sfn ${PWD}/app/ ${PWD}/instances.enabled/app
# copy cluster configs
COPY tarantool /opt/tarantool/app
# build app
RUN tt build app


@ -7,10 +7,8 @@ gen: dev
cgo ?= 0
binary:
export SWCOMMIT=$(shell git rev-parse --short HEAD)
export SWLDFLAGS="-X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=$(SWCOMMIT)"
cd ../weed && CGO_ENABLED=$(cgo) GOOS=linux go build $(options) -tags "$(tags)" -ldflags "-s -w -extldflags -static $(SWLDFLAGS)" && mv weed ../docker/
cd ../other/mq_client_example/agent_pub_record && CGO_ENABLED=$(cgo) GOOS=linux go build && mv agent_pub_record ../../../docker/
cd ../other/mq_client_example/agent_sub_record && CGO_ENABLED=$(cgo) GOOS=linux go build && mv agent_sub_record ../../../docker/
export SWLDFLAGS="-X github.com/seaweedfs/seaweedfs/weed/util.COMMIT=$(SWCOMMIT)"
cd ../weed && CGO_ENABLED=$(cgo) GOOS=linux go build $(options) -tags "$(tags)" -ldflags "-extldflags -static $(SWLDFLAGS)" && mv weed ../docker/
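The `-X` flag inside those SWLDFLAGS writes the current git commit into a package-level string variable at link time. Below is a minimal sketch of how such a variable is typically declared and surfaced on the Go side; the package path matches the flag above, but the surrounding helper is an assumption for illustration, not the actual weed source.

```go
// Package version holds build-time metadata injected by the linker.
// Built with:
//   go build -ldflags "-X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=$(git rev-parse --short HEAD)"
package version

import "fmt"

// COMMIT must be a plain string variable (not a constant) so that
// `-ldflags "-X <import path>.COMMIT=<value>"` can overwrite it at link time.
var COMMIT string

// VersionString is a hypothetical helper showing how the injected value is usually reported.
func VersionString(base string) string {
	if COMMIT == "" {
		return base
	}
	return fmt.Sprintf("%s %s", base, COMMIT)
}
```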
binary_race: options = -race
binary_race: cgo = 1
@ -22,7 +20,7 @@ build: binary
build_e2e: binary_race
docker build --no-cache -t chrislusf/seaweedfs:e2e -f Dockerfile.e2e .
go_build: # make go_build tags=elastic,ydb,gocdk,hdfs,5BytesOffset,tarantool
go_build: # make go_build tags=elastic,ydb,gocdk,hdfs,5BytesOffset
docker build --build-arg TAGS=$(tags) --no-cache -t chrislusf/seaweedfs:go_build -f Dockerfile.go_build .
go_build_large_disk:
@ -31,15 +29,12 @@ go_build_large_disk:
build_rocksdb_dev_env:
docker build --no-cache -t chrislusf/rocksdb_dev_env -f Dockerfile.rocksdb_dev_env .
build_rocksdb_local: build_rocksdb_dev_env
build_rocksdb_local:
cd .. ; docker build --no-cache -t chrislusf/seaweedfs:rocksdb_local -f docker/Dockerfile.rocksdb_large_local .
build_rocksdb:
docker build --no-cache -t chrislusf/seaweedfs:rocksdb -f Dockerfile.rocksdb_large .
build_tarantool_dev_env:
docker build --no-cache -t chrislusf/tarantool_dev_env -f Dockerfile.tarantool.dev_env .
s3tests_build:
docker build --no-cache -t chrislusf/ceph-s3-tests:local -f Dockerfile.s3tests .
@ -67,8 +62,7 @@ k8s: build
dev_registry: build
docker compose -f compose/local-registry-compose.yml -p seaweedfs up
dev_replicate:
docker build --build-arg TAGS=gocdk --no-cache -t chrislusf/seaweedfs:local -f Dockerfile.go_build .
dev_replicate: build
docker compose -f compose/local-replicate-compose.yml -p seaweedfs up
dev_auditlog: build
@ -86,21 +80,12 @@ cluster: build
2mount: build
docker compose -f compose/local-sync-mount-compose.yml -p seaweedfs up
filer_backup: build
docker compose -f compose/local-filer-backup-compose.yml -p seaweedfs up
hashicorp_raft: build
docker compose -f compose/local-hashicorp-raft-compose.yml -p seaweedfs up
s3tests: build s3tests_build
docker compose -f compose/local-s3tests-compose.yml -p seaweedfs up
brokers: build
docker compose -f compose/local-brokers-compose.yml -p seaweedfs up
agent: build
docker compose -f compose/local-mq-test.yml -p seaweedfs up
filer_etcd: build
docker stack deploy -c compose/swarm-etcd.yml fs
@ -109,22 +94,19 @@ test_etcd: build
test_ydb: tags = ydb
test_ydb: build
export
docker compose -f compose/test-ydb-filer.yml -p seaweedfs up
test_tarantool: tags = tarantool
test_tarantool: build_tarantool_dev_env build
docker compose -f compose/test-tarantool-filer.yml -p seaweedfs up
clean:
rm ./weed
certstrap:
go install -v github.com/square/certstrap@latest
certstrap --depot-path compose/tls init --curve P-256 --passphrase "" --common-name "SeaweedFS CA" || true
certstrap --depot-path compose/tls request-cert --ou "SeaweedFS" --curve P-256 --passphrase "" --domain localhost --common-name volume01.dev || true
certstrap --depot-path compose/tls request-cert --ou "SeaweedFS" --curve P-256 --passphrase "" --common-name master01.dev || true
certstrap --depot-path compose/tls request-cert --ou "SeaweedFS" --curve P-256 --passphrase "" --common-name filer01.dev || true
certstrap --depot-path compose/tls request-cert --ou "SeaweedFS" --curve P-256 --passphrase "" --common-name client01.dev || true
go get github.com/square/certstrap
certstrap --depot-path compose/tls init --passphrase "" --common-name "SeaweedFS CA" || true
certstrap --depot-path compose/tls request-cert --passphrase "" --common-name volume01.dev || true
certstrap --depot-path compose/tls request-cert --passphrase "" --common-name master01.dev || true
certstrap --depot-path compose/tls request-cert --passphrase "" --common-name filer01.dev || true
certstrap --depot-path compose/tls request-cert --passphrase "" --common-name client01.dev || true
certstrap --depot-path compose/tls sign --CA "SeaweedFS CA" volume01.dev || true
certstrap --depot-path compose/tls sign --CA "SeaweedFS CA" master01.dev || true
certstrap --depot-path compose/tls sign --CA "SeaweedFS CA" filer01.dev || true


@ -1,8 +0,0 @@
<source>
@type forward
port 24224
</source>
<match **>
@type stdout # Output logs to container's stdout (visible via `docker logs`)
</match>


@ -19,9 +19,7 @@ services:
depends_on:
- fluent
fluent:
image: fluent/fluentd:v1.17
volumes:
- ./fluent.conf:/fluentd/etc/fluent.conf
image: fluent/fluentd:v1.14
ports:
- 24224:24224
#s3tests:


@ -1,127 +0,0 @@
version: '3.9'
services:
master0:
image: chrislusf/seaweedfs:local
ports:
- 9333:9333
- 19333:19333
command: "-v=0 master -volumeSizeLimitMB 100 -resumeState=false -ip=master0 -port=9333 -peers=master0:9333,master1:9334,master2:9335 -mdir=/tmp"
environment:
WEED_MASTER_VOLUME_GROWTH_COPY_1: 1
WEED_MASTER_VOLUME_GROWTH_COPY_2: 2
WEED_MASTER_VOLUME_GROWTH_COPY_OTHER: 1
master1:
image: chrislusf/seaweedfs:local
ports:
- 9334:9334
- 19334:19334
command: "-v=0 master -volumeSizeLimitMB 100 -resumeState=false -ip=master1 -port=9334 -peers=master0:9333,master1:9334,master2:9335 -mdir=/tmp"
environment:
WEED_MASTER_VOLUME_GROWTH_COPY_1: 1
WEED_MASTER_VOLUME_GROWTH_COPY_2: 2
WEED_MASTER_VOLUME_GROWTH_COPY_OTHER: 1
master2:
image: chrislusf/seaweedfs:local
ports:
- 9335:9335
- 19335:19335
command: "-v=0 master -volumeSizeLimitMB 100 -resumeState=false -ip=master2 -port=9335 -peers=master0:9333,master1:9334,master2:9335 -mdir=/tmp"
environment:
WEED_MASTER_VOLUME_GROWTH_COPY_1: 1
WEED_MASTER_VOLUME_GROWTH_COPY_2: 2
WEED_MASTER_VOLUME_GROWTH_COPY_OTHER: 1
volume1:
image: chrislusf/seaweedfs:local
ports:
- 8080:8080
- 18080:18080
command: 'volume -dataCenter=dc1 -rack=v1 -mserver="master0:9333,master1:9334,master2:9335" -port=8080 -ip=volume1 -publicUrl=localhost:8080 -preStopSeconds=1'
depends_on:
- master0
- master1
- master2
volume2:
image: chrislusf/seaweedfs:local
ports:
- 8082:8082
- 18082:18082
command: 'volume -dataCenter=dc2 -rack=v2 -mserver="master0:9333,master1:9334,master2:9335" -port=8082 -ip=volume2 -publicUrl=localhost:8082 -preStopSeconds=1'
depends_on:
- master0
- master1
- master2
volume3:
image: chrislusf/seaweedfs:local
ports:
- 8083:8083
- 18083:18083
command: 'volume -dataCenter=dc3 -rack=v3 -mserver="master0:9333,master1:9334,master2:9335" -port=8083 -ip=volume3 -publicUrl=localhost:8083 -preStopSeconds=1'
depends_on:
- master0
- master1
- master2
filer1:
image: chrislusf/seaweedfs:local
ports:
- 8888:8888
- 18888:18888
command: 'filer -defaultReplicaPlacement=100 -iam -master="master0:9333,master1:9334,master2:9335" -port=8888 -ip=filer1'
depends_on:
- master0
- master1
- master2
- volume1
- volume2
filer2:
image: chrislusf/seaweedfs:local
ports:
- 8889:8889
- 18889:18889
command: 'filer -defaultReplicaPlacement=100 -iam -master="master0:9333,master1:9334,master2:9335" -port=8889 -ip=filer2'
depends_on:
- master0
- master1
- master2
- volume1
- volume2
- filer1
broker1:
image: chrislusf/seaweedfs:local
ports:
- 17777:17777
command: 'mq.broker -master="master0:9333,master1:9334,master2:9335" -port=17777 -ip=broker1'
depends_on:
- master0
- master1
- master2
- volume1
- volume2
- filer1
- filer2
broker2:
image: chrislusf/seaweedfs:local
ports:
- 17778:17778
command: 'mq.broker -master="master0:9333,master1:9334,master2:9335" -port=17778 -ip=broker2'
depends_on:
- master0
- master1
- master2
- volume1
- volume2
- filer1
- filer2
broker3:
image: chrislusf/seaweedfs:local
ports:
- 17779:17779
command: 'mq.broker -master="master0:9333,master1:9334,master2:9335" -port=17779 -ip=broker3'
depends_on:
- master0
- master1
- master2
- volume1
- volume2
- filer1
- filer2


@ -10,7 +10,7 @@ services:
- 18084:18080
- 8888:8888
- 18888:18888
command: "server -ip=server1 -filer -volume.max=0 -master.volumeSizeLimitMB=100 -volume.preStopSeconds=1"
command: "server -ip=server1 -filer -volume.max=0 -master.volumeSizeLimitMB=1024 -volume.preStopSeconds=1"
volumes:
- ./master-cloud.toml:/etc/seaweedfs/master.toml
depends_on:
@ -25,4 +25,4 @@ services:
- 8889:8888
- 18889:18888
- 8334:8333
command: "server -ip=server2 -filer -s3 -volume.max=0 -master.volumeSizeLimitMB=100 -volume.preStopSeconds=1"
command: "server -ip=server2 -filer -s3 -volume.max=0 -master.volumeSizeLimitMB=1024 -volume.preStopSeconds=1"


@ -6,7 +6,7 @@ services:
ports:
- 9333:9333
- 19333:19333
command: "-v=1 master -ip=master -volumeSizeLimitMB=10"
command: "-v=1 master -ip=master"
volumes:
- ./tls:/etc/seaweedfs/tls
env_file:
@ -16,7 +16,7 @@ services:
ports:
- 8080:8080
- 18080:18080
command: "-v=1 volume -mserver=master:9333 -port=8080 -ip=volume -preStopSeconds=1 -max=10000"
command: "-v=1 volume -mserver=master:9333 -port=8080 -ip=volume -preStopSeconds=1"
depends_on:
- master
volumes:
@ -26,9 +26,10 @@ services:
filer:
image: chrislusf/seaweedfs:local
ports:
- 8111:8111
- 8888:8888
- 18888:18888
command: '-v=1 filer -ip.bind=0.0.0.0 -master="master:9333"'
command: '-v=1 filer -ip.bind=0.0.0.0 -master="master:9333" -iam -iam.ip=filer'
depends_on:
- master
- volume
@ -36,19 +37,6 @@ services:
- ./tls:/etc/seaweedfs/tls
env_file:
- ${ENV_FILE:-dev.env}
iam:
image: chrislusf/seaweedfs:local
ports:
- 8111:8111
command: '-v=1 iam -filer="filer:8888" -master="master:9333"'
depends_on:
- master
- volume
- filer
volumes:
- ./tls:/etc/seaweedfs/tls
s3:
image: chrislusf/seaweedfs:local
ports:
@ -62,7 +50,6 @@ services:
- ./tls:/etc/seaweedfs/tls
env_file:
- ${ENV_FILE:-dev.env}
mount:
image: chrislusf/seaweedfs:local
privileged: true


@ -1,54 +0,0 @@
version: '3.9'
services:
server-left:
image: chrislusf/seaweedfs:local
command: "-v=0 server -ip=server-left -filer -filer.maxMB 5 -s3 -s3.config=/etc/seaweedfs/s3.json -volume.max=0 -master.volumeSizeLimitMB=100 -volume.preStopSeconds=1"
volumes:
- ./s3.json:/etc/seaweedfs/s3.json
healthcheck:
test: [ "CMD", "curl", "--fail", "-I", "http://localhost:9333/cluster/healthz" ]
interval: 3s
start_period: 15s
timeout: 30s
server-right:
image: chrislusf/seaweedfs:local
command: "-v=0 server -ip=server-right -filer -filer.maxMB 64 -s3 -s3.config=/etc/seaweedfs/s3.json -volume.max=0 -master.volumeSizeLimitMB=100 -volume.preStopSeconds=1"
volumes:
- ./s3.json:/etc/seaweedfs/s3.json
healthcheck:
test: [ "CMD", "curl", "--fail", "-I", "http://localhost:9333/cluster/healthz" ]
interval: 3s
start_period: 15s
timeout: 30s
filer-backup:
image: chrislusf/seaweedfs:local
command: "-v=0 filer.backup -debug -doDeleteFiles=False -filer server-left:8888"
volumes:
- ./replication.toml:/etc/seaweedfs/replication.toml
environment:
WEED_SINK_LOCAL_INCREMENTAL_ENABLED: "false"
WEED_SINK_S3_ENABLED: "true"
WEED_SINK_S3_BUCKET: "backup"
WEED_SINK_S3_ENDPOINT: "http://server-right:8333"
WEED_SINK_S3_DIRECTORY: "/"
WEED_SINK_S3_AWS_ACCESS_KEY_ID: "some_access_key1"
WEED_SINK_S3_AWS_SECRET_ACCESS_KEY: "some_secret_key1"
WEED_SINK_S3_S3_DISABLE_CONTENT_MD5_VALIDATION: "false"
WEED_SINK_S3_UPLOADER_PART_SIZE_MB: "5"
WEED_SINK_S3_KEEP_PART_SIZE: "false"
depends_on:
server-left:
condition: service_healthy
server-right:
condition: service_healthy
minio-warp:
image: minio/warp
command: 'mixed --duration 5s --obj.size=6mb --md5 --objects 10 --concurrent 2'
restart: on-failure
environment:
WARP_HOST: "server-left:8333"
WARP_ACCESS_KEY: "some_access_key1"
WARP_SECRET_KEY: "some_secret_key1"
depends_on:
- filer-backup


@ -6,7 +6,7 @@ services:
ports:
- 9333:9333
- 19333:19333
command: "master -ip=master -volumeSizeLimitMB=100"
command: "master -ip=master -volumeSizeLimitMB=1024"
volume:
image: chrislusf/seaweedfs:local
ports:


@ -1,32 +0,0 @@
services:
server:
image: chrislusf/seaweedfs:local
ports:
- 9333:9333
- 19333:19333
- 8888:8888
- 18888:18888
command: "server -ip=server -filer -volume.max=0 -master.volumeSizeLimitMB=8 -volume.preStopSeconds=1"
healthcheck:
test: curl -f http://localhost:8888/healthz
mq_broker:
image: chrislusf/seaweedfs:local
ports:
- 17777:17777
command: "mq.broker -master=server:9333 -ip=mq_broker"
depends_on:
server:
condition: service_healthy
mq_agent:
image: chrislusf/seaweedfs:local
ports:
- 16777:16777
command: "mq.agent -broker=mq_broker:17777 -port=16777"
depends_on:
- mq_broker
mq_client:
image: chrislusf/seaweedfs:local
# run a custom command instead of entrypoint
command: "ls -al"
depends_on:
- mq_agent


@ -6,7 +6,7 @@ services:
ports:
- 9333:9333
- 19333:19333
command: "master -ip=master -volumeSizeLimitMB=100"
command: "master -ip=master -volumeSizeLimitMB=1024"
volume:
image: chrislusf/seaweedfs:local
ports:


@ -3,54 +3,19 @@ services:
node1:
image: chrislusf/seaweedfs:local
command: "server -master -volume -filer"
ports:
- 8888:8888
- 18888:18888
healthcheck:
test: [ "CMD", "curl", "--fail", "-I", "http://localhost:9333/cluster/healthz" ]
interval: 1s
start_period: 10s
timeout: 30s
mount1:
image: chrislusf/seaweedfs:local
privileged: true
command: "mount -filer=node1:8888 -dir=/mnt -dirAutoCreate"
healthcheck:
test: [ "CMD", "curl", "--fail", "-I", "http://node1:8888/" ]
interval: 1s
start_period: 10s
timeout: 30s
depends_on:
node1:
condition: service_healthy
node2:
image: chrislusf/seaweedfs:local
ports:
- 7888:8888
- 17888:18888
command: "server -master -volume -filer"
healthcheck:
test: [ "CMD", "curl", "--fail", "-I", "http://localhost:9333/cluster/healthz" ]
interval: 1s
start_period: 10s
timeout: 30s
mount2:
image: chrislusf/seaweedfs:local
privileged: true
command: "mount -filer=node2:8888 -dir=/mnt -dirAutoCreate"
healthcheck:
test: [ "CMD", "curl", "--fail", "-I", "http://node2:8888/" ]
interval: 1s
start_period: 10s
timeout: 30s
depends_on:
node2:
condition: service_healthy
sync:
image: chrislusf/seaweedfs:local
command: "-v=4 filer.sync -a=node1:8888 -b=node2:8888 -a.debug -b.debug"
depends_on:
mount1:
condition: service_healthy
mount2:
condition: service_healthy


@ -40,10 +40,7 @@
"List",
"Tagging",
"Write"
],
"account": {
"id": "testid"
}
]
},
{
"name": "s3_tests_alt",
@ -104,12 +101,5 @@
"Write"
]
}
],
"accounts": [
{
"id" : "testid",
"displayName": "M. Tester",
"emailAddress": "tester@ceph.com"
}
]
]
}


@ -2,7 +2,7 @@
## this section is just used for host, port and bucket_prefix
# host set for rgw in vstart.sh
host = 127.0.0.1
host = s3
# port set for rgw in vstart.sh
port = 8000
@ -68,36 +68,3 @@ secret_key = opqrstuvwxyzabcdefghijklmnopqrstuvwxyzab
# tenant email set in vstart.sh
email = tenanteduser@example.com
# tenant name
tenant = testx
[iam]
#used for iam operations in sts-tests
#email from vstart.sh
email = s3@example.com
#user_id from vstart.sh
user_id = 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef
#access_key from vstart.sh
access_key = ABCDEFGHIJKLMNOPQRST
#secret_key from vstart.sh
secret_key = abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz
#display_name from vstart.sh
display_name = youruseridhere
[iam root]
access_key = AAAAAAAAAAAAAAAAAAaa
secret_key = aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
user_id = RGW11111111111111111
email = account1@ceph.com
# iam account root user in a different account than [iam root]
[iam alt root]
access_key = BBBBBBBBBBBBBBBBBBbb
secret_key = bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb
user_id = RGW22222222222222222
email = account2@ceph.com


@ -11,7 +11,7 @@ services:
ports:
- 9333:9333
- 19333:19333
command: "master -ip=master -volumeSizeLimitMB=100"
command: "master -ip=master -volumeSizeLimitMB=1024"
volume:
image: chrislusf/seaweedfs:local
ports:
@ -30,7 +30,6 @@ services:
environment:
WEED_LEVELDB2_ENABLED: 'false'
WEED_ETCD_ENABLED: 'true'
WEED_ETCD_KEY_PREFIX: 'seaweedfs.'
WEED_ETCD_SERVERS: "http://etcd:2379"
volumes:
- ./s3.json:/etc/seaweedfs/s3.json


@ -1,30 +0,0 @@
version: '3.9'
services:
tarantool:
image: chrislusf/tarantool_dev_env
entrypoint: "tt start app -i"
environment:
APP_USER_PASSWORD: "app"
CLIENT_USER_PASSWORD: "client"
REPLICATOR_USER_PASSWORD: "replicator"
STORAGE_USER_PASSWORD: "storage"
network_mode: "host"
ports:
- "3303:3303"
s3:
image: chrislusf/seaweedfs:local
command: "server -ip=127.0.0.1 -filer -master.volumeSizeLimitMB=16 -volume.max=0 -volume -volume.preStopSeconds=1 -s3 -s3.config=/etc/seaweedfs/s3.json -s3.port=8000 -s3.allowEmptyFolder=false -s3.allowDeleteBucketNotEmpty=false"
volumes:
- ./s3.json:/etc/seaweedfs/s3.json
environment:
WEED_LEVELDB2_ENABLED: "false"
WEED_TARANTOOL_ENABLED: "true"
WEED_TARANTOOL_ADDRESS: "127.0.0.1:3303"
WEED_TARANTOOL_USER: "client"
WEED_TARANTOOL_PASSWORD: "client"
WEED_MASTER_VOLUME_GROWTH_COPY_OTHER: 1
network_mode: "host"
depends_on:
- tarantool


@ -12,9 +12,5 @@ WEED_GRPC_MASTER_ALLOWED_COMMONNAMES="volume01.dev,master01.dev,filer01.dev,clie
WEED_GRPC_VOLUME_ALLOWED_COMMONNAMES="volume01.dev,master01.dev,filer01.dev,client01.dev"
WEED_GRPC_FILER_ALLOWED_COMMONNAMES="volume01.dev,master01.dev,filer01.dev,client01.dev"
WEED_GRPC_CLIENT_ALLOWED_COMMONNAMES="volume01.dev,master01.dev,filer01.dev,client01.dev"
WEED_HTTPS_CLIENT_ENABLE=true
WEED_HTTPS_VOLUME_CERT=/etc/seaweedfs/tls/volume01.dev.crt
WEED_HTTPS_VOLUME_KEY=/etc/seaweedfs/tls/volume01.dev.key
WEED_HTTPS_VOLUME_CA=/etc/seaweedfs/tls/SeaweedFS_CA.crt
#GRPC_GO_LOG_SEVERITY_LEVEL=info
#GRPC_GO_LOG_VERBOSITY_LEVEL=2


@ -1,37 +0,0 @@
[
{
"Username": "admin",
"Password": "myadminpassword",
"PublicKeys": [
],
"HomeDir": "/",
"Permissions": {
"/": ["*"]
},
"Uid": 0,
"Gid": 0
},
{
"Username": "user1",
"Password": "myuser1password",
"PublicKeys": [""],
"HomeDir": "/user1",
"Permissions": {
"/user1": ["*"],
"/public": ["read", "list","write"]
},
"Uid": 1111,
"Gid": 1111
},
{
"Username": "readonly",
"Password": "myreadonlypassword",
"PublicKeys": [],
"HomeDir": "/public",
"Permissions": {
"/public": ["read", "list"]
},
"Uid": 1112,
"Gid": 1112
}
]


@ -1,14 +0,0 @@
package = 'app'
version = 'scm-1'
source = {
url = '/dev/null',
}
dependencies = {
'crud == 1.5.2-1',
'expirationd == 1.6.0-1',
'metrics-export-role == 0.3.0-1',
'vshard == 0.1.32-1'
}
build = {
type = 'none';
}


@ -1,145 +0,0 @@
config:
context:
app_user_password:
from: env
env: APP_USER_PASSWORD
client_user_password:
from: env
env: CLIENT_USER_PASSWORD
replicator_user_password:
from: env
env: REPLICATOR_USER_PASSWORD
storage_user_password:
from: env
env: STORAGE_USER_PASSWORD
credentials:
roles:
crud-role:
privileges:
- permissions: [ "execute" ]
lua_call: [ "crud.delete", "crud.get", "crud.upsert" ]
users:
app:
password: '{{ context.app_user_password }}'
roles: [ public, crud-role ]
client:
password: '{{ context.client_user_password }}'
roles: [ super ]
replicator:
password: '{{ context.replicator_user_password }}'
roles: [ replication ]
storage:
password: '{{ context.storage_user_password }}'
roles: [ sharding ]
iproto:
advertise:
peer:
login: replicator
sharding:
login: storage
sharding:
bucket_count: 10000
metrics:
include: [ all ]
exclude: [ vinyl ]
labels:
alias: '{{ instance_name }}'
groups:
storages:
roles:
- roles.crud-storage
- roles.expirationd
- roles.metrics-export
roles_cfg:
roles.expirationd:
cfg:
metrics: true
filer_metadata_task:
space: filer_metadata
is_expired: filer_metadata.is_expired
options:
atomic_iteration: true
force: true
index: 'expire_at_idx'
iterator_type: GT
start_key:
- 0
tuples_per_iteration: 10000
app:
module: storage
sharding:
roles: [ storage ]
replication:
failover: election
database:
use_mvcc_engine: true
replicasets:
storage-001:
instances:
storage-001-a:
roles_cfg:
roles.metrics-export:
http:
- listen: '0.0.0.0:8081'
endpoints:
- path: /metrics/prometheus/
format: prometheus
- path: /metrics/json
format: json
iproto:
listen:
- uri: 127.0.0.1:3301
advertise:
client: 127.0.0.1:3301
storage-001-b:
roles_cfg:
roles.metrics-export:
http:
- listen: '0.0.0.0:8082'
endpoints:
- path: /metrics/prometheus/
format: prometheus
- path: /metrics/json
format: json
iproto:
listen:
- uri: 127.0.0.1:3302
advertise:
client: 127.0.0.1:3302
routers:
roles:
- roles.crud-router
- roles.metrics-export
roles_cfg:
roles.crud-router:
stats: true
stats_driver: metrics
stats_quantiles: true
app:
module: router
sharding:
roles: [ router ]
replicasets:
router-001:
instances:
router-001-a:
roles_cfg:
roles.metrics-export:
http:
- listen: '0.0.0.0:8083'
endpoints:
- path: /metrics/prometheus/
format: prometheus
- path: /metrics/json
format: json
iproto:
listen:
- uri: 127.0.0.1:3303
advertise:
client: 127.0.0.1:3303


@ -1,7 +0,0 @@
---
storage-001-a:
storage-001-b:
router-001-a:


@ -1,77 +0,0 @@
local vshard = require('vshard')
local log = require('log')
-- Bootstrap the vshard router.
while true do
local ok, err = vshard.router.bootstrap({
if_not_bootstrapped = true,
})
if ok then
break
end
log.info(('Router bootstrap error: %s'):format(err))
end
-- functions for filer_metadata space
local filer_metadata = {
delete_by_directory_idx = function(directory)
-- find all storages
local storages = require('vshard').router.routeall()
-- on each storage
for _, storage in pairs(storages) do
-- call local function
local result, err = storage:callrw('filer_metadata.delete_by_directory_idx', { directory })
-- check for error
if err then
error("Failed to call function on storage: " .. tostring(err))
end
end
-- return
return true
end,
find_by_directory_idx_and_name = function(dirPath, startFileName, includeStartFile, limit)
-- init results
local results = {}
-- find all storages
local storages = require('vshard').router.routeall()
-- on each storage
for _, storage in pairs(storages) do
-- call local function
local result, err = storage:callro('filer_metadata.find_by_directory_idx_and_name', {
dirPath,
startFileName,
includeStartFile,
limit
})
-- check for error
if err then
error("Failed to call function on storage: " .. tostring(err))
end
-- add to results
for _, tuple in ipairs(result) do
table.insert(results, tuple)
end
end
-- sort
table.sort(results, function(a, b) return a[3] < b[3] end)
-- apply limit
if #results > limit then
local limitedResults = {}
for i = 1, limit do
table.insert(limitedResults, results[i])
end
results = limitedResults
end
-- return
return results
end,
}
rawset(_G, 'filer_metadata', filer_metadata)
-- register functions for filer_metadata space, set grants
for name, _ in pairs(filer_metadata) do
box.schema.func.create('filer_metadata.' .. name, { if_not_exists = true })
box.schema.user.grant('app', 'execute', 'function', 'filer_metadata.' .. name, { if_not_exists = true })
box.schema.user.grant('client', 'execute', 'function', 'filer_metadata.' .. name, { if_not_exists = true })
end


@ -1,97 +0,0 @@
box.watch('box.status', function()
if box.info.ro then
return
end
-- ====================================
-- key_value space
-- ====================================
box.schema.create_space('key_value', {
format = {
{ name = 'key', type = 'string' },
{ name = 'bucket_id', type = 'unsigned' },
{ name = 'value', type = 'string' }
},
if_not_exists = true
})
-- create key_value space indexes
box.space.key_value:create_index('id', {type = 'tree', parts = { 'key' }, unique = true, if_not_exists = true})
box.space.key_value:create_index('bucket_id', { type = 'tree', parts = { 'bucket_id' }, unique = false, if_not_exists = true })
-- ====================================
-- filer_metadata space
-- ====================================
box.schema.create_space('filer_metadata', {
format = {
{ name = 'directory', type = 'string' },
{ name = 'bucket_id', type = 'unsigned' },
{ name = 'name', type = 'string' },
{ name = 'expire_at', type = 'unsigned' },
{ name = 'data', type = 'string' }
},
if_not_exists = true
})
-- create filer_metadata space indexes
box.space.filer_metadata:create_index('id', {type = 'tree', parts = { 'directory', 'name' }, unique = true, if_not_exists = true})
box.space.filer_metadata:create_index('bucket_id', { type = 'tree', parts = { 'bucket_id' }, unique = false, if_not_exists = true })
box.space.filer_metadata:create_index('directory_idx', { type = 'tree', parts = { 'directory' }, unique = false, if_not_exists = true })
box.space.filer_metadata:create_index('name_idx', { type = 'tree', parts = { 'name' }, unique = false, if_not_exists = true })
box.space.filer_metadata:create_index('expire_at_idx', { type = 'tree', parts = { 'expire_at' }, unique = false, if_not_exists = true})
end)
-- functions for filer_metadata space
local filer_metadata = {
delete_by_directory_idx = function(directory)
local space = box.space.filer_metadata
local index = space.index.directory_idx
-- for each found directory
for _, tuple in index:pairs({ directory }, { iterator = 'EQ' }) do
space:delete({ tuple[1], tuple[3] })
end
return true
end,
find_by_directory_idx_and_name = function(dirPath, startFileName, includeStartFile, limit)
local space = box.space.filer_metadata
local directory_idx = space.index.directory_idx
-- choose filter name function
local filter_filename_func
if includeStartFile then
filter_filename_func = function(value) return value >= startFileName end
else
filter_filename_func = function(value) return value > startFileName end
end
-- init results
local results = {}
-- for each found directory
for _, tuple in directory_idx:pairs({ dirPath }, { iterator = 'EQ' }) do
-- filter by name
if filter_filename_func(tuple[3]) then
table.insert(results, tuple)
end
end
-- sort
table.sort(results, function(a, b) return a[3] < b[3] end)
-- apply limit
if #results > limit then
local limitedResults = {}
for i = 1, limit do
table.insert(limitedResults, results[i])
end
results = limitedResults
end
-- return
return results
end,
is_expired = function(args, tuple)
return (tuple[4] > 0) and (require('fiber').time() > tuple[4])
end
}
-- register functions for filer_metadata space, set grants
rawset(_G, 'filer_metadata', filer_metadata)
for name, _ in pairs(filer_metadata) do
box.schema.func.create('filer_metadata.' .. name, { setuid = true, if_not_exists = true })
box.schema.user.grant('storage', 'execute', 'function', 'filer_metadata.' .. name, { if_not_exists = true })
end


@ -1,274 +0,0 @@
#!/usr/bin/env python3
# /// script
# requires-python = ">=3.12"
# dependencies = [
# "boto3",
# ]
# ///
import argparse
import json
import random
import string
import subprocess
from enum import Enum
from pathlib import Path
import boto3
REGION_NAME = "us-east-1"
class Actions(str, Enum):
Get = "Get"
Put = "Put"
List = "List"
def get_user_dir(bucket_name, user, with_bucket=True):
if with_bucket:
return f"{bucket_name}/user-id-{user}"
return f"user-id-{user}"
def create_power_user():
power_user_key = "power_user_key"
power_user_secret = "power_user_secret"
command = f"s3.configure -apply -user poweruser -access_key {power_user_key} -secret_key {power_user_secret} -actions Admin"
print("Creating Power User...")
subprocess.run(
["docker", "exec", "-i", "seaweedfs-master-1", "weed", "shell"],
input=command,
text=True,
stdout=subprocess.PIPE,
)
print(
f"Power User created with key: {power_user_key} and secret: {power_user_secret}"
)
return power_user_key, power_user_secret
def create_bucket(s3_client, bucket_name):
print(f"Creating Bucket {bucket_name}...")
s3_client.create_bucket(Bucket=bucket_name)
print(f"Bucket {bucket_name} created.")
def upload_file(s3_client, bucket_name, user, file_path, custom_remote_path=None):
user_dir = get_user_dir(bucket_name, user, with_bucket=False)
if custom_remote_path:
remote_path = custom_remote_path
else:
remote_path = f"{user_dir}/{str(Path(file_path).name)}"
print(f"Uploading {file_path} for {user}... on {user_dir}")
s3_client.upload_file(file_path, bucket_name, remote_path)
print(f"File {file_path} uploaded for {user}.")
def create_user(iam_client, user):
print(f"Creating user {user}...")
response = iam_client.create_access_key(UserName=user)
print(
f"User {user} created with access key: {response['AccessKey']['AccessKeyId']}"
)
return response
def list_files(s3_client, bucket_name, path=None):
if path is None:
path = ""
print(f"Listing files of s3://{bucket_name}/{path}...")
try:
response = s3_client.list_objects_v2(Bucket=bucket_name, Prefix=path)
if "Contents" in response:
for obj in response["Contents"]:
print(f"\t - {obj['Key']}")
else:
print("No files found.")
except Exception as e:
print(f"Error listing files: {e}")
def create_policy_for_user(
iam_client, user, bucket_name, actions=[Actions.Get, Actions.List]
):
print(f"Creating policy for {user} on {bucket_name}...")
policy_document = {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [f"s3:{action.value}*" for action in actions],
"Resource": [
f"arn:aws:s3:::{get_user_dir(bucket_name, user)}/*",
],
}
],
}
policy_name = f"{user}-{bucket_name}-full-access"
policy_json = json.dumps(policy_document)
filepath = f"/tmp/{policy_name}.json"
with open(filepath, "w") as f:
f.write(json.dumps(policy_document, indent=2))
iam_client.put_user_policy(
PolicyName=policy_name, PolicyDocument=policy_json, UserName=user
)
print(f"Policy for {user} on {bucket_name} created.")
def main():
parser = argparse.ArgumentParser(description="SeaweedFS S3 Test Script")
parser.add_argument(
"--s3-url", default="http://127.0.0.1:8333", help="S3 endpoint URL"
)
parser.add_argument(
"--iam-url", default="http://127.0.0.1:8111", help="IAM endpoint URL"
)
args = parser.parse_args()
bucket_name = (
f"test-bucket-{''.join(random.choices(string.digits + 'abcdef', k=8))}"
)
sentinel_file = "/tmp/SENTINEL"
with open(sentinel_file, "w") as f:
f.write("Hello World")
print(f"SENTINEL file created at {sentinel_file}")
power_user_key, power_user_secret = create_power_user()
admin_s3_client = get_s3_client(args, power_user_key, power_user_secret)
iam_client = get_iam_client(args, power_user_key, power_user_secret)
create_bucket(admin_s3_client, bucket_name)
upload_file(admin_s3_client, bucket_name, "Alice", sentinel_file)
upload_file(admin_s3_client, bucket_name, "Bob", sentinel_file)
list_files(admin_s3_client, bucket_name)
alice_user_info = create_user(iam_client, "Alice")
bob_user_info = create_user(iam_client, "Bob")
alice_key = alice_user_info["AccessKey"]["AccessKeyId"]
alice_secret = alice_user_info["AccessKey"]["SecretAccessKey"]
bob_key = bob_user_info["AccessKey"]["AccessKeyId"]
bob_secret = bob_user_info["AccessKey"]["SecretAccessKey"]
# Make sure Admin can read any files
list_files(admin_s3_client, bucket_name)
list_files(
admin_s3_client,
bucket_name,
get_user_dir(bucket_name, "Alice", with_bucket=False),
)
list_files(
admin_s3_client,
bucket_name,
get_user_dir(bucket_name, "Bob", with_bucket=False),
)
# Create read policy for Alice and Bob
create_policy_for_user(iam_client, "Alice", bucket_name)
create_policy_for_user(iam_client, "Bob", bucket_name)
alice_s3_client = get_s3_client(args, alice_key, alice_secret)
# Make sure Alice can read her files
list_files(
alice_s3_client,
bucket_name,
get_user_dir(bucket_name, "Alice", with_bucket=False) + "/",
)
# Make sure Bob can read his files
bob_s3_client = get_s3_client(args, bob_key, bob_secret)
list_files(
bob_s3_client,
bucket_name,
get_user_dir(bucket_name, "Bob", with_bucket=False) + "/",
)
# Update policy to include write
create_policy_for_user(iam_client, "Alice", bucket_name, actions=[Actions.Put, Actions.Get, Actions.List]) # fmt: off
create_policy_for_user(iam_client, "Bob", bucket_name, actions=[Actions.Put, Actions.Get, Actions.List]) # fmt: off
print("############################# Make sure Alice can write her files")
upload_file(
alice_s3_client,
bucket_name,
"Alice",
sentinel_file,
custom_remote_path=f"{get_user_dir(bucket_name, 'Alice', with_bucket=False)}/SENTINEL_by_Alice",
)
print("############################# Make sure Bob can write his files")
upload_file(
bob_s3_client,
bucket_name,
"Bob",
sentinel_file,
custom_remote_path=f"{get_user_dir(bucket_name, 'Bob', with_bucket=False)}/SENTINEL_by_Bob",
)
print("############################# Make sure Alice can read her new files")
list_files(
alice_s3_client,
bucket_name,
get_user_dir(bucket_name, "Alice", with_bucket=False) + "/",
)
print("############################# Make sure Bob can read his new files")
list_files(
bob_s3_client,
bucket_name,
get_user_dir(bucket_name, "Bob", with_bucket=False) + "/",
)
print("############################# Make sure Bob cannot read Alice's files")
list_files(
bob_s3_client,
bucket_name,
get_user_dir(bucket_name, "Alice", with_bucket=False) + "/",
)
print("############################# Make sure Alice cannot read Bob's files")
list_files(
alice_s3_client,
bucket_name,
get_user_dir(bucket_name, "Bob", with_bucket=False) + "/",
)
def get_iam_client(args, access_key, secret_key):
iam_client = boto3.client(
"iam",
endpoint_url=args.iam_url,
region_name=REGION_NAME,
aws_access_key_id=access_key,
aws_secret_access_key=secret_key,
)
return iam_client
def get_s3_client(args, access_key, secret_key):
s3_client = boto3.client(
"s3",
endpoint_url=args.s3_url,
region_name=REGION_NAME,
aws_access_key_id=access_key,
aws_secret_access_key=secret_key,
)
return s3_client
if __name__ == "__main__":
main()

go.mod

@ -1,25 +1,23 @@
module github.com/seaweedfs/seaweedfs
go 1.24
toolchain go1.24.1
go 1.20
require (
cloud.google.com/go v0.121.4 // indirect
cloud.google.com/go/pubsub v1.49.0
cloud.google.com/go/storage v1.55.0
cloud.google.com/go v0.107.0 // indirect
cloud.google.com/go/pubsub v1.28.0
cloud.google.com/go/storage v1.29.0
github.com/Azure/azure-pipeline-go v0.2.3
github.com/Azure/azure-storage-blob-go v0.15.0
github.com/Shopify/sarama v1.38.1
github.com/aws/aws-sdk-go v1.55.7
github.com/aws/aws-sdk-go v1.44.189
github.com/beorn7/perks v1.0.1 // indirect
github.com/bwmarrin/snowflake v0.3.0
github.com/cenkalti/backoff/v4 v4.3.0
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/coreos/go-semver v0.3.1 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/coreos/go-semver v0.3.0 // indirect
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
github.com/disintegration/imaging v1.6.2
github.com/dustin/go-humanize v1.0.1
github.com/eapache/go-resiliency v1.3.0 // indirect
github.com/eapache/go-xerial-snappy v0.0.0-20230111030713-bf00bc1b83b6 // indirect
@ -29,383 +27,217 @@ require (
github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 // indirect
github.com/facebookgo/stats v0.0.0-20151006221625-1b76add642e4
github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4 // indirect
github.com/fsnotify/fsnotify v1.9.0 // indirect
github.com/go-redsync/redsync/v4 v4.13.0
github.com/go-sql-driver/mysql v1.9.3
github.com/fclairamb/ftpserverlib v0.21.0
github.com/fsnotify/fsnotify v1.6.0 // indirect
github.com/go-errors/errors v1.1.1 // indirect
github.com/go-redis/redis/v8 v8.11.5
github.com/go-redsync/redsync/v4 v4.8.1
github.com/go-sql-driver/mysql v1.7.0
github.com/go-zookeeper/zk v1.0.3 // indirect
github.com/gocql/gocql v1.7.0
github.com/golang/protobuf v1.5.4
github.com/golang/snappy v1.0.0 // indirect
github.com/google/btree v1.1.3
github.com/google/uuid v1.6.0
github.com/google/wire v0.6.0 // indirect
github.com/googleapis/gax-go/v2 v2.15.0 // indirect
github.com/gorilla/mux v1.8.1
github.com/gocql/gocql v0.0.0-20210707082121-9a3953d1826d
github.com/golang-jwt/jwt v3.2.2+incompatible
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.2
github.com/golang/snappy v0.0.4 // indirect
github.com/google/btree v1.1.2
github.com/google/go-cmp v0.5.9 // indirect
github.com/google/uuid v1.3.0
github.com/google/wire v0.5.0 // indirect
github.com/googleapis/gax-go/v2 v2.7.0 // indirect
github.com/gorilla/mux v1.8.0
github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/hashicorp/go-uuid v1.0.3 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect
github.com/jcmturner/gofork v1.7.6 // indirect
github.com/jcmturner/gokrb5/v8 v8.4.4 // indirect
github.com/jinzhu/copier v0.4.0
github.com/jcmturner/gokrb5/v8 v8.4.3 // indirect
github.com/jinzhu/copier v0.3.5
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/json-iterator/go v1.1.12
github.com/karlseguin/ccache/v2 v2.0.8
github.com/klauspost/compress v1.18.0 // indirect
github.com/klauspost/reedsolomon v1.12.5
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
github.com/klauspost/compress v1.15.14 // indirect
github.com/klauspost/reedsolomon v1.11.7
github.com/kurin/blazer v0.5.3
github.com/lib/pq v1.10.9
github.com/linxGnu/grocksdb v1.10.1
github.com/lib/pq v1.10.7
github.com/linxGnu/grocksdb v1.7.14
github.com/magiconair/properties v1.8.7 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/mattn/go-ieproxy v0.0.11 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/mattn/go-ieproxy v0.0.9 // indirect
github.com/mattn/go-isatty v0.0.17 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/olivere/elastic/v7 v7.0.32
github.com/peterh/liner v1.2.2
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/posener/complete v1.2.3
github.com/pquerna/cachecontrol v0.2.0
github.com/prometheus/client_golang v1.22.0
github.com/prometheus/client_model v0.6.2 // indirect
github.com/prometheus/common v0.64.0 // indirect
github.com/prometheus/procfs v0.17.0
github.com/pquerna/cachecontrol v0.1.0
github.com/prometheus/client_golang v1.14.0
github.com/prometheus/client_model v0.3.0 // indirect
github.com/prometheus/common v0.37.0 // indirect
github.com/prometheus/procfs v0.9.0
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 // indirect
github.com/seaweedfs/goexif v1.0.3
github.com/seaweedfs/raft v1.1.3
github.com/sirupsen/logrus v1.9.3 // indirect
github.com/spf13/afero v1.12.0 // indirect
github.com/spf13/cast v1.7.1 // indirect
github.com/spf13/viper v1.20.1
github.com/stretchr/testify v1.10.0
github.com/seaweedfs/raft v1.1.0
github.com/sirupsen/logrus v1.9.0 // indirect
github.com/spf13/afero v1.9.3 // indirect
github.com/spf13/cast v1.5.0 // indirect
github.com/spf13/jwalterweatherman v1.1.0 // indirect
github.com/spf13/viper v1.15.0
github.com/stretchr/testify v1.8.1
github.com/stvp/tempredis v0.0.0-20181119212430-b82af8480203
github.com/syndtr/goleveldb v1.0.1-0.20190318030020-c3a204f8e965
github.com/tidwall/gjson v1.18.0
github.com/tidwall/gjson v1.14.4
github.com/tidwall/match v1.1.1
github.com/tidwall/pretty v1.2.0 // indirect
github.com/tsuna/gohbase v0.0.0-20201125011725-348991136365
github.com/tylertreat/BoomFilters v0.0.0-20210315201527-1a82519a3e43
github.com/valyala/bytebufferpool v1.0.0
github.com/viant/ptrie v1.0.1
github.com/viant/assertly v0.5.4 // indirect
github.com/viant/ptrie v0.3.0
github.com/viant/toolbox v0.33.2 // indirect
github.com/xdg-go/pbkdf2 v1.0.0 // indirect
github.com/xdg-go/scram v1.1.2 // indirect
github.com/xdg-go/stringprep v1.0.4 // indirect
github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 // indirect
go.etcd.io/etcd/client/v3 v3.6.2
go.mongodb.org/mongo-driver v1.17.4
github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d // indirect
go.etcd.io/etcd/client/v3 v3.5.7
go.mongodb.org/mongo-driver v1.11.1
go.opencensus.io v0.24.0 // indirect
gocloud.dev v0.43.0
gocloud.dev/pubsub/natspubsub v0.42.0
gocloud.dev/pubsub/rabbitpubsub v0.43.0
golang.org/x/crypto v0.40.0
golang.org/x/exp v0.0.0-20250606033433-dcc06ee1d476
golang.org/x/image v0.29.0
golang.org/x/net v0.42.0
golang.org/x/oauth2 v0.30.0 // indirect
golang.org/x/sys v0.34.0
golang.org/x/text v0.27.0 // indirect
golang.org/x/tools v0.35.0
golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect
google.golang.org/api v0.242.0
google.golang.org/genproto v0.0.0-20250715232539-7130f93afb79 // indirect
google.golang.org/grpc v1.73.0
google.golang.org/protobuf v1.36.6
gocloud.dev v0.28.0
gocloud.dev/pubsub/natspubsub v0.28.0
gocloud.dev/pubsub/rabbitpubsub v0.27.0
golang.org/x/crypto v0.3.0 // indirect
golang.org/x/exp v0.0.0-20221031165847-c99f073a8326
golang.org/x/image v0.0.0-20200119044424-58c23975cae1
golang.org/x/net v0.7.0
golang.org/x/oauth2 v0.2.0 // indirect
golang.org/x/sys v0.5.0
golang.org/x/text v0.7.0 // indirect
golang.org/x/tools v0.6.0
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
google.golang.org/api v0.108.0
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f // indirect
google.golang.org/grpc v1.52.0
google.golang.org/protobuf v1.28.1
gopkg.in/inf.v0 v0.9.1 // indirect
modernc.org/b v1.0.0 // indirect
modernc.org/mathutil v1.7.1
modernc.org/memory v1.11.0 // indirect
modernc.org/sqlite v1.38.0
modernc.org/strutil v1.2.1
modernc.org/cc/v3 v3.40.0 // indirect
modernc.org/ccgo/v3 v3.16.13 // indirect
modernc.org/libc v1.22.2 // indirect
modernc.org/mathutil v1.5.0 // indirect
modernc.org/memory v1.4.0 // indirect
modernc.org/opt v0.1.3 // indirect
modernc.org/sqlite v1.20.4
modernc.org/strutil v1.1.3
modernc.org/token v1.0.1 // indirect
)
require (
github.com/Jille/raft-grpc-transport v1.6.1
github.com/ThreeDotsLabs/watermill v1.4.7
github.com/a-h/templ v0.3.920
github.com/arangodb/go-driver v1.6.6
github.com/Jille/raft-grpc-transport v1.4.0
github.com/arangodb/go-driver v1.4.1
github.com/armon/go-metrics v0.4.1
github.com/aws/aws-sdk-go-v2 v1.36.6
github.com/aws/aws-sdk-go-v2/config v1.29.18
github.com/aws/aws-sdk-go-v2/credentials v1.17.71
github.com/aws/aws-sdk-go-v2/service/s3 v1.84.1
github.com/cognusion/imaging v1.0.2
github.com/fluent/fluent-logger-golang v1.10.0
github.com/getsentry/sentry-go v0.34.1
github.com/gin-contrib/sessions v1.0.4
github.com/gin-gonic/gin v1.10.1
github.com/golang-jwt/jwt/v5 v5.2.3
github.com/fluent/fluent-logger-golang v1.9.0
github.com/google/flatbuffers/go v0.0.0-20230108230133-3b8644d32c50
github.com/hanwen/go-fuse/v2 v2.8.0
github.com/hashicorp/raft v1.7.3
github.com/hashicorp/raft-boltdb/v2 v2.3.1
github.com/minio/crc64nvme v1.0.2
github.com/orcaman/concurrent-map/v2 v2.0.1
github.com/parquet-go/parquet-go v0.25.1
github.com/pkg/sftp v1.13.9
github.com/rabbitmq/amqp091-go v1.10.0
github.com/rclone/rclone v1.70.3
github.com/rdleal/intervalst v1.5.0
github.com/redis/go-redis/v9 v9.11.0
github.com/schollz/progressbar/v3 v3.18.0
github.com/shirou/gopsutil/v3 v3.24.5
github.com/tarantool/go-tarantool/v2 v2.4.0
github.com/tikv/client-go/v2 v2.0.7
github.com/ydb-platform/ydb-go-sdk-auth-environ v0.5.0
github.com/ydb-platform/ydb-go-sdk/v3 v3.113.1
go.etcd.io/etcd/client/pkg/v3 v3.6.2
go.uber.org/atomic v1.11.0
golang.org/x/sync v0.16.0
google.golang.org/grpc/security/advancedtls v1.0.0
)
require github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88 // indirect
require (
github.com/cenkalti/backoff/v3 v3.2.2 // indirect
github.com/lithammer/shortuuid/v3 v3.0.7 // indirect
github.com/hanwen/go-fuse/v2 v2.1.1-0.20220627082937-d01fda7edf17
github.com/hashicorp/raft v1.3.11
github.com/hashicorp/raft-boltdb/v2 v2.2.2
github.com/rabbitmq/amqp091-go v1.7.0
github.com/schollz/progressbar/v3 v3.13.0
github.com/tikv/client-go/v2 v2.0.5
github.com/ydb-platform/ydb-go-sdk-auth-environ v0.1.2
github.com/ydb-platform/ydb-go-sdk/v3 v3.42.10
google.golang.org/grpc/security/advancedtls v0.0.0-20220622233350-5cdb09fa29c1
)
require (
cel.dev/expr v0.24.0 // indirect
cloud.google.com/go/auth v0.16.3 // indirect
cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect
cloud.google.com/go/compute/metadata v0.7.0 // indirect
cloud.google.com/go/iam v1.5.2 // indirect
cloud.google.com/go/monitoring v1.24.2 // indirect
filippo.io/edwards25519 v1.1.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.1 // indirect
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.10.1 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1 // indirect
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.1 // indirect
github.com/Azure/azure-sdk-for-go/sdk/storage/azfile v1.5.1 // indirect
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 // indirect
github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 // indirect
github.com/Files-com/files-sdk-go/v3 v3.2.173 // indirect
github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.29.0 // indirect
github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.53.0 // indirect
github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.53.0 // indirect
github.com/IBM/go-sdk-core/v5 v5.20.0 // indirect
github.com/Max-Sum/base32768 v0.0.0-20230304063302-18e6ce5945fd // indirect
github.com/Microsoft/go-winio v0.6.2 // indirect
github.com/ProtonMail/bcrypt v0.0.0-20211005172633-e235017c1baf // indirect
github.com/ProtonMail/gluon v0.17.1-0.20230724134000-308be39be96e // indirect
github.com/ProtonMail/go-crypto v1.3.0 // indirect
github.com/ProtonMail/go-mime v0.0.0-20230322103455-7d82a3887f2f // indirect
github.com/ProtonMail/go-srp v0.0.7 // indirect
github.com/ProtonMail/gopenpgp/v2 v2.9.0 // indirect
github.com/PuerkitoBio/goquery v1.10.3 // indirect
github.com/abbot/go-http-auth v0.4.0 // indirect
github.com/andybalholm/brotli v1.1.0 // indirect
github.com/andybalholm/cascadia v1.3.3 // indirect
github.com/appscode/go-querystring v0.0.0-20170504095604-0126cfb3f1dc // indirect
cloud.google.com/go/compute v1.14.0 // indirect
cloud.google.com/go/compute/metadata v0.2.3 // indirect
cloud.google.com/go/iam v0.8.0 // indirect
github.com/arangodb/go-velocypack v0.0.0-20200318135517-5af53c29c67e // indirect
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.11 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.33 // indirect
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.84 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.37 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.37 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.37 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.4 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.5 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.18 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.18 // indirect
github.com/aws/aws-sdk-go-v2/service/sns v1.34.7 // indirect
github.com/aws/aws-sdk-go-v2/service/sqs v1.38.8 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.25.6 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.4 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.34.1 // indirect
github.com/aws/smithy-go v1.22.4 // indirect
github.com/aws/aws-sdk-go-v2 v1.17.1 // indirect
github.com/aws/aws-sdk-go-v2/config v1.18.3 // indirect
github.com/aws/aws-sdk-go-v2/credentials v1.13.3 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.19 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.25 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.19 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.26 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.19 // indirect
github.com/aws/aws-sdk-go-v2/service/sns v1.18.6 // indirect
github.com/aws/aws-sdk-go-v2/service/sqs v1.19.15 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.11.25 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.8 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.17.5 // indirect
github.com/aws/smithy-go v1.13.4 // indirect
github.com/benbjohnson/clock v1.1.0 // indirect
github.com/boltdb/bolt v1.3.1 // indirect
github.com/bradenaw/juniper v0.15.3 // indirect
github.com/bradfitz/iter v0.0.0-20191230175014-e8f45d346db8 // indirect
github.com/buengese/sgzip v0.1.1 // indirect
github.com/bytedance/sonic v1.13.2 // indirect
github.com/bytedance/sonic/loader v0.2.4 // indirect
github.com/calebcase/tmpfile v1.0.3 // indirect
github.com/chilts/sid v0.0.0-20190607042430-660e94789ec9 // indirect
github.com/cloudflare/circl v1.6.1 // indirect
github.com/cloudinary/cloudinary-go/v2 v2.10.0 // indirect
github.com/cloudsoda/go-smb2 v0.0.0-20250228001242-d4c70e6251cc // indirect
github.com/cloudsoda/sddl v0.0.0-20250224235906-926454e91efc // indirect
github.com/cloudwego/base64x v0.1.5 // indirect
github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443 // indirect
github.com/colinmarc/hdfs/v2 v2.4.0 // indirect
github.com/creasty/defaults v1.8.0 // indirect
github.com/cronokirby/saferith v0.33.0 // indirect
github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548 // indirect
github.com/d4l3k/messagediff v1.2.1 // indirect
github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 // indirect
github.com/dropbox/dropbox-sdk-go-unofficial/v6 v6.0.5 // indirect
github.com/ebitengine/purego v0.8.4 // indirect
github.com/elastic/gosigar v0.14.2 // indirect
github.com/emersion/go-message v0.18.2 // indirect
github.com/emersion/go-vcard v0.0.0-20241024213814-c9703dde27ff // indirect
github.com/envoyproxy/go-control-plane/envoy v1.32.4 // indirect
github.com/envoyproxy/protoc-gen-validate v1.2.1 // indirect
github.com/fatih/color v1.16.0 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/flynn/noise v1.1.0 // indirect
github.com/gabriel-vasile/mimetype v1.4.9 // indirect
github.com/geoffgarside/ber v1.2.0 // indirect
github.com/gin-contrib/sse v1.0.0 // indirect
github.com/go-chi/chi/v5 v5.2.2 // indirect
github.com/go-darwin/apfs v0.0.0-20211011131704-f84b94dbf348 // indirect
github.com/go-jose/go-jose/v4 v4.1.1 // indirect
github.com/go-logr/logr v1.4.3 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-ole/go-ole v1.3.0 // indirect
github.com/go-openapi/errors v0.22.1 // indirect
github.com/go-openapi/strfmt v0.23.0 // indirect
github.com/go-playground/locales v0.14.1 // indirect
github.com/go-playground/universal-translator v0.18.1 // indirect
github.com/go-playground/validator/v10 v10.26.0 // indirect
github.com/go-resty/resty/v2 v2.16.5 // indirect
github.com/go-viper/mapstructure/v2 v2.3.0 // indirect
github.com/goccy/go-json v0.10.5 // indirect
github.com/gofrs/flock v0.12.1 // indirect
github.com/fatih/color v1.13.0 // indirect
github.com/fclairamb/go-log v0.4.1 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang-jwt/jwt/v4 v4.5.2 // indirect
github.com/google/s2a-go v0.1.9 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.3.6 // indirect
github.com/gorilla/context v1.1.2 // indirect
github.com/gorilla/schema v1.4.1 // indirect
github.com/gorilla/securecookie v1.1.2 // indirect
github.com/gorilla/sessions v1.4.0 // indirect
github.com/golang-jwt/jwt/v4 v4.4.3 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.2.1 // indirect
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1 // indirect
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
github.com/hashicorp/go-hclog v1.6.3 // indirect
github.com/hashicorp/go-hclog v1.2.0 // indirect
github.com/hashicorp/go-immutable-radix v1.3.1 // indirect
github.com/hashicorp/go-metrics v0.5.4 // indirect
github.com/hashicorp/go-msgpack/v2 v2.1.2 // indirect
github.com/hashicorp/go-retryablehttp v0.7.7 // indirect
github.com/hashicorp/golang-lru v0.6.0 // indirect
github.com/henrybear327/Proton-API-Bridge v1.0.0 // indirect
github.com/henrybear327/go-proton-api v1.0.0 // indirect
github.com/hashicorp/go-msgpack v1.1.5 // indirect
github.com/hashicorp/golang-lru v0.5.4 // indirect
github.com/jcmturner/aescts/v2 v2.0.0 // indirect
github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect
github.com/jcmturner/goidentity/v6 v6.0.1 // indirect
github.com/jcmturner/rpc/v2 v2.0.3 // indirect
github.com/jlaffaye/ftp v0.2.1-0.20240918233326-1b970516f5d3 // indirect
github.com/jonboulle/clockwork v0.5.0 // indirect
github.com/jonboulle/clockwork v0.2.2 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/jtolio/noiseconn v0.0.0-20231127013910-f6d9ecbf1de7 // indirect
github.com/jzelinskie/whirlpool v0.0.0-20201016144138-0675e54bb004 // indirect
github.com/k0kubun/pp v3.0.1+incompatible
github.com/klauspost/cpuid/v2 v2.2.10 // indirect
github.com/koofr/go-httpclient v0.0.0-20240520111329-e20f8f203988 // indirect
github.com/koofr/go-koofrclient v0.0.0-20221207135200-cbd7fc9ad6a6 // indirect
github.com/kr/fs v0.1.0 // indirect
github.com/kylelemons/godebug v1.1.0 // indirect
github.com/lanrat/extsort v1.0.2 // indirect
github.com/leodido/go-urn v1.4.0 // indirect
github.com/lpar/date v1.0.0 // indirect
github.com/lufia/plan9stats v0.0.0-20250317134145-8bc96cf8fc35 // indirect
github.com/mattn/go-colorable v0.1.14 // indirect
github.com/mattn/go-runewidth v0.0.16 // indirect
github.com/klauspost/cpuid/v2 v2.1.1 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-runewidth v0.0.14 // indirect
github.com/mattn/go-sqlite3 v2.0.1+incompatible // indirect
github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/montanaflynn/stats v0.7.1 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/nats-io/nats.go v1.40.1 // indirect
github.com/nats-io/nkeys v0.4.10 // indirect
github.com/montanaflynn/stats v0.6.6 // indirect
github.com/nats-io/nats.go v1.20.0 // indirect
github.com/nats-io/nkeys v0.3.0 // indirect
github.com/nats-io/nuid v1.0.1 // indirect
github.com/ncruces/go-strftime v0.1.9 // indirect
github.com/ncw/swift/v2 v2.0.4 // indirect
github.com/nxadm/tail v1.4.11 // indirect
github.com/oklog/ulid v1.3.1 // indirect
github.com/onsi/ginkgo/v2 v2.23.3 // indirect
github.com/opentracing/opentracing-go v1.2.0 // indirect
github.com/oracle/oci-go-sdk/v65 v65.93.0 // indirect
github.com/panjf2000/ants/v2 v2.11.3 // indirect
github.com/patrickmn/go-cache v2.1.0+incompatible // indirect
github.com/pelletier/go-toml/v2 v2.2.4 // indirect
github.com/pengsrc/go-shared v0.2.1-0.20190131101655-1999055a4a14 // indirect
github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c // indirect
github.com/pierrec/lz4/v4 v4.1.21 // indirect
github.com/pelletier/go-toml/v2 v2.0.6 // indirect
github.com/philhofer/fwd v1.1.1 // indirect
github.com/pierrec/lz4/v4 v4.1.17 // indirect
github.com/pingcap/errors v0.11.5-0.20211224045212-9687c2b0f87c // indirect
github.com/pingcap/failpoint v0.0.0-20220801062533-2eaa32854a6c // indirect
github.com/pingcap/kvproto v0.0.0-20230403051650-e166ae588106 // indirect
github.com/pingcap/failpoint v0.0.0-20210918120811-547c13e3eb00 // indirect
github.com/pingcap/kvproto v0.0.0-20230201112839-2b853bed8125 // indirect
github.com/pingcap/log v1.1.1-0.20221110025148-ca232912c9f3 // indirect
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
github.com/pkg/xattr v0.4.10 // indirect
github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect
github.com/putdotio/go-putio/putio v0.0.0-20200123120452-16d982cac2b8 // indirect
github.com/relvacode/iso8601 v1.6.0 // indirect
github.com/rfjakob/eme v1.1.2 // indirect
github.com/rivo/uniseg v0.4.7 // indirect
github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06 // indirect
github.com/sagikazarmark/locafero v0.7.0 // indirect
github.com/samber/lo v1.50.0 // indirect
github.com/shirou/gopsutil/v4 v4.25.5 // indirect
github.com/shoenig/go-m1cpu v0.1.6 // indirect
github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 // indirect
github.com/smartystreets/goconvey v1.8.1 // indirect
github.com/sony/gobreaker v1.0.0 // indirect
github.com/sourcegraph/conc v0.3.0 // indirect
github.com/spacemonkeygo/monkit/v3 v3.0.24 // indirect
github.com/spf13/pflag v1.0.6 // indirect
github.com/spiffe/go-spiffe/v2 v2.5.0 // indirect
github.com/subosito/gotenv v1.6.0 // indirect
github.com/t3rm1n4l/go-mega v0.0.0-20241213151442-a19cff0ec7b5 // indirect
github.com/tarantool/go-iproto v1.1.0 // indirect
github.com/rivo/uniseg v0.4.3 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/stathat/consistent v1.0.0 // indirect
github.com/subosito/gotenv v1.4.2 // indirect
github.com/tiancaiamao/gp v0.0.0-20221230034425-4025bc8a4d4a // indirect
github.com/tikv/pd/client v0.0.0-20230329114254-1948c247c2b1 // indirect
github.com/tinylib/msgp v1.3.0 // indirect
github.com/tklauser/go-sysconf v0.3.15 // indirect
github.com/tklauser/numcpus v0.10.0 // indirect
github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
github.com/tikv/pd v1.1.0-beta.0.20230202094356-18df271ce57f // indirect
github.com/tikv/pd/client v0.0.0-20230202094356-18df271ce57f // indirect
github.com/tinylib/msgp v1.1.6 // indirect
github.com/twmb/murmur3 v1.1.3 // indirect
github.com/ugorji/go/codec v1.2.12 // indirect
github.com/unknwon/goconfig v1.0.0 // indirect
github.com/vmihailenco/msgpack/v5 v5.4.1 // indirect
github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect
github.com/xanzy/ssh-agent v0.3.3 // indirect
github.com/yandex-cloud/go-genproto v0.0.0-20211115083454-9ca41db5ed9e // indirect
github.com/ydb-platform/ydb-go-genproto v0.0.0-20241112172322-ea1f63298f77 // indirect
github.com/ydb-platform/ydb-go-yc v0.12.1 // indirect
github.com/ydb-platform/ydb-go-yc-metadata v0.6.1 // indirect
github.com/yunify/qingstor-sdk-go/v3 v3.2.0 // indirect
github.com/yusufpapurcu/wmi v1.2.4 // indirect
github.com/zeebo/blake3 v0.2.4 // indirect
github.com/zeebo/errs v1.4.0 // indirect
go.etcd.io/bbolt v1.4.0 // indirect
go.etcd.io/etcd/api/v3 v3.6.2 // indirect
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
go.opentelemetry.io/contrib/detectors/gcp v1.37.0 // indirect
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.62.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.62.0 // indirect
go.opentelemetry.io/otel v1.37.0 // indirect
go.opentelemetry.io/otel/metric v1.37.0 // indirect
go.opentelemetry.io/otel/sdk v1.37.0 // indirect
go.opentelemetry.io/otel/sdk/metric v1.37.0 // indirect
go.opentelemetry.io/otel/trace v1.37.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.27.0 // indirect
golang.org/x/arch v0.16.0 // indirect
golang.org/x/term v0.33.0 // indirect
golang.org/x/time v0.12.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20250715232539-7130f93afb79 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20250715232539-7130f93afb79 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
gopkg.in/validator.v2 v2.0.1 // indirect
github.com/ydb-platform/ydb-go-genproto v0.0.0-20221215182650-986f9d10542f // indirect
github.com/ydb-platform/ydb-go-yc v0.8.3 // indirect
github.com/ydb-platform/ydb-go-yc-metadata v0.5.2 // indirect
go.etcd.io/bbolt v1.3.6 // indirect
go.etcd.io/etcd/api/v3 v3.5.7 // indirect
go.etcd.io/etcd/client/pkg/v3 v3.5.7 // indirect
go.uber.org/atomic v1.10.0 // indirect
go.uber.org/multierr v1.8.0 // indirect
go.uber.org/zap v1.24.0 // indirect
golang.org/x/mod v0.8.0 // indirect
golang.org/x/sync v0.1.0 // indirect
golang.org/x/term v0.5.0 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
modernc.org/libc v1.65.10 // indirect
moul.io/http2curl/v2 v2.3.0 // indirect
sigs.k8s.io/yaml v1.4.0 // indirect
storj.io/common v0.0.0-20250605163628-70ca83b6228e // indirect
storj.io/drpc v0.0.35-0.20250513201419-f7819ea69b55 // indirect
storj.io/eventkit v0.0.0-20250410172343-61f26d3de156 // indirect
storj.io/infectious v0.0.2 // indirect
storj.io/picobuf v0.0.4 // indirect
storj.io/uplink v1.13.1 // indirect
lukechampine.com/uint128 v1.2.0 // indirect
)
// replace github.com/seaweedfs/raft => /Users/chrislu/go/src/github.com/seaweedfs/raft

go.sum

File diff suppressed because it is too large.

@ -1,16 +0,0 @@
# Artifact Hub repository metadata file
#
# Some settings like the verified publisher flag or the ignored packages won't
# be applied until the next time the repository is processed. Please keep in
# mind that the repository won't be processed if it has not changed since the
# last time it was processed. Depending on the repository kind, this is checked
# in a different way. For Helm http based repositories, we consider it has
# changed if the `index.yaml` file changes. For git based repositories, it does
# when the hash of the last commit in the branch you set up changes. This does
# NOT apply to ownership claim operations, which are processed immediately.
#
repositoryID: 5b2f1fe2-20e5-486e-9746-183484642aa2
# owners: # (optional, used to claim repository ownership)
# - name: username
# email: email


@ -1,6 +1,5 @@
apiVersion: v1
description: SeaweedFS
name: seaweedfs
appVersion: "3.95"
# Dev note: Trigger a helm chart release by `git tag -a helm-<version>`
version: 4.0.395
appVersion: "3.43"
version: "3.43"


@ -1,40 +1,17 @@
# SEAWEEDFS - helm chart (2.x+)
## Getting Started
### Add the helm repo
```bash
helm repo add seaweedfs https://seaweedfs.github.io/seaweedfs/helm
```
### Install the helm chart
```bash
helm install seaweedfs seaweedfs/seaweedfs
```
### (Recommended) Provide `values.yaml`
```bash
helm install --values=values.yaml seaweedfs seaweedfs/seaweedfs
```
# SEAWEEDFS - helm chart (2.x)
## Info:
* master/filer/volume are stateful sets with anti-affinity on the hostname,
so your deployment will be spread/HA.
* chart is using memsql(mysql) as the filer backend to enable HA (multiple filer instances) and backup/HA memsql can provide.
* mysql user/password are created in a k8s secret (secret-seaweedfs-db.yaml) and injected to the filer with ENV.
* cert config exists and can be enabled, but has not been tested; it requires cert-manager to be installed.
* chart is using memsql(mysql) as the filer backend to enable HA (multiple filer instances)
and backup/HA memsql can provide.
* mysql user/password are created in a k8s secret (secret-seaweedfs-db.yaml) and injected to the filer
with ENV.
* cert config exists and can be enabled, but not been tested.
## Prerequisites
### Database
leveldb is the default database; it supports multiple filer replicas that will [sync automatically](https://github.com/seaweedfs/seaweedfs/wiki/Filer-Store-Replication), with some [limitations](https://github.com/seaweedfs/seaweedfs/wiki/Filer-Store-Replication#limitation).
When the [limitations](https://github.com/seaweedfs/seaweedfs/wiki/Filer-Store-Replication#limitation) apply, or for a large number of filer replicas, an external datastore is recommended,
such as a MySQL-compatible database, as specified in the `values.yaml` at `filer.extraEnvironmentVars`.
A running MySQL-compatible database is expected by default, as specified in the `values.yaml` at `filer.extraEnvironmentVars`.
This database should be pre-configured and initialized by running:
```sql
CREATE TABLE IF NOT EXISTS `filemeta` (
@ -46,31 +23,29 @@ CREATE TABLE IF NOT EXISTS `filemeta` (
) DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;
```
Alternative database can also be configured (e.g. leveldb, postgres) following the instructions at `filer.extraEnvironmentVars`.
Alternative database can also be configured (e.g. leveldb) following the instructions at `filer.extraEnvironmentVars`.
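As an illustration only, here is a minimal `values.yaml` sketch for pointing the filer at a MySQL-compatible store through `filer.extraEnvironmentVars`. It assumes the usual `WEED_<SECTION>_<KEY>` mapping onto `filer.toml` settings and that `filer.extraEnvironmentVars` renders map values as `valueFrom` blocks (as the templates later in this diff do for other components); the variable names and the secret name are assumptions, so verify them against the comments in the chart's `values.yaml`:
```yaml
filer:
  extraEnvironmentVars:
    WEED_MYSQL_ENABLED: "true"
    WEED_MYSQL_HOSTNAME: "mysql.database.svc.cluster.local"  # hypothetical service name
    WEED_MYSQL_PORT: "3306"
    WEED_MYSQL_DATABASE: "seaweedfs"
    WEED_MYSQL_USERNAME: "seaweedfs"
    WEED_MYSQL_PASSWORD:
      secretKeyRef:                  # map value rendered as a valueFrom block
        name: secret-seaweedfs-db    # hypothetical pre-created secret
        key: password
    WEED_LEVELDB2_ENABLED: "false"   # assumed key to disable the default embedded store
```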
### Node Labels
Kubernetes nodes can have labels which help to define which node(Host) will run which pod:
Here is an example:
Kubernetes node have labels which help to define which node(Host) will run which pod:
* s3/filer/master needs the label **sw-backend=true**
* volume need the label **sw-volume=true**
to label a node to be able to run all pod types in k8s:
```
kubectl label node YOUR_NODE_NAME sw-volume=true sw-backend=true
kubectl label node YOUR_NODE_NAME sw-volume=true,sw-backend=true
```
On a production k8s deployment you will want each pod to have a different host,
especially the volume servers and the masters; all pods (master/volume/filer)
should have anti-affinity rules to disallow running multiple component pods on the same host.
If you still want to run multiple pods of the same component (master/volume/filer) on the same host, please set/update the corresponding affinity rule in values.yaml to an empty one:
especially the volume server & the masters, currently all pods (master/volume/filer)
have anti-affinity rule to disallow running multiple pod type on the same host.
if you still want to run multiple pods of the same type (master/volume/filer) on the same host
please set/update the corresponding affinity rule in values.yaml to an empty one:
```affinity: ""```
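For example, to relax the rule for the master and volume components (the per-component key names below are assumed to mirror the values.yaml layout):
```yaml
master:
  affinity: ""   # allow multiple master pods on the same node
volume:
  affinity: ""   # allow multiple volume pods on the same node
```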
## PVC - storage class ###
On the volume stateful set added support for k8s PVC, currently example
on the volume stateful set added support for K8S PVC, currently example
with the simple local-path-provisioner from Rancher (comes included with k3d / k3s)
https://github.com/rancher/local-path-provisioner
@ -78,74 +53,10 @@ you can use ANY storage class you like, just update the correct storage-class
for your deployment.
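As a sketch only (key layout inferred from the chart templates in this diff; confirm names and defaults in values.yaml for your chart version), switching the volume server's data directory to a PVC with an explicit storage class might look like:
```yaml
volume:
  data:
    type: "persistentVolumeClaim"   # instead of a hostPath
    storageClass: "local-path"      # any storage class available in your cluster
    size: "100Gi"                   # placeholder size
```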
## current instances config (AIO):
1 instance for each type (master/filer+s3/volume)
You can update the replica count for each node type in values.yaml;
add more nodes with the corresponding labels if applicable.
you can update the replicas count for each node type in values.yaml,
need to add more nodes with the corresponding labels.
Most of the configuration is available through values.yaml; any pull requests to expand functionality or usability are greatly appreciated. Any pull request must pass [chart-testing](https://github.com/helm/chart-testing).
most of the configuration are available through values.yaml
## S3 configuration
To enable an s3 endpoint for your filer with a default install add the following to your values.yaml:
```yaml
filer:
s3:
enabled: true
```
### Enabling Authentication to S3
To enable authentication for S3, you have two options:
- let the helm chart create an admin user as well as a read only user
- provide your own s3 config.json file via an existing Kubernetes Secret
#### Use the default credentials for S3
Example parameters for your values.yaml:
```yaml
filer:
s3:
enabled: true
enableAuth: true
```
#### Provide your own credentials for S3
Example parameters for your values.yaml:
```yaml
filer:
s3:
enabled: true
enableAuth: true
existingConfigSecret: my-s3-secret
```
Example existing secret with your s3 config to create an admin user and readonly user, both with credentials:
```yaml
---
# Source: seaweedfs/templates/seaweedfs-s3-secret.yaml
apiVersion: v1
kind: Secret
type: Opaque
metadata:
name: my-s3-secret
namespace: seaweedfs
labels:
app.kubernetes.io/name: seaweedfs
app.kubernetes.io/component: s3
stringData:
# this key must be an inline json config file
seaweedfs_s3_config: '{"identities":[{"name":"anvAdmin","credentials":[{"accessKey":"snu8yoP6QAlY0ne4","secretKey":"PNzBcmeLNEdR0oviwm04NQAicOrDH1Km"}],"actions":["Admin","Read","Write"]},{"name":"anvReadOnly","credentials":[{"accessKey":"SCigFee6c5lbi04A","secretKey":"kgFhbT38R8WUYVtiFQ1OiSVOrYr3NKku"}],"actions":["Read"]}]}'
```
## Enterprise
For enterprise users, please visit [seaweedfs.com](https://seaweedfs.com) for the SeaweedFS Enterprise Edition,
which has a self-healing storage format with better data protection.


@ -49,7 +49,25 @@ Inject extra environment vars in the format key:value, if populated
{{- $imageOverride := .Values.filer.imageOverride -}}
{{- printf "%s" $imageOverride -}}
{{- else -}}
{{- include "common.image" . }}
{{- $registryName := default .Values.image.registry .Values.global.localRegistry | toString -}}
{{- $repositoryName := .Values.image.repository | toString -}}
{{- $name := .Values.global.imageName | toString -}}
{{- $tag := .Chart.AppVersion | toString -}}
{{- printf "%s%s%s:%s" $registryName $repositoryName $name $tag -}}
{{- end -}}
{{- end -}}
{{/* Return the proper dbSchema image */}}
{{- define "filer.dbSchema.image" -}}
{{- if .Values.filer.dbSchema.imageOverride -}}
{{- $imageOverride := .Values.filer.dbSchema.imageOverride -}}
{{- printf "%s" $imageOverride -}}
{{- else -}}
{{- $registryName := default .Values.global.registry .Values.global.localRegistry | toString -}}
{{- $repositoryName := .Values.global.repository | toString -}}
{{- $name := .Values.filer.dbSchema.imageName | toString -}}
{{- $tag := .Values.filer.dbSchema.imageTag | toString -}}
{{- printf "%s%s%s:%s" $registryName $repositoryName $name $tag -}}
{{- end -}}
{{- end -}}
@ -59,7 +77,11 @@ Inject extra environment vars in the format key:value, if populated
{{- $imageOverride := .Values.master.imageOverride -}}
{{- printf "%s" $imageOverride -}}
{{- else -}}
{{- include "common.image" . }}
{{- $registryName := default .Values.image.registry .Values.global.localRegistry | toString -}}
{{- $repositoryName := .Values.image.repository | toString -}}
{{- $name := .Values.global.imageName | toString -}}
{{- $tag := .Chart.AppVersion | toString -}}
{{- printf "%s%s%s:%s" $registryName $repositoryName $name $tag -}}
{{- end -}}
{{- end -}}
@ -69,17 +91,11 @@ Inject extra environment vars in the format key:value, if populated
{{- $imageOverride := .Values.s3.imageOverride -}}
{{- printf "%s" $imageOverride -}}
{{- else -}}
{{- include "common.image" . }}
{{- end -}}
{{- end -}}
{{/* Return the proper sftp image */}}
{{- define "sftp.image" -}}
{{- if .Values.sftp.imageOverride -}}
{{- $imageOverride := .Values.sftp.imageOverride -}}
{{- printf "%s" $imageOverride -}}
{{- else -}}
{{- include "common.image" . }}
{{- $registryName := default .Values.image.registry .Values.global.localRegistry | toString -}}
{{- $repositoryName := .Values.image.repository | toString -}}
{{- $name := .Values.global.imageName | toString -}}
{{- $tag := .Chart.AppVersion | toString -}}
{{- printf "%s%s%s:%s" $registryName $repositoryName $name $tag -}}
{{- end -}}
{{- end -}}
@ -89,20 +105,11 @@ Inject extra environment vars in the format key:value, if populated
{{- $imageOverride := .Values.volume.imageOverride -}}
{{- printf "%s" $imageOverride -}}
{{- else -}}
{{- include "common.image" . }}
{{- end -}}
{{- end -}}
{{/* Computes the container image name for all components (if they are not overridden) */}}
{{- define "common.image" -}}
{{- $registryName := default .Values.image.registry .Values.global.registry | toString -}}
{{- $registryName := default .Values.image.registry .Values.global.localRegistry | toString -}}
{{- $repositoryName := .Values.image.repository | toString -}}
{{- $name := .Values.global.imageName | toString -}}
{{- $tag := default .Chart.AppVersion .Values.image.tag | toString -}}
{{- if $registryName -}}
{{- printf "%s/%s%s:%s" $registryName $repositoryName $name $tag -}}
{{- else -}}
{{- printf "%s%s:%s" $repositoryName $name $tag -}}
{{- $tag := .Chart.AppVersion | toString -}}
{{- printf "%s%s%s:%s" $registryName $repositoryName $name $tag -}}
{{- end -}}
{{- end -}}
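Based on the keys referenced by `common.image` above, a hedged `values.yaml` sketch for overriding the image would be (the default image name shown is an assumption):
```yaml
global:
  imageName: chrislusf/seaweedfs    # assumed default
image:
  registry: "registry.example.com"  # optional; when empty, no registry prefix is added
  repository: ""
  tag: ""                           # empty falls back to the chart appVersion
```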
@ -111,7 +118,20 @@ Inject extra environment vars in the format key:value, if populated
{{- if or (or (eq .Values.volume.data.type "persistentVolumeClaim") (and (eq .Values.volume.idx.type "persistentVolumeClaim") .Values.volume.dir_idx )) (eq .Values.volume.logs.type "persistentVolumeClaim") -}}
{{- printf "true" -}}
{{- else -}}
{{- printf "" -}}
{{- printf "false" -}}
{{- end -}}
{{- end -}}
{{/* check if any Volume HostPath exists */}}
{{- define "volume.hostpath_exists" -}}
{{- if or (or (eq .Values.volume.data.type "hostPath") (and (eq .Values.volume.idx.type "hostPath") .Values.volume.dir_idx )) (eq .Values.volume.logs.type "hostPath") -}}
{{- printf "true" -}}
{{- else -}}
{{- if or .Values.global.enableSecurity .Values.volume.extraVolumes -}}
{{- printf "true" -}}
{{- else -}}
{{- printf "false" -}}
{{- end -}}
{{- end -}}
{{- end -}}
@ -120,7 +140,16 @@ Inject extra environment vars in the format key:value, if populated
{{- if or (eq .Values.filer.data.type "persistentVolumeClaim") (eq .Values.filer.logs.type "persistentVolumeClaim") -}}
{{- printf "true" -}}
{{- else -}}
{{- printf "" -}}
{{- printf "false" -}}
{{- end -}}
{{- end -}}
{{/* check if any Filer HostPath exists */}}
{{- define "filer.hostpath_exists" -}}
{{- if or (eq .Values.filer.data.type "hostPath") (eq .Values.filer.logs.type "hostPath") -}}
{{- printf "true" -}}
{{- else -}}
{{- printf "false" -}}
{{- end -}}
{{- end -}}
@ -129,93 +158,28 @@ Inject extra environment vars in the format key:value, if populated
{{- if or (eq .Values.master.data.type "persistentVolumeClaim") (eq .Values.master.logs.type "persistentVolumeClaim") -}}
{{- printf "true" -}}
{{- else -}}
{{- printf "" -}}
{{- printf "false" -}}
{{- end -}}
{{- end -}}
{{/* check if any Master HostPath exists */}}
{{- define "master.hostpath_exists" -}}
{{- if or (eq .Values.master.data.type "hostPath") (eq .Values.master.logs.type "hostPath") -}}
{{- printf "true" -}}
{{- else -}}
{{- if or .Values.global.enableSecurity .Values.volume.extraVolumes -}}
{{- printf "true" -}}
{{- else -}}
{{- printf "false" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/* check if any InitContainers exist for Volumes */}}
{{- define "volume.initContainers_exists" -}}
{{- if or (not (empty .Values.volume.idx )) (not (empty .Values.volume.initContainers )) -}}
{{- if or (not (empty .Values.volume.dir_idx )) (not (empty .Values.volume.initContainers )) -}}
{{- printf "true" -}}
{{- else -}}
{{- printf "" -}}
{{- end -}}
{{- end -}}
{{/* Return the proper imagePullSecrets */}}
{{- define "seaweedfs.imagePullSecrets" -}}
{{- with .Values.global.imagePullSecrets }}
imagePullSecrets:
{{- if kindIs "string" . }}
- name: {{ . }}
{{- else }}
{{- range . }}
{{- if kindIs "string" . }}
- name: {{ . }}
{{- else }}
- {{ toYaml . }}
{{- end}}
{{- end }}
{{- end }}
{{- end }}
{{- end -}}
{{/*
Renders a value that contains template perhaps with scope if the scope is present.
Usage:
{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $ ) }}
{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $ "scope" $app ) }}
*/}}
{{- define "common.tplvalues.render" -}}
{{- $value := typeIs "string" .value | ternary .value (.value | toYaml) }}
{{- if contains "{{" (toJson .value) }}
{{- if .scope }}
{{- tpl (cat "{{- with $.RelativeScope -}}" $value "{{- end }}") (merge (dict "RelativeScope" .scope) .context) }}
{{- else }}
{{- tpl $value .context }}
{{- end }}
{{- else }}
{{- $value }}
{{- end }}
{{- end -}}
{{/*
Converts a Kubernetes quantity like "256Mi" or "2G" to a float64 in base units,
handling both binary (Ki, Mi, Gi) and decimal (m, k, M) suffixes; numeric inputs are returned unchanged.
Usage:
{{ include "common.resource-quantity" "10Gi" }}
*/}}
{{- define "common.resource-quantity" -}}
{{- $value := . -}}
{{- $unit := 1.0 -}}
{{- if typeIs "string" . -}}
{{- $base2 := dict "Ki" 0x1p10 "Mi" 0x1p20 "Gi" 0x1p30 "Ti" 0x1p40 "Pi" 0x1p50 "Ei" 0x1p60 -}}
{{- $base10 := dict "m" 1e-3 "k" 1e3 "M" 1e6 "G" 1e9 "T" 1e12 "P" 1e15 "E" 1e18 -}}
{{- range $k, $v := merge $base2 $base10 -}}
{{- if hasSuffix $k $ -}}
{{- $value = trimSuffix $k $ -}}
{{- $unit = $v -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- mulf (float64 $value) $unit -}}
{{- end -}}
{{/*
getOrGeneratePassword will check if a password exists in a secret and return it,
or generate a new random password if it doesn't exist.
*/}}
{{- define "getOrGeneratePassword" -}}
{{- $params := . -}}
{{- $namespace := $params.namespace -}}
{{- $secretName := $params.secretName -}}
{{- $key := $params.key -}}
{{- $length := default 16 $params.length -}}
{{- $existingSecret := lookup "v1" "Secret" $namespace $secretName -}}
{{- if and $existingSecret (index $existingSecret.data $key) -}}
{{- index $existingSecret.data $key | b64dec -}}
{{- else -}}
{{- randAlphaNum $length -}}
{{- printf "false" -}}
{{- end -}}
{{- end -}}
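The `getOrGeneratePassword` helper above expects a dict with `namespace`, `secretName`, `key`, and an optional `length` (default 16). A hypothetical call site in a Secret template (not part of the chart shown here) could look like:
```yaml
apiVersion: v1
kind: Secret
metadata:
  name: my-db-credentials   # hypothetical secret name
type: Opaque
stringData:
  # Reuses the existing value on upgrade, or generates a 24-character password on first install.
  password: {{ include "getOrGeneratePassword" (dict "namespace" .Release.Namespace "secretName" "my-db-credentials" "key" "password" "length" 24) | quote }}
```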


@ -1,431 +0,0 @@
{{- if .Values.allInOne.enabled }}
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ template "seaweedfs.name" . }}-all-in-one
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: {{ template "seaweedfs.name" . }}
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: seaweedfs-all-in-one
{{- if .Values.allInOne.annotations }}
annotations:
{{- toYaml .Values.allInOne.annotations | nindent 4 }}
{{- end }}
spec:
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/name: {{ template "seaweedfs.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: seaweedfs-all-in-one
template:
metadata:
labels:
app.kubernetes.io/name: {{ template "seaweedfs.name" . }}
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: seaweedfs-all-in-one
{{- with .Values.podLabels }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.allInOne.podLabels }}
{{- toYaml . | nindent 8 }}
{{- end }}
annotations:
{{- with .Values.podAnnotations }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.allInOne.podAnnotations }}
{{- toYaml . | nindent 8 }}
{{- end }}
spec:
restartPolicy: {{ default .Values.global.restartPolicy .Values.allInOne.restartPolicy }}
{{- if .Values.allInOne.affinity }}
affinity:
{{ tpl .Values.allInOne.affinity . | nindent 8 | trim }}
{{- end }}
{{- if .Values.allInOne.topologySpreadConstraints }}
topologySpreadConstraints:
{{ tpl .Values.allInOne.topologySpreadConstraints . | nindent 8 | trim }}
{{- end }}
{{- if .Values.allInOne.tolerations }}
tolerations:
{{- tpl .Values.allInOne.tolerations . | nindent 8 }}
{{- end }}
{{- include "seaweedfs.imagePullSecrets" . | nindent 6 }}
terminationGracePeriodSeconds: 60
enableServiceLinks: false
{{- if .Values.allInOne.priorityClassName }}
priorityClassName: {{ .Values.allInOne.priorityClassName | quote }}
{{- end }}
{{- if .Values.allInOne.serviceAccountName }}
serviceAccountName: {{ .Values.allInOne.serviceAccountName | quote }}
{{- end }}
{{- if .Values.allInOne.initContainers }}
initContainers:
{{- tpl .Values.allInOne.initContainers . | nindent 8 }}
{{- end }}
{{- if .Values.allInOne.podSecurityContext.enabled }}
securityContext:
{{- omit .Values.allInOne.podSecurityContext "enabled" | toYaml | nindent 8 }}
{{- end }}
containers:
- name: seaweedfs
image: {{ template "master.image" . }}
imagePullPolicy: {{ default "IfNotPresent" .Values.global.imagePullPolicy }}
env:
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: SEAWEEDFS_FULLNAME
value: "{{ template "seaweedfs.name" . }}"
{{- if .Values.allInOne.extraEnvironmentVars }}
{{- range $key, $value := .Values.allInOne.extraEnvironmentVars }}
- name: {{ $key }}
{{- if kindIs "string" $value }}
value: {{ $value | quote }}
{{- else }}
valueFrom:
{{ toYaml $value | nindent 16 }}
{{- end }}
{{- end }}
{{- end }}
{{- if .Values.global.extraEnvironmentVars }}
{{- range $key, $value := .Values.global.extraEnvironmentVars }}
- name: {{ $key }}
{{- if kindIs "string" $value }}
value: {{ $value | quote }}
{{- else }}
valueFrom:
{{ toYaml $value | nindent 16 }}
{{- end }}
{{- end }}
{{- end }}
command:
- "/bin/sh"
- "-ec"
- |
/usr/bin/weed \
-v={{ .Values.global.loggingLevel }} \
server \
-dir=/data \
-master \
-volume \
-ip=${POD_IP} \
-ip.bind=0.0.0.0 \
{{- if .Values.allInOne.idleTimeout }}
-idleTimeout={{ .Values.allInOne.idleTimeout }} \
{{- end }}
{{- if .Values.allInOne.dataCenter }}
-dataCenter={{ .Values.allInOne.dataCenter }} \
{{- end }}
{{- if .Values.allInOne.rack }}
-rack={{ .Values.allInOne.rack }} \
{{- end }}
{{- if .Values.allInOne.whiteList }}
-whiteList={{ .Values.allInOne.whiteList }} \
{{- end }}
{{- if .Values.allInOne.disableHttp }}
-disableHttp={{ .Values.allInOne.disableHttp }} \
{{- end }}
{{- if and (.Values.volume.dataDirs) (index .Values.volume.dataDirs 0 "maxVolumes") }}
-volume.max={{ index .Values.volume.dataDirs 0 "maxVolumes" }} \
{{- end }}
-master.port={{ .Values.master.port }} \
{{- if .Values.global.enableReplication }}
-master.defaultReplication={{ .Values.global.replicationPlacement }} \
{{- else }}
-master.defaultReplication={{ .Values.master.defaultReplication }} \
{{- end }}
{{- if .Values.master.volumePreallocate }}
-master.volumePreallocate \
{{- end }}
-master.volumeSizeLimitMB={{ .Values.master.volumeSizeLimitMB }} \
{{- if .Values.master.garbageThreshold }}
-master.garbageThreshold={{ .Values.master.garbageThreshold }} \
{{- end }}
-volume.port={{ .Values.volume.port }} \
-volume.readMode={{ .Values.volume.readMode }} \
{{- if .Values.volume.imagesFixOrientation }}
-volume.images.fix.orientation \
{{- end }}
{{- if .Values.volume.index }}
-volume.index={{ .Values.volume.index }} \
{{- end }}
{{- if .Values.volume.fileSizeLimitMB }}
-volume.fileSizeLimitMB={{ .Values.volume.fileSizeLimitMB }} \
{{- end }}
-volume.minFreeSpacePercent={{ .Values.volume.minFreeSpacePercent }} \
-volume.compactionMBps={{ .Values.volume.compactionMBps }} \
{{- if .Values.allInOne.metricsPort }}
-metricsPort={{ .Values.allInOne.metricsPort }} \
{{- else if .Values.master.metricsPort }}
-metricsPort={{ .Values.master.metricsPort }} \
{{- end }}
-filer \
-filer.port={{ .Values.filer.port }} \
{{- if .Values.filer.disableDirListing }}
-filer.disableDirListing \
{{- end }}
-filer.dirListLimit={{ .Values.filer.dirListLimit }} \
{{- if .Values.global.enableReplication }}
-filer.defaultReplicaPlacement={{ .Values.global.replicationPlacement }} \
{{- else }}
-filer.defaultReplicaPlacement={{ .Values.filer.defaultReplicaPlacement }} \
{{- end }}
{{- if .Values.filer.maxMB }}
-filer.maxMB={{ .Values.filer.maxMB }} \
{{- end }}
{{- if .Values.filer.encryptVolumeData }}
-filer.encryptVolumeData \
{{- end }}
{{- if .Values.filer.filerGroup}}
-filer.filerGroup={{ .Values.filer.filerGroup}} \
{{- end }}
{{- if .Values.filer.rack }}
-filer.rack={{ .Values.filer.rack }} \
{{- end }}
{{- if .Values.filer.dataCenter }}
-filer.dataCenter={{ .Values.filer.dataCenter }} \
{{- end }}
{{- if .Values.allInOne.s3.enabled }}
-s3 \
-s3.port={{ .Values.s3.port }} \
{{- if .Values.s3.domainName }}
-s3.domainName={{ .Values.s3.domainName }} \
{{- end }}
{{- if .Values.global.enableSecurity }}
{{- if .Values.s3.httpsPort }}
-s3.port.https={{ .Values.s3.httpsPort }} \
{{- end }}
-s3.cert.file=/usr/local/share/ca-certificates/client/tls.crt \
-s3.key.file=/usr/local/share/ca-certificates/client/tls.key \
{{- end }}
{{- if eq (typeOf .Values.s3.allowEmptyFolder) "bool" }}
-s3.allowEmptyFolder={{ .Values.s3.allowEmptyFolder }} \
{{- end }}
{{- if .Values.s3.enableAuth }}
-s3.config=/etc/sw/s3/seaweedfs_s3_config \
{{- end }}
{{- if .Values.s3.auditLogConfig }}
-s3.auditLogConfig=/etc/sw/s3/s3_auditLogConfig.json \
{{- end }}
{{- end }}
{{- if .Values.allInOne.sftp.enabled }}
-sftp \
-sftp.port={{ .Values.sftp.port }} \
{{- if .Values.sftp.sshPrivateKey }}
-sftp.sshPrivateKey={{ .Values.sftp.sshPrivateKey }} \
{{- end }}
{{- if .Values.sftp.hostKeysFolder }}
-sftp.hostKeysFolder={{ .Values.sftp.hostKeysFolder }} \
{{- end }}
{{- if .Values.sftp.authMethods }}
-sftp.authMethods={{ .Values.sftp.authMethods }} \
{{- end }}
{{- if .Values.sftp.maxAuthTries }}
-sftp.maxAuthTries={{ .Values.sftp.maxAuthTries }} \
{{- end }}
{{- if .Values.sftp.bannerMessage }}
-sftp.bannerMessage="{{ .Values.sftp.bannerMessage }}" \
{{- end }}
{{- if .Values.sftp.loginGraceTime }}
-sftp.loginGraceTime={{ .Values.sftp.loginGraceTime }} \
{{- end }}
{{- if .Values.sftp.clientAliveInterval }}
-sftp.clientAliveInterval={{ .Values.sftp.clientAliveInterval }} \
{{- end }}
{{- if .Values.sftp.clientAliveCountMax }}
-sftp.clientAliveCountMax={{ .Values.sftp.clientAliveCountMax }} \
{{- end }}
-sftp.userStoreFile=/etc/sw/sftp/seaweedfs_sftp_config \
{{- end }}
volumeMounts:
- name: data
mountPath: /data
{{- if and .Values.allInOne.s3.enabled (or .Values.s3.enableAuth .Values.filer.s3.enableAuth) }}
- name: config-s3-users
mountPath: /etc/sw/s3
readOnly: true
{{- end }}
{{- if .Values.allInOne.sftp.enabled }}
- name: config-ssh
mountPath: /etc/sw/ssh
readOnly: true
- mountPath: /etc/sw/sftp
name: config-users
readOnly: true
{{- end }}
{{- if .Values.filer.notificationConfig }}
- name: notification-config
mountPath: /etc/seaweedfs/notification.toml
subPath: notification.toml
readOnly: true
{{- end }}
- name: master-config
mountPath: /etc/seaweedfs/master.toml
subPath: master.toml
readOnly: true
{{- if .Values.global.enableSecurity }}
- name: security-config
mountPath: /etc/seaweedfs/security.toml
subPath: security.toml
readOnly: true
- name: ca-cert
mountPath: /usr/local/share/ca-certificates/ca/
readOnly: true
- name: master-cert
mountPath: /usr/local/share/ca-certificates/master/
readOnly: true
- name: volume-cert
mountPath: /usr/local/share/ca-certificates/volume/
readOnly: true
- name: filer-cert
mountPath: /usr/local/share/ca-certificates/filer/
readOnly: true
- name: client-cert
mountPath: /usr/local/share/ca-certificates/client/
readOnly: true
{{- end }}
{{ tpl .Values.allInOne.extraVolumeMounts . | nindent 12 }}
ports:
- containerPort: {{ .Values.master.port }}
name: swfs-mas
- containerPort: {{ .Values.master.grpcPort }}
name: swfs-mas-grpc
- containerPort: {{ .Values.volume.port }}
name: swfs-vol
- containerPort: {{ .Values.volume.grpcPort }}
name: swfs-vol-grpc
- containerPort: {{ .Values.filer.port }}
name: swfs-fil
- containerPort: {{ .Values.filer.grpcPort }}
name: swfs-fil-grpc
{{- if .Values.allInOne.s3.enabled }}
- containerPort: {{ .Values.s3.port }}
name: swfs-s3
{{- if .Values.s3.httpsPort }}
- containerPort: {{ .Values.s3.httpsPort }}
name: swfs-s3-tls
{{- end }}
{{- end }}
{{- if .Values.allInOne.sftp.enabled }}
- containerPort: {{ .Values.sftp.port }}
name: swfs-sftp
{{- end }}
{{- if .Values.allInOne.metricsPort }}
- containerPort: {{ .Values.allInOne.metricsPort }}
name: server-metrics
{{- end }}
{{- if .Values.allInOne.readinessProbe.enabled }}
readinessProbe:
httpGet:
path: {{ .Values.allInOne.readinessProbe.httpGet.path }}
port: {{ .Values.master.port }}
scheme: {{ .Values.allInOne.readinessProbe.scheme }}
initialDelaySeconds: {{ .Values.allInOne.readinessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.allInOne.readinessProbe.periodSeconds }}
successThreshold: {{ .Values.allInOne.readinessProbe.successThreshold }}
failureThreshold: {{ .Values.allInOne.readinessProbe.failureThreshold }}
timeoutSeconds: {{ .Values.allInOne.readinessProbe.timeoutSeconds }}
{{- end }}
{{- if .Values.allInOne.livenessProbe.enabled }}
livenessProbe:
httpGet:
path: {{ .Values.allInOne.livenessProbe.httpGet.path }}
port: {{ .Values.master.port }}
scheme: {{ .Values.allInOne.livenessProbe.scheme }}
initialDelaySeconds: {{ .Values.allInOne.livenessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.allInOne.livenessProbe.periodSeconds }}
successThreshold: {{ .Values.allInOne.livenessProbe.successThreshold }}
failureThreshold: {{ .Values.allInOne.livenessProbe.failureThreshold }}
timeoutSeconds: {{ .Values.allInOne.livenessProbe.timeoutSeconds }}
{{- end }}
{{- with .Values.allInOne.resources }}
resources:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- if .Values.allInOne.containerSecurityContext.enabled }}
securityContext:
{{- omit .Values.allInOne.containerSecurityContext "enabled" | toYaml | nindent 12 }}
{{- end }}
{{- if .Values.allInOne.sidecars }}
{{- include "common.tplvalues.render" (dict "value" .Values.allInOne.sidecars "context" $) | nindent 8 }}
{{- end }}
volumes:
- name: data
{{- if eq .Values.allInOne.data.type "hostPath" }}
hostPath:
path: {{ .Values.allInOne.data.hostPathPrefix }}/seaweedfs-all-in-one-data/
type: DirectoryOrCreate
{{- else if eq .Values.allInOne.data.type "persistentVolumeClaim" }}
persistentVolumeClaim:
claimName: {{ .Values.allInOne.data.claimName }}
{{- else if eq .Values.allInOne.data.type "emptyDir" }}
emptyDir: {}
{{- end }}
{{- if and .Values.allInOne.s3.enabled (or .Values.s3.enableAuth .Values.filer.s3.enableAuth) }}
- name: config-s3-users
secret:
defaultMode: 420
secretName: {{ default (printf "%s-s3-secret" (include "seaweedfs.name" .)) (or .Values.s3.existingConfigSecret .Values.filer.s3.existingConfigSecret) }}
{{- end }}
{{- if .Values.allInOne.sftp.enabled }}
- name: config-ssh
secret:
defaultMode: 420
secretName: {{ default (printf "%s-sftp-ssh-secret" (include "seaweedfs.name" .)) .Values.sftp.existingSshConfigSecret }}
- name: config-users
secret:
defaultMode: 420
secretName: {{ default (printf "%s-sftp-secret" (include "seaweedfs.name" .)) .Values.sftp.existingConfigSecret }}
{{- end }}
{{- if .Values.filer.notificationConfig }}
- name: notification-config
configMap:
name: {{ template "seaweedfs.name" . }}-notification-config
{{- end }}
- name: master-config
configMap:
name: {{ template "seaweedfs.name" . }}-master-config
{{- if .Values.global.enableSecurity }}
- name: security-config
configMap:
name: {{ template "seaweedfs.name" . }}-security-config
- name: ca-cert
secret:
secretName: {{ template "seaweedfs.name" . }}-ca-cert
- name: master-cert
secret:
secretName: {{ template "seaweedfs.name" . }}-master-cert
- name: volume-cert
secret:
secretName: {{ template "seaweedfs.name" . }}-volume-cert
- name: filer-cert
secret:
secretName: {{ template "seaweedfs.name" . }}-filer-cert
- name: client-cert
secret:
secretName: {{ template "seaweedfs.name" . }}-client-cert
{{- end }}
{{ tpl .Values.allInOne.extraVolumes . | nindent 8 }}
{{- if .Values.allInOne.nodeSelector }}
nodeSelector:
{{ tpl .Values.allInOne.nodeSelector . | nindent 8 }}
{{- end }}
{{- end }}
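To exercise this all-in-one Deployment, the relevant `values.yaml` switches (field names taken from the template above; concrete sizes and class names are placeholders) are roughly:
```yaml
allInOne:
  enabled: true
  s3:
    enabled: true                    # adds the -s3 flags and S3 ports
  sftp:
    enabled: false
  data:
    type: "persistentVolumeClaim"    # or "hostPath" / "emptyDir"
    claimName: "seaweedfs-all-in-one"
    size: "50Gi"                     # placeholder
    storageClass: "local-path"       # placeholder
```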


@ -1,21 +0,0 @@
{{- if and .Values.allInOne.enabled (eq .Values.allInOne.data.type "persistentVolumeClaim") }}
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: {{ .Values.allInOne.data.claimName }}
labels:
app.kubernetes.io/component: seaweedfs-all-in-one
{{- if .Values.allInOne.annotations }}
annotations:
{{- toYaml .Values.allInOne.annotations | nindent 4 }}
{{- end }}
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: {{ .Values.allInOne.data.size }}
{{- if .Values.allInOne.data.storageClass }}
storageClassName: {{ .Values.allInOne.data.storageClass }}
{{- end }}
{{- end }}


@ -1,83 +0,0 @@
{{- if .Values.allInOne.enabled }}
apiVersion: v1
kind: Service
metadata:
name: {{ template "seaweedfs.name" . }}-all-in-one
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: {{ template "seaweedfs.name" . }}
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: seaweedfs-all-in-one
{{- if .Values.allInOne.service.annotations }}
annotations:
{{- toYaml .Values.allInOne.service.annotations | nindent 4 }}
{{- end }}
spec:
internalTrafficPolicy: {{ .Values.allInOne.service.internalTrafficPolicy | default "Cluster" }}
ports:
# Master ports
- name: "swfs-master"
port: {{ .Values.master.port }}
targetPort: {{ .Values.master.port }}
protocol: TCP
- name: "swfs-master-grpc"
port: {{ .Values.master.grpcPort }}
targetPort: {{ .Values.master.grpcPort }}
protocol: TCP
# Volume ports
- name: "swfs-volume"
port: {{ .Values.volume.port }}
targetPort: {{ .Values.volume.port }}
protocol: TCP
- name: "swfs-volume-grpc"
port: {{ .Values.volume.grpcPort }}
targetPort: {{ .Values.volume.grpcPort }}
protocol: TCP
# Filer ports
- name: "swfs-filer"
port: {{ .Values.filer.port }}
targetPort: {{ .Values.filer.port }}
protocol: TCP
- name: "swfs-filer-grpc"
port: {{ .Values.filer.grpcPort }}
targetPort: {{ .Values.filer.grpcPort }}
protocol: TCP
# S3 ports (if enabled)
{{- if .Values.allInOne.s3.enabled }}
- name: "swfs-s3"
port: {{ if .Values.allInOne.s3.enabled }}{{ .Values.s3.port }}{{ else }}{{ .Values.filer.s3.port }}{{ end }}
targetPort: {{ if .Values.allInOne.s3.enabled }}{{ .Values.s3.port }}{{ else }}{{ .Values.filer.s3.port }}{{ end }}
protocol: TCP
{{- if and .Values.allInOne.s3.enabled .Values.s3.httpsPort }}
- name: "swfs-s3-tls"
port: {{ .Values.s3.httpsPort }}
targetPort: {{ .Values.s3.httpsPort }}
protocol: TCP
{{- end }}
{{- end }}
# SFTP ports (if enabled)
{{- if .Values.allInOne.sftp.enabled }}
- name: "swfs-sftp"
port: {{ .Values.sftp.port }}
targetPort: {{ .Values.sftp.port }}
protocol: TCP
{{- end }}
# Server metrics port (single metrics endpoint for all services)
{{- if .Values.allInOne.metricsPort }}
- name: "server-metrics"
port: {{ .Values.allInOne.metricsPort }}
targetPort: {{ .Values.allInOne.metricsPort }}
protocol: TCP
{{- end }}
selector:
app.kubernetes.io/name: {{ template "seaweedfs.name" . }}
app.kubernetes.io/component: seaweedfs-all-in-one
{{- end }}


@ -1,29 +0,0 @@
{{- if .Values.allInOne.enabled }}
{{- if .Values.global.monitoring.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: {{ template "seaweedfs.name" . }}-all-in-one
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: {{ template "seaweedfs.name" . }}
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: all-in-one
{{- with .Values.global.monitoring.additionalLabels }}
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
endpoints:
{{- if .Values.allInOne.metricsPort }}
- interval: 30s
port: server-metrics
scrapeTimeout: 5s
{{- end }}
selector:
matchLabels:
app.kubernetes.io/name: {{ template "seaweedfs.name" . }}
app.kubernetes.io/component: seaweedfs-all-in-one
{{- end }}
{{- end }}


@ -1,19 +1,14 @@
{{- if and .Values.global.enableSecurity (not .Values.certificates.externalCertificates.enabled)}}
apiVersion: cert-manager.io/v1{{ if .Values.global.certificates.alphacrds }}alpha1{{ end }}
{{- if .Values.global.enableSecurity }}
apiVersion: certmanager.k8s.io/v1alpha1
kind: Certificate
metadata:
name: {{ template "seaweedfs.name" . }}-ca-cert
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: {{ template "seaweedfs.name" . }}
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
spec:
secretName: {{ template "seaweedfs.name" . }}-ca-cert
commonName: "{{ template "seaweedfs.name" . }}-root-ca"
isCA: true
issuerRef:
name: {{ template "seaweedfs.name" . }}-issuer
kind: Issuer
name: {{ template "seaweedfs.name" . }}-clusterissuer
kind: ClusterIssuer
{{- end }}


@ -1,15 +0,0 @@
{{- if and .Values.global.enableSecurity (not .Values.certificates.externalCertificates.enabled)}}
apiVersion: cert-manager.io/v1{{ if .Values.global.certificates.alphacrds }}alpha1{{ end }}
kind: Issuer
metadata:
name: {{ template "seaweedfs.name" . }}-ca-issuer
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: {{ template "seaweedfs.name" . }}
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
spec:
ca:
secretName: {{ template "seaweedfs.name" . }}-ca-cert
{{- end }}


@ -0,0 +1,8 @@
{{- if .Values.global.enableSecurity }}
apiVersion: certmanager.k8s.io/v1alpha1
kind: ClusterIssuer
metadata:
name: {{ template "seaweedfs.name" . }}-clusterissuer
spec:
selfSigned: {}
{{- end }}


@ -1,13 +0,0 @@
{{- if and .Values.global.enableSecurity (not .Values.certificates.externalCertificates.enabled)}}
apiVersion: cert-manager.io/v1{{ if .Values.global.certificates.alphacrds }}alpha1{{ end }}
kind: Issuer
metadata:
name: {{ template "seaweedfs.name" . }}-issuer
labels:
app.kubernetes.io/name: {{ template "seaweedfs.name" . }}
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
spec:
selfSigned: {}
{{- end }}


@ -1,22 +1,16 @@
{{- if and .Values.global.enableSecurity (not .Values.certificates.externalCertificates.enabled)}}
apiVersion: cert-manager.io/v1{{ if .Values.global.certificates.alphacrds }}alpha1{{ end }}
{{- if .Values.global.enableSecurity }}
apiVersion: certmanager.k8s.io/v1alpha1
kind: Certificate
metadata:
name: {{ template "seaweedfs.name" . }}-client-cert
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: {{ template "seaweedfs.name" . }}
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
spec:
secretName: {{ template "seaweedfs.name" . }}-client-cert
issuerRef:
name: {{ template "seaweedfs.name" . }}-ca-issuer
kind: Issuer
name: {{ template "seaweedfs.name" . }}-clusterissuer
kind: ClusterIssuer
commonName: {{ .Values.certificates.commonName }}
subject:
organizations:
organization:
- "SeaweedFS CA"
dnsNames:
- '*.{{ .Release.Namespace }}'
@ -32,9 +26,8 @@ spec:
- {{ . }}
{{- end }}
{{- end }}
privateKey:
algorithm: {{ .Values.certificates.keyAlgorithm }}
size: {{ .Values.certificates.keySize }}
keyAlgorithm: {{ .Values.certificates.keyAlgorithm }}
keySize: {{ .Values.certificates.keySize }}
duration: {{ .Values.certificates.duration }}
renewBefore: {{ .Values.certificates.renewBefore }}
{{- end }}
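A sketch of the certificates values block that these Certificate templates read. The field names come straight from the templates; the common name, algorithm, key size, and durations below are illustrative placeholders rather than confirmed chart defaults.

certificates:
  commonName: seaweedfs.cluster.local
  keyAlgorithm: ECDSA
  keySize: 256
  duration: 2160h
  renewBefore: 360h
  externalCertificates:
    enabled: false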


@ -1,35 +0,0 @@
{{- if .Values.global.createClusterRole }}
#hack for delete pod master after migration
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ .Values.global.serviceAccountName }}-rw-cr
labels:
app.kubernetes.io/name: {{ template "seaweedfs.name" . }}
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
rules:
- apiGroups: [""]
resources: ["pods"]
verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: system:serviceaccount:{{ .Values.global.serviceAccountName }}:default
labels:
app.kubernetes.io/name: {{ template "seaweedfs.name" . }}
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
subjects:
- kind: ServiceAccount
name: {{ .Values.global.serviceAccountName }}
namespace: {{ .Release.Namespace }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ .Values.global.serviceAccountName }}-rw-cr
{{- end }}


@ -1,16 +0,0 @@
{{- if and .Values.cosi.enabled .Values.cosi.bucketClassName }}
---
kind: BucketClass
apiVersion: objectstorage.k8s.io/v1alpha1
metadata:
name: {{ .Values.cosi.bucketClassName }}
driverName: {{ .Values.cosi.driverName }}
deletionPolicy: Delete
---
kind: BucketAccessClass
apiVersion: objectstorage.k8s.io/v1alpha1
metadata:
name: {{ .Values.cosi.bucketClassName }}
driverName: {{ .Values.cosi.driverName }}
authenticationType: KEY
{{- end }}


@ -1,69 +0,0 @@
{{- if .Values.cosi.enabled }}
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ .Values.global.serviceAccountName }}-objectstorage-provisioner
labels:
app.kubernetes.io/name: {{ template "seaweedfs.name" . }}
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
rules:
- apiGroups: ["objectstorage.k8s.io"]
resources:
- "buckets"
- "bucketaccesses"
- "bucketclaims"
- "bucketaccessclasses"
- "buckets/status"
- "bucketaccesses/status"
- "bucketclaims/status"
- "bucketaccessclasses/status"
verbs:
- "get"
- "list"
- "watch"
- "update"
- "create"
- "delete"
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs:
- "get"
- "watch"
- "list"
- "delete"
- "update"
- "create"
- apiGroups: [""]
resources:
- "secrets"
- "events"
verbs:
- "get"
- "list"
- "watch"
- "update"
- "create"
- "delete"
- "patch"
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ .Values.global.serviceAccountName }}-objectstorage-provisioner
labels:
app.kubernetes.io/name: {{ template "seaweedfs.name" . }}
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
subjects:
- kind: ServiceAccount
name: {{ .Values.global.serviceAccountName }}-objectstorage-provisioner
namespace: {{ .Release.Namespace }}
roleRef:
kind: ClusterRole
name: {{ .Values.global.serviceAccountName }}-objectstorage-provisioner
apiGroup: rbac.authorization.k8s.io
{{- end }}


@ -1,217 +0,0 @@
{{- if .Values.cosi.enabled }}
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ template "seaweedfs.name" . }}-objectstorage-provisioner
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: {{ template "seaweedfs.name" . }}
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: objectstorage-provisioner
spec:
replicas: {{ .Values.cosi.replicas }}
selector:
matchLabels:
app.kubernetes.io/name: {{ template "seaweedfs.name" . }}
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: objectstorage-provisioner
template:
metadata:
labels:
app.kubernetes.io/name: {{ template "seaweedfs.name" . }}
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: objectstorage-provisioner
{{ with .Values.podLabels }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.cosi.podLabels }}
{{- toYaml . | nindent 8 }}
{{- end }}
annotations:
{{ with .Values.podAnnotations }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.cosi.podAnnotations }}
{{- toYaml . | nindent 8 }}
{{- end }}
spec:
restartPolicy: {{ default .Values.global.restartPolicy .Values.cosi.restartPolicy }}
{{- if .Values.cosi.affinity }}
affinity:
{{ tpl .Values.cosi.affinity . | nindent 8 | trim }}
{{- end }}
{{- if .Values.cosi.topologySpreadConstraints }}
topologySpreadConstraints:
{{ tpl .Values.cosi.topologySpreadConstraints . | nindent 8 | trim }}
{{- end }}
{{- if .Values.cosi.tolerations }}
tolerations:
{{ tpl .Values.cosi.tolerations . | nindent 8 | trim }}
{{- end }}
{{- include "seaweedfs.imagePullSecrets" . | nindent 6 }}
terminationGracePeriodSeconds: 10
{{- if .Values.cosi.priorityClassName }}
priorityClassName: {{ .Values.cosi.priorityClassName | quote }}
{{- end }}
enableServiceLinks: false
serviceAccountName: {{ .Values.global.serviceAccountName }}-objectstorage-provisioner
{{- if .Values.cosi.initContainers }}
initContainers:
{{ tpl .Values.cosi.initContainers . | nindent 8 | trim }}
{{- end }}
{{- if .Values.cosi.podSecurityContext.enabled }}
securityContext: {{- omit .Values.cosi.podSecurityContext "enabled" | toYaml | nindent 8 }}
{{- end }}
containers:
- name: seaweedfs-cosi-driver
image: "{{ .Values.cosi.image }}"
imagePullPolicy: {{ default "IfNotPresent" .Values.global.imagePullPolicy }}
env:
- name: DRIVERNAME
value: "{{ .Values.cosi.driverName }}"
- name: ENDPOINT
{{- if .Values.cosi.endpoint }}
value: "{{ .Values.cosi.endpoint }}"
{{- else if .Values.s3.ingress.enabled }}
value: "{{ printf "https://%s" .Values.s3.ingress.host }}"
{{- else if .Values.s3.enabled }}
value: "{{ printf "https://%s-s3.%s.svc" (include "seaweedfs.name" .) .Release.Namespace }}"
{{- else }}
value: "{{ printf "https://%s-filer.%s.svc" (include "seaweedfs.name" .) .Release.Namespace }}"
{{- end }}
{{- with .Values.cosi.region }}
- name: REGION
value: "{{ . }}"
{{- end }}
- name: SEAWEEDFS_FILER
value: "{{ template "seaweedfs.name" . }}-filer:{{ .Values.filer.grpcPort }}"
{{- if .Values.global.enableSecurity }}
- name: WEED_GRPC_CLIENT_KEY
value: /usr/local/share/ca-certificates/client/tls.key
- name: WEED_GRPC_CLIENT_CERT
value: /usr/local/share/ca-certificates/client/tls.crt
- name: WEED_GRPC_CA
value: /usr/local/share/ca-certificates/client/ca.crt
{{- end }}
{{- if .Values.cosi.extraEnvironmentVars }}
{{- range $key, $value := .Values.cosi.extraEnvironmentVars }}
- name: {{ $key }}
{{- if kindIs "string" $value }}
value: {{ $value | quote }}
{{- else }}
valueFrom:
{{ toYaml $value | nindent 16 | trim }}
{{- end -}}
{{- end }}
{{- end }}
{{- if .Values.global.extraEnvironmentVars }}
{{- range $key, $value := .Values.global.extraEnvironmentVars }}
- name: {{ $key }}
{{- if kindIs "string" $value }}
value: {{ $value | quote }}
{{- else }}
valueFrom:
{{ toYaml $value | nindent 16 | trim }}
{{- end -}}
{{- end }}
{{- end }}
volumeMounts:
- mountPath: /var/lib/cosi
name: socket
{{- if .Values.cosi.enableAuth }}
- mountPath: /etc/sw
name: config-users
readOnly: true
{{- end }}
{{- if .Values.global.enableSecurity }}
- name: security-config
readOnly: true
mountPath: /etc/seaweedfs/security.toml
subPath: security.toml
- name: ca-cert
readOnly: true
mountPath: /usr/local/share/ca-certificates/ca/
- name: master-cert
readOnly: true
mountPath: /usr/local/share/ca-certificates/master/
- name: volume-cert
readOnly: true
mountPath: /usr/local/share/ca-certificates/volume/
- name: filer-cert
readOnly: true
mountPath: /usr/local/share/ca-certificates/filer/
- name: client-cert
readOnly: true
mountPath: /usr/local/share/ca-certificates/client/
{{- end }}
{{ tpl .Values.cosi.extraVolumeMounts . | nindent 12 | trim }}
{{- with .Values.cosi.resources }}
resources:
{{- toYaml . | nindent 12 }}
{{- end }}
- name: seaweedfs-cosi-sidecar
image: "{{ .Values.cosi.sidecar.image }}"
imagePullPolicy: {{ default "IfNotPresent" .Values.global.imagePullPolicy }}
args:
- {{ printf "--v=%s" (default "5" .Values.cosi.sidecar.logLevel) }}
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
volumeMounts:
- mountPath: /var/lib/cosi
name: socket
{{- with .Values.cosi.sidecar.resources }}
resources:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- if .Values.cosi.containerSecurityContext.enabled }}
securityContext: {{- omit .Values.cosi.containerSecurityContext "enabled" | toYaml | nindent 12 }}
{{- end }}
{{- if .Values.cosi.sidecars }}
{{- include "common.tplvalues.render" (dict "value" .Values.cosi.sidecars "context" $) | nindent 8 }}
{{- end }}
volumes:
- name: socket
emptyDir: {}
{{- if .Values.cosi.enableAuth }}
- name: config-users
secret:
defaultMode: 420
{{- if .Values.cosi.existingConfigSecret }}
secretName: {{ .Values.cosi.existingConfigSecret }}
{{- else }}
secretName: seaweedfs-s3-secret
{{- end }}
{{- end }}
{{- if .Values.global.enableSecurity }}
- name: security-config
configMap:
name: {{ template "seaweedfs.name" . }}-security-config
- name: ca-cert
secret:
secretName: {{ template "seaweedfs.name" . }}-ca-cert
- name: master-cert
secret:
secretName: {{ template "seaweedfs.name" . }}-master-cert
- name: volume-cert
secret:
secretName: {{ template "seaweedfs.name" . }}-volume-cert
- name: filer-cert
secret:
secretName: {{ template "seaweedfs.name" . }}-filer-cert
- name: client-cert
secret:
secretName: {{ template "seaweedfs.name" . }}-client-cert
{{- end }}
{{ tpl .Values.cosi.extraVolumes . | indent 8 | trim }}
{{- if .Values.cosi.nodeSelector }}
nodeSelector:
{{ tpl .Values.cosi.nodeSelector . | indent 8 | trim }}
{{- end }}
{{- end }}


@ -1,13 +0,0 @@
{{- if .Values.cosi.enabled }}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ .Values.global.serviceAccountName }}-objectstorage-provisioner
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: {{ template "seaweedfs.name" . }}
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
automountServiceAccountToken: {{ .Values.global.automountServiceAccountToken }}
{{- end }}


@ -1,27 +1,16 @@
{{- if and .Values.global.enableSecurity (not .Values.certificates.externalCertificates.enabled)}}
apiVersion: cert-manager.io/v1{{ if .Values.global.certificates.alphacrds }}alpha1{{ end }}
{{- if .Values.global.enableSecurity }}
apiVersion: certmanager.k8s.io/v1alpha1
kind: Certificate
metadata:
name: {{ template "seaweedfs.name" . }}-filer-cert
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: {{ template "seaweedfs.name" . }}
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: filer
{{- if .Values.filer.annotations }}
annotations:
{{- toYaml .Values.filer.annotations | nindent 4 }}
{{- end }}
spec:
secretName: {{ template "seaweedfs.name" . }}-filer-cert
issuerRef:
name: {{ template "seaweedfs.name" . }}-ca-issuer
kind: Issuer
name: {{ template "seaweedfs.name" . }}-clusterissuer
kind: ClusterIssuer
commonName: {{ .Values.certificates.commonName }}
subject:
organizations:
organization:
- "SeaweedFS CA"
dnsNames:
- '*.{{ .Release.Namespace }}'
@ -37,9 +26,8 @@ spec:
- {{ . }}
{{- end }}
{{- end }}
privateKey:
algorithm: {{ .Values.certificates.keyAlgorithm }}
size: {{ .Values.certificates.keySize }}
keyAlgorithm: {{ .Values.certificates.keyAlgorithm }}
keySize: {{ .Values.certificates.keySize }}
duration: {{ .Values.certificates.duration }}
renewBefore: {{ .Values.certificates.renewBefore }}
{{- end }}


@ -1,48 +0,0 @@
{{- if .Values.filer.enabled }}
{{- if .Values.filer.ingress.enabled }}
{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion }}
apiVersion: networking.k8s.io/v1
{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion }}
apiVersion: networking.k8s.io/v1beta1
{{- else }}
apiVersion: extensions/v1beta1
{{- end }}
kind: Ingress
metadata:
name: ingress-{{ template "seaweedfs.name" . }}-filer
namespace: {{ .Release.Namespace }}
{{- with .Values.filer.ingress.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
labels:
app.kubernetes.io/name: {{ template "seaweedfs.name" . }}
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: filer
spec:
ingressClassName: {{ .Values.filer.ingress.className | quote }}
tls:
{{ .Values.filer.ingress.tls | default list | toYaml | nindent 6}}
rules:
- http:
paths:
- path: /sw-filer/?(.*)
pathType: ImplementationSpecific
backend:
{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion }}
service:
name: {{ template "seaweedfs.name" . }}-filer
port:
number: {{ .Values.filer.port }}
#name:
{{- else }}
serviceName: {{ template "seaweedfs.name" . }}-filer
servicePort: {{ .Values.filer.port }}
{{- end }}
{{- if .Values.filer.ingress.host }}
host: {{ .Values.filer.ingress.host }}
{{- end }}
{{- end }}
{{- end }}


@ -1,22 +1,14 @@
{{- if .Values.filer.enabled }}
apiVersion: v1
kind: Service
metadata:
name: {{ template "seaweedfs.name" . }}-filer-client
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: {{ template "seaweedfs.name" . }}
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: filer
app: {{ template "seaweedfs.name" . }}
component: filer
{{- if .Values.filer.metricsPort }}
monitoring: "true"
{{- end }}
{{- if .Values.filer.annotations }}
annotations:
{{- toYaml .Values.filer.annotations | nindent 4 }}
{{- end }}
spec:
clusterIP: None
ports:
@ -35,6 +27,5 @@ spec:
protocol: TCP
{{- end }}
selector:
app.kubernetes.io/name: {{ template "seaweedfs.name" . }}
app.kubernetes.io/component: filer
{{- end }}
app: {{ template "seaweedfs.name" . }}
component: filer


@ -1,4 +1,3 @@
{{- if .Values.filer.enabled }}
apiVersion: v1
kind: Service
metadata:
@ -7,15 +6,8 @@ metadata:
name: {{ template "seaweedfs.name" . }}-filer
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: {{ template "seaweedfs.name" . }}
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: filer
{{- if .Values.filer.annotations }}
annotations:
{{- toYaml .Values.filer.annotations | nindent 4 }}
{{- end }}
app: {{ template "seaweedfs.name" . }}
component: filer
spec:
clusterIP: None
publishNotReadyAddresses: true
@ -28,18 +20,6 @@ spec:
port: {{ .Values.filer.grpcPort }}
targetPort: {{ .Values.filer.grpcPort }}
protocol: TCP
{{- if .Values.filer.s3.enabled }}
- name: "swfs-s3"
port: {{ .Values.filer.s3.port }}
targetPort: {{ .Values.filer.s3.port }}
protocol: TCP
{{- if .Values.filer.s3.httpsPort }}
- name: "swfs-s3-tls"
port: {{ .Values.filer.s3.httpsPort }}
targetPort: {{ .Values.filer.s3.httpsPort }}
protocol: TCP
{{- end }}
{{- end }}
{{- if .Values.filer.metricsPort }}
- name: "metrics"
port: {{ .Values.filer.metricsPort }}
@ -47,6 +27,5 @@ spec:
protocol: TCP
{{- end }}
selector:
app.kubernetes.io/name: {{ template "seaweedfs.name" . }}
app.kubernetes.io/component: filer
{{- end }}
app: {{ template "seaweedfs.name" . }}
component: filer


@ -1,4 +1,3 @@
{{- if .Values.filer.enabled }}
{{- if .Values.filer.metricsPort }}
{{- if .Values.global.monitoring.enabled }}
apiVersion: monitoring.coreos.com/v1
@ -7,18 +6,8 @@ metadata:
name: {{ template "seaweedfs.name" . }}-filer
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: {{ template "seaweedfs.name" . }}
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: filer
{{- with .Values.global.monitoring.additionalLabels }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- if .Values.filer.annotations }}
annotations:
{{- toYaml .Values.filer.annotations | nindent 4 }}
{{- end }}
app: {{ template "seaweedfs.name" . }}
component: filer
spec:
endpoints:
- interval: 30s
@ -26,8 +15,7 @@ spec:
scrapeTimeout: 5s
selector:
matchLabels:
app.kubernetes.io/name: {{ template "seaweedfs.name" . }}
app.kubernetes.io/component: filer
{{- end }}
app: {{ template "seaweedfs.name" . }}
component: filer
{{- end }}
{{- end }}


@ -5,18 +5,13 @@ metadata:
name: {{ template "seaweedfs.name" . }}-filer
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: {{ template "seaweedfs.name" . }}
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: filer
{{- if .Values.filer.annotations }}
annotations:
{{- toYaml .Values.filer.annotations | nindent 4 }}
{{- end }}
app: {{ template "seaweedfs.name" . }}
chart: {{ template "seaweedfs.chart" . }}
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
spec:
serviceName: {{ template "seaweedfs.name" . }}-filer
podManagementPolicy: {{ .Values.filer.podManagementPolicy }}
podManagementPolicy: Parallel
replicas: {{ .Values.filer.replicas }}
{{- if (gt (int .Values.filer.updatePartition) 0) }}
updateStrategy:
@ -26,51 +21,32 @@ spec:
{{- end }}
selector:
matchLabels:
app.kubernetes.io/name: {{ template "seaweedfs.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: filer
app: {{ template "seaweedfs.name" . }}
chart: {{ template "seaweedfs.chart" . }}
release: {{ .Release.Name }}
component: filer
template:
metadata:
labels:
app.kubernetes.io/name: {{ template "seaweedfs.name" . }}
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: filer
{{- with .Values.podLabels }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.filer.podLabels }}
{{- toYaml . | nindent 8 }}
{{- end }}
annotations:
{{- with .Values.podAnnotations }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.filer.podAnnotations }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- if .Values.filer.s3.existingConfigSecret }}
{{- $configSecret := (lookup "v1" "Secret" .Release.Namespace .Values.filer.s3.existingConfigSecret) | default dict }}
checksum/s3config: {{ $configSecret | toYaml | sha256sum }}
{{- else }}
checksum/s3config: {{ include (print .Template.BasePath "/s3-secret.yaml") . | sha256sum }}
{{- end }}
app: {{ template "seaweedfs.name" . }}
chart: {{ template "seaweedfs.chart" . }}
release: {{ .Release.Name }}
component: filer
spec:
restartPolicy: {{ default .Values.global.restartPolicy .Values.filer.restartPolicy }}
{{- if .Values.filer.affinity }}
affinity:
{{ tpl .Values.filer.affinity . | nindent 8 | trim }}
{{- end }}
{{- if .Values.filer.topologySpreadConstraints }}
topologySpreadConstraints:
{{ tpl .Values.filer.topologySpreadConstraints . | nindent 8 | trim }}
{{- end }}
{{- if .Values.filer.tolerations }}
tolerations:
{{ tpl .Values.filer.tolerations . | nindent 8 | trim }}
{{- end }}
{{- include "seaweedfs.imagePullSecrets" . | nindent 6 }}
serviceAccountName: {{ .Values.filer.serviceAccountName | default .Values.global.serviceAccountName | quote }} # for deleting statefulset pods after migration
{{- if .Values.global.imagePullSecrets }}
imagePullSecrets:
- name: {{ .Values.global.imagePullSecrets }}
{{- end }}
serviceAccountName: seaweedfs-rw-sa #hack for delete pod master after migration
terminationGracePeriodSeconds: 60
{{- if .Values.filer.priorityClassName }}
priorityClassName: {{ .Values.filer.priorityClassName | quote }}
@ -80,9 +56,6 @@ spec:
initContainers:
{{ tpl .Values.filer.initContainers . | nindent 8 | trim }}
{{- end }}
{{- if .Values.filer.podSecurityContext.enabled }}
securityContext: {{- omit .Values.filer.podSecurityContext "enabled" | toYaml | nindent 8 }}
{{- end }}
containers:
- name: seaweedfs
image: {{ template "filer.image" . }}
@ -105,41 +78,23 @@ spec:
secretKeyRef:
name: secret-seaweedfs-db
key: user
optional: true
- name: WEED_MYSQL_PASSWORD
valueFrom:
secretKeyRef:
name: secret-seaweedfs-db
key: password
optional: true
- name: SEAWEEDFS_FULLNAME
value: "{{ template "seaweedfs.name" . }}"
{{- if .Values.filer.extraEnvironmentVars }}
{{- range $key, $value := .Values.filer.extraEnvironmentVars }}
- name: {{ $key }}
{{- if kindIs "string" $value }}
value: {{ $value | quote }}
{{- else }}
valueFrom:
{{ toYaml $value | nindent 16 | trim }}
{{- end -}}
{{- end }}
{{- end }}
{{- if .Values.global.extraEnvironmentVars }}
{{- range $key, $value := .Values.global.extraEnvironmentVars }}
- name: {{ $key }}
{{- if kindIs "string" $value }}
value: {{ $value | quote }}
{{- else }}
valueFrom:
{{ toYaml $value | nindent 16 | trim }}
{{- end -}}
{{- end }}
{{- end }}
{{- if .Values.filer.secretExtraEnvironmentVars }}
{{- range $key, $value := .Values.filer.secretExtraEnvironmentVars }}
- name: {{ $key }}
valueFrom: {{ toYaml $value | nindent 16 }}
{{- end }}
{{- end }}
command:
@ -147,7 +102,7 @@ spec:
- "-ec"
- |
exec /usr/bin/weed \
{{- if or (eq .Values.filer.logs.type "hostPath") (eq .Values.filer.logs.type "persistentVolumeClaim") (eq .Values.filer.logs.type "emptyDir") }}
{{- if eq .Values.filer.logs.type "hostPath" }}
-logdir=/logs \
{{- else }}
-logtostderr=true \
@ -162,9 +117,6 @@ spec:
{{- if .Values.filer.metricsPort }}
-metricsPort={{ .Values.filer.metricsPort }} \
{{- end }}
{{- if .Values.filer.metricsIp }}
-metricsIp={{ .Values.filer.metricsIp }} \
{{- end }}
{{- if .Values.filer.redirectOnRead }}
-redirectOnRead \
{{- end }}
@ -176,7 +128,7 @@ spec:
{{- end }}
-dirListLimit={{ .Values.filer.dirListLimit }} \
{{- if .Values.global.enableReplication }}
-defaultReplicaPlacement={{ .Values.global.replicationPlacement }} \
-defaultReplicaPlacement={{ .Values.global.replicationPlacment }} \
{{- else }}
-defaultReplicaPlacement={{ .Values.filer.defaultReplicaPlacement }} \
{{- end }}
@ -190,16 +142,6 @@ spec:
-encryptVolumeData \
{{- end }}
-ip=${POD_IP} \
-ip.bind={{ .Values.filer.ipBind }} \
{{- if .Values.filer.filerGroup}}
-filerGroup={{ .Values.filer.filerGroup}} \
{{- end }}
{{- if .Values.filer.rack }}
-rack={{ .Values.filer.rack }} \
{{- end }}
{{- if .Values.filer.dataCenter }}
-dataCenter={{ .Values.filer.dataCenter }} \
{{- end }}
{{- if .Values.filer.s3.enabled }}
-s3 \
-s3.port={{ .Values.filer.s3.port }} \
@ -207,13 +149,10 @@ spec:
-s3.domainName={{ .Values.filer.s3.domainName }} \
{{- end }}
{{- if .Values.global.enableSecurity }}
{{- if .Values.filer.s3.httpsPort }}
-s3.port.https={{ .Values.filer.s3.httpsPort }} \
{{- end }}
-s3.cert.file=/usr/local/share/ca-certificates/client/tls.crt \
-s3.key.file=/usr/local/share/ca-certificates/client/tls.key \
{{- end }}
{{- if eq (typeOf .Values.filer.s3.allowEmptyFolder) "bool" }}
{{- if .Values.filer.s3.allowEmptyFolder }}
-s3.allowEmptyFolder={{ .Values.filer.s3.allowEmptyFolder }} \
{{- end }}
{{- if .Values.filer.s3.enableAuth }}
@ -223,30 +162,17 @@ spec:
-s3.auditLogConfig=/etc/sw/filer_s3_auditLogConfig.json \
{{- end }}
{{- end }}
-master={{ if .Values.global.masterServer }}{{.Values.global.masterServer}}{{ else }}{{ range $index := until (.Values.master.replicas | int) }}${SEAWEEDFS_FULLNAME}-master-{{ $index }}.${SEAWEEDFS_FULLNAME}-master.{{ $.Release.Namespace }}:{{ $.Values.master.port }}{{ if lt $index (sub ($.Values.master.replicas | int) 1) }},{{ end }}{{ end }}{{ end }} \
{{- range .Values.filer.extraArgs }}
{{ . }} \
{{- end }}
-master={{ range $index := until (.Values.master.replicas | int) }}${SEAWEEDFS_FULLNAME}-master-{{ $index }}.${SEAWEEDFS_FULLNAME}-master.{{ $.Release.Namespace }}:{{ $.Values.master.port }}{{ if lt $index (sub ($.Values.master.replicas | int) 1) }},{{ end }}{{ end }}
volumeMounts:
{{- if (or (eq .Values.filer.logs.type "hostPath") (eq .Values.filer.logs.type "persistentVolumeClaim") (eq .Values.filer.logs.type "emptyDir")) }}
- name: seaweedfs-filer-log-volume
mountPath: "/logs/"
{{- end }}
{{- if .Values.filer.s3.enableAuth }}
- name: config-users
mountPath: /etc/sw
- mountPath: /etc/sw
name: config-users
readOnly: true
{{- end }}
{{- if (or .Values.filer.enablePVC (or (eq .Values.filer.data.type "hostPath") (eq .Values.filer.data.type "persistentVolumeClaim") (eq .Values.filer.data.type "emptyDir"))) }}
{{- if .Values.filer.enablePVC }}
- name: data-filer
mountPath: /data
{{- end }}
{{- if .Values.filer.notificationConfig }}
- name: notification-config
readOnly: true
mountPath: /etc/seaweedfs/notification.toml
subPath: notification.toml
{{- end }}
{{- if .Values.global.enableSecurity }}
- name: security-config
readOnly: true
@ -276,48 +202,30 @@ spec:
name: metrics
- containerPort: {{ .Values.filer.grpcPort }}
#name: swfs-filer-grpc
{{- if .Values.filer.s3.enabled }}
- containerPort: {{ .Values.filer.s3.port }}
name: swfs-s3
{{- if .Values.filer.s3.httpsPort }}
- containerPort: {{ .Values.filer.s3.httpsPort }}
name: swfs-s3-tls
{{- end }}
{{- end }}
{{- if .Values.filer.readinessProbe.enabled }}
readinessProbe:
httpGet:
path: {{ .Values.filer.readinessProbe.httpGet.path }}
path: /
port: {{ .Values.filer.port }}
scheme: {{ .Values.filer.readinessProbe.scheme }}
initialDelaySeconds: {{ .Values.filer.readinessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.filer.readinessProbe.periodSeconds }}
successThreshold: {{ .Values.filer.readinessProbe.successThreshold }}
failureThreshold: {{ .Values.filer.readinessProbe.failureThreshold }}
timeoutSeconds: {{ .Values.filer.readinessProbe.timeoutSeconds }}
{{- end }}
{{- if .Values.filer.livenessProbe.enabled }}
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 15
successThreshold: 1
failureThreshold: 100
timeoutSeconds: 10
livenessProbe:
httpGet:
path: {{ .Values.filer.livenessProbe.httpGet.path }}
path: /
port: {{ .Values.filer.port }}
scheme: {{ .Values.filer.livenessProbe.scheme }}
initialDelaySeconds: {{ .Values.filer.livenessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.filer.livenessProbe.periodSeconds }}
successThreshold: {{ .Values.filer.livenessProbe.successThreshold }}
failureThreshold: {{ .Values.filer.livenessProbe.failureThreshold }}
timeoutSeconds: {{ .Values.filer.livenessProbe.timeoutSeconds }}
{{- end }}
{{- with .Values.filer.resources }}
scheme: HTTP
initialDelaySeconds: 20
periodSeconds: 30
successThreshold: 1
failureThreshold: 5
timeoutSeconds: 10
{{- if .Values.filer.resources }}
resources:
{{- toYaml . | nindent 12 }}
{{ tpl .Values.filer.resources . | nindent 12 | trim }}
{{- end }}
{{- if .Values.filer.containerSecurityContext.enabled }}
securityContext: {{- omit .Values.filer.containerSecurityContext "enabled" | toYaml | nindent 12 }}
{{- end }}
{{- if .Values.filer.sidecars }}
{{- include "common.tplvalues.render" (dict "value" .Values.filer.sidecars "context" $) | nindent 8 }}
{{- end }}
volumes:
{{- if eq .Values.filer.logs.type "hostPath" }}
- name: seaweedfs-filer-log-volume
@ -325,48 +233,19 @@ spec:
path: {{ .Values.filer.logs.hostPathPrefix }}/logs/seaweedfs/filer
type: DirectoryOrCreate
{{- end }}
{{- if eq .Values.filer.logs.type "existingClaim" }}
- name: seaweedfs-filer-log-volume
persistentVolumeClaim:
claimName: {{ .Values.filer.logs.claimName }}
{{- end }}
{{- if eq .Values.filer.logs.type "emptyDir" }}
- name: seaweedfs-filer-log-volume
emptyDir: {}
{{- end }}
{{- if eq .Values.filer.data.type "hostPath" }}
- name: data-filer
hostPath:
path: {{ .Values.filer.data.hostPathPrefix }}/filer_store
type: DirectoryOrCreate
{{- end }}
{{- if eq .Values.filer.data.type "existingClaim" }}
- name: data-filer
persistentVolumeClaim:
claimName: {{ .Values.filer.data.claimName }}
{{- end }}
{{- if eq .Values.filer.data.type "emptyDir" }}
- name: data-filer
emptyDir: {}
{{- end }}
- name: db-schema-config-volume
configMap:
name: seaweedfs-db-init-config
{{- if and .Values.filer.s3.enabled .Values.filer.s3.enableAuth }}
- name: config-users
secret:
defaultMode: 420
{{- if .Values.filer.s3.existingConfigSecret }}
secretName: {{ .Values.filer.s3.existingConfigSecret }}
{{- else }}
secretName: seaweedfs-s3-secret
{{- end }}
{{- end }}
{{- if .Values.filer.notificationConfig }}
- name: notification-config
configMap:
name: {{ template "seaweedfs.name" . }}-notification-config
{{- end }}
{{- if .Values.global.enableSecurity }}
- name: security-config
configMap:
@ -392,7 +271,7 @@ spec:
nodeSelector:
{{ tpl .Values.filer.nodeSelector . | indent 8 | trim }}
{{- end }}
{{- if and (.Values.filer.enablePVC) (eq .Values.filer.data.type "persistentVolumeClaim") }}
{{- if .Values.filer.enablePVC }}
# DEPRECATION: Deprecate in favor of filer.data section below
volumeClaimTemplates:
- metadata:
@ -410,13 +289,9 @@ spec:
{{- $pvc_exists := include "filer.pvc_exists" . -}}
{{- if $pvc_exists }}
volumeClaimTemplates:
{{- if eq .Values.filer.data.type "persistentVolumeClaim" }}
{{- if eq .Values.filer.data.type "persistentVolumeClaim"}}
- metadata:
name: data-filer
{{- with .Values.filer.data.annotations }}
annotations:
{{- toYaml . | nindent 10 }}
{{- end }}
spec:
accessModes: [ "ReadWriteOnce" ]
storageClassName: {{ .Values.filer.data.storageClass }}
@ -424,13 +299,9 @@ spec:
requests:
storage: {{ .Values.filer.data.size }}
{{- end }}
{{- if eq .Values.filer.logs.type "persistentVolumeClaim" }}
{{- if eq .Values.filer.logs.type "persistentVolumeClaim"}}
- metadata:
name: seaweedfs-filer-log-volume
{{- with .Values.filer.logs.annotations }}
annotations:
{{- toYaml . | nindent 10 }}
{{- end }}
spec:
accessModes: [ "ReadWriteOnce" ]
storageClassName: {{ .Values.filer.logs.storageClass }}


@ -0,0 +1,67 @@
{{- if .Values.filer.ingress.enabled }}
{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion }}
apiVersion: networking.k8s.io/v1
{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion }}
apiVersion: networking.k8s.io/v1beta1
{{- else }}
apiVersion: extensions/v1beta1
{{- end }}
kind: Ingress
metadata:
name: ingress-{{ template "seaweedfs.name" . }}-filer
namespace: {{ .Release.Namespace }}
annotations:
{{ omit .Values.filer.ingress.annotations "kubernetes.io/ingress.class" | toYaml | nindent 4 }}
spec:
ingressClassName: {{ .Values.filer.ingress.className | quote }}
rules:
- http:
paths:
- path: /sw-filer/?(.*)
pathType: ImplementationSpecific
backend:
{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion }}
service:
name: {{ template "seaweedfs.name" . }}-filer
port:
number: {{ .Values.filer.port }}
#name:
{{- else }}
serviceName: {{ template "seaweedfs.name" . }}-filer
servicePort: {{ .Values.filer.port }}
{{- end }}
{{- end }}
---
{{- if .Values.master.ingress.enabled }}
{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion }}
apiVersion: networking.k8s.io/v1
{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion }}
apiVersion: networking.k8s.io/v1beta1
{{- else }}
apiVersion: extensions/v1beta1
{{- end }}
kind: Ingress
metadata:
name: ingress-{{ template "seaweedfs.name" . }}-master
namespace: {{ .Release.Namespace }}
annotations:
{{ omit .Values.master.ingress.annotations "kubernetes.io/ingress.class" | toYaml | nindent 4 }}
spec:
ingressClassName: {{ .Values.master.ingress.className | quote }}
rules:
- http:
paths:
- path: /sw-master/?(.*)
pathType: ImplementationSpecific
backend:
{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion }}
service:
name: {{ template "seaweedfs.name" . }}-master
port:
number: {{ .Values.master.port }}
#name:
{{- else }}
serviceName: {{ template "seaweedfs.name" . }}-master
servicePort: {{ .Values.master.port }}
{{- end }}
{{- end }}


@ -1,27 +1,16 @@
{{- if and .Values.global.enableSecurity (not .Values.certificates.externalCertificates.enabled)}}
apiVersion: cert-manager.io/v1{{ if .Values.global.certificates.alphacrds }}alpha1{{ end }}
{{- if .Values.global.enableSecurity }}
apiVersion: certmanager.k8s.io/v1alpha1
kind: Certificate
metadata:
name: {{ template "seaweedfs.name" . }}-master-cert
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: {{ template "seaweedfs.name" . }}
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: master
{{- if .Values.master.annotations }}
annotations:
{{- toYaml .Values.master.annotations | nindent 4 }}
{{- end }}
spec:
secretName: {{ template "seaweedfs.name" . }}-master-cert
issuerRef:
name: {{ template "seaweedfs.name" . }}-ca-issuer
kind: Issuer
name: {{ template "seaweedfs.name" . }}-clusterissuer
kind: ClusterIssuer
commonName: {{ .Values.certificates.commonName }}
subject:
organizations:
organization:
- "SeaweedFS CA"
dnsNames:
- '*.{{ .Release.Namespace }}'
@ -37,9 +26,8 @@ spec:
- {{ . }}
{{- end }}
{{- end }}
privateKey:
algorithm: {{ .Values.certificates.keyAlgorithm }}
size: {{ .Values.certificates.keySize }}
keyAlgorithm: {{ .Values.certificates.keyAlgorithm }}
keySize: {{ .Values.certificates.keySize }}
duration: {{ .Values.certificates.duration }}
renewBefore: {{ .Values.certificates.renewBefore }}
{{- end }}


@ -1,19 +0,0 @@
{{- if or .Values.master.enabled .Values.allInOne.enabled }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "seaweedfs.name" . }}-master-config
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: {{ template "seaweedfs.name" . }}
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- if .Values.master.annotations }}
annotations:
{{- toYaml .Values.master.annotations | nindent 4 }}
{{- end }}
data:
master.toml: |-
{{ .Values.master.config | nindent 4 }}
{{- end }}


@ -1,48 +0,0 @@
{{- if .Values.master.enabled }}
{{- if .Values.master.ingress.enabled }}
{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion }}
apiVersion: networking.k8s.io/v1
{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion }}
apiVersion: networking.k8s.io/v1beta1
{{- else }}
apiVersion: extensions/v1beta1
{{- end }}
kind: Ingress
metadata:
name: ingress-{{ template "seaweedfs.name" . }}-master
namespace: {{ .Release.Namespace }}
{{- with .Values.master.ingress.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
labels:
app.kubernetes.io/name: {{ template "seaweedfs.name" . }}
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: master
spec:
ingressClassName: {{ .Values.master.ingress.className | quote }}
tls:
{{ .Values.master.ingress.tls | default list | toYaml | nindent 6 }}
rules:
- http:
paths:
- path: /sw-master/?(.*)
pathType: ImplementationSpecific
backend:
{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion }}
service:
name: {{ template "seaweedfs.name" . }}-master
port:
number: {{ .Values.master.port }}
#name:
{{- else }}
serviceName: {{ template "seaweedfs.name" . }}-master
servicePort: {{ .Values.master.port }}
{{- end }}
{{- if .Values.filer.ingress.host }}
host: {{ .Values.master.ingress.host }}
{{- end }}
{{- end }}
{{- end }}


@ -1,19 +1,13 @@
{{- if .Values.master.enabled }}
apiVersion: v1
kind: Service
metadata:
name: {{ template "seaweedfs.name" . }}-master
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: {{ template "seaweedfs.name" . }}
app.kubernetes.io/component: master
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app: {{ template "seaweedfs.name" . }}
component: master
annotations:
service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
{{- if .Values.master.annotations }}
{{- toYaml .Values.master.annotations | nindent 4 }}
{{- end }}
spec:
clusterIP: None
publishNotReadyAddresses: true
@ -33,6 +27,5 @@ spec:
protocol: TCP
{{- end }}
selector:
app.kubernetes.io/name: {{ template "seaweedfs.name" . }}
app.kubernetes.io/component: master
{{- end }}
app: {{ template "seaweedfs.name" . }}
component: master


@ -1,4 +1,3 @@
{{- if .Values.master.enabled }}
{{- if .Values.master.metricsPort }}
{{- if .Values.global.monitoring.enabled }}
apiVersion: monitoring.coreos.com/v1
@ -7,18 +6,8 @@ metadata:
name: {{ template "seaweedfs.name" . }}-master
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: {{ template "seaweedfs.name" . }}
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: master
{{- with .Values.global.monitoring.additionalLabels }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- if .Values.master.annotations }}
annotations:
{{- toYaml .Values.master.annotations | nindent 4 }}
{{- end }}
app: {{ template "seaweedfs.name" . }}
component: master
spec:
endpoints:
- interval: 30s
@ -26,8 +15,7 @@ spec:
scrapeTimeout: 5s
selector:
matchLabels:
app.kubernetes.io/name: {{ template "seaweedfs.name" . }}
app.kubernetes.io/component: master
{{- end }}
app: {{ template "seaweedfs.name" . }}
component: master
{{- end }}
{{- end }}


@ -5,18 +5,13 @@ metadata:
name: {{ template "seaweedfs.name" . }}-master
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: {{ template "seaweedfs.name" . }}
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: master
{{- if .Values.master.annotations }}
annotations:
{{- toYaml .Values.master.annotations | nindent 4 }}
{{- end }}
app: {{ template "seaweedfs.name" . }}
chart: {{ template "seaweedfs.chart" . }}
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
spec:
serviceName: {{ template "seaweedfs.name" . }}-master
podManagementPolicy: {{ .Values.master.podManagementPolicy }}
podManagementPolicy: Parallel
replicas: {{ .Values.master.replicas }}
{{- if (gt (int .Values.master.updatePartition) 0) }}
updateStrategy:
@ -26,59 +21,40 @@ spec:
{{- end }}
selector:
matchLabels:
app.kubernetes.io/name: {{ template "seaweedfs.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: master
app: {{ template "seaweedfs.name" . }}
chart: {{ template "seaweedfs.chart" . }}
release: {{ .Release.Name }}
component: master
template:
metadata:
labels:
app.kubernetes.io/name: {{ template "seaweedfs.name" . }}
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: master
{{ with .Values.podLabels }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.master.podLabels }}
{{- toYaml . | nindent 8 }}
{{- end }}
annotations:
{{ with .Values.podAnnotations }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.master.podAnnotations }}
{{- toYaml . | nindent 8 }}
{{- end }}
app: {{ template "seaweedfs.name" . }}
chart: {{ template "seaweedfs.chart" . }}
release: {{ .Release.Name }}
component: master
spec:
restartPolicy: {{ default .Values.global.restartPolicy .Values.master.restartPolicy }}
{{- if .Values.master.affinity }}
affinity:
{{ tpl .Values.master.affinity . | nindent 8 | trim }}
{{- end }}
{{- if .Values.master.topologySpreadConstraints }}
topologySpreadConstraints:
{{ tpl .Values.master.topologySpreadConstraints . | nindent 8 | trim }}
{{- end }}
{{- if .Values.master.tolerations }}
tolerations:
{{ tpl .Values.master.tolerations . | nindent 8 | trim }}
{{- end }}
{{- include "seaweedfs.imagePullSecrets" . | nindent 6 }}
{{- if .Values.global.imagePullSecrets }}
imagePullSecrets:
- name: {{ .Values.global.imagePullSecrets }}
{{- end }}
terminationGracePeriodSeconds: 60
{{- if .Values.master.priorityClassName }}
priorityClassName: {{ .Values.master.priorityClassName | quote }}
{{- end }}
enableServiceLinks: false
{{- if .Values.global.createClusterRole }}
serviceAccountName: {{ .Values.master.serviceAccountName | default .Values.global.serviceAccountName | quote }} # for deleting statefulset pods after migration
{{- end }}
{{- if .Values.master.initContainers }}
initContainers:
{{ tpl .Values.master.initContainers . | nindent 8 | trim }}
{{- end }}
{{- if .Values.master.podSecurityContext.enabled }}
securityContext: {{- omit .Values.master.podSecurityContext "enabled" | toYaml | nindent 8 }}
{{- end }}
containers:
- name: seaweedfs
image: {{ template "master.image" . }}
@ -101,23 +77,13 @@ spec:
{{- if .Values.master.extraEnvironmentVars }}
{{- range $key, $value := .Values.master.extraEnvironmentVars }}
- name: {{ $key }}
{{- if kindIs "string" $value }}
value: {{ $value | quote }}
{{- else }}
valueFrom:
{{ toYaml $value | nindent 16 | trim }}
{{- end -}}
{{- end }}
{{- end }}
{{- if .Values.global.extraEnvironmentVars }}
{{- range $key, $value := .Values.global.extraEnvironmentVars }}
- name: {{ $key }}
{{- if kindIs "string" $value }}
value: {{ $value | quote }}
{{- else }}
valueFrom:
{{ toYaml $value | nindent 16 | trim }}
{{- end -}}
{{- end }}
{{- end }}
command:
@ -125,7 +91,7 @@ spec:
- "-ec"
- |
exec /usr/bin/weed \
{{- if or (eq .Values.master.logs.type "hostPath") (eq .Values.master.logs.type "persistentVolumeClaim") (eq .Values.master.logs.type "emptyDir") }}
{{- if eq .Values.master.logs.type "hostPath" }}
-logdir=/logs \
{{- else }}
-logtostderr=true \
@ -140,7 +106,7 @@ spec:
-mdir=/data \
-ip.bind={{ .Values.master.ipBind }} \
{{- if .Values.global.enableReplication }}
-defaultReplication={{ .Values.global.replicationPlacement }} \
-defaultReplication={{ .Values.global.replicationPlacment }} \
{{- else }}
-defaultReplication={{ .Values.master.defaultReplication }} \
{{- end }}
@ -153,52 +119,27 @@ spec:
{{- if .Values.master.metricsIntervalSec }}
-metrics.intervalSeconds={{ .Values.master.metricsIntervalSec }} \
{{- end }}
{{- end }}
{{- end }}
{{- if .Values.master.metricsPort }}
{{- else if .Values.master.metricsPort }}
-metricsPort={{ .Values.master.metricsPort }} \
{{- end }}
{{- if .Values.master.metricsIp }}
-metricsIp={{ .Values.master.metricsIp }} \
{{- end }}
-volumeSizeLimitMB={{ .Values.master.volumeSizeLimitMB }} \
{{- if .Values.master.disableHttp }}
-disableHttp \
{{- end }}
{{- if .Values.master.resumeState }}
-resumeState \
{{- end }}
{{- if .Values.master.raftHashicorp }}
-raftHashicorp \
{{- end }}
{{- if .Values.master.raftBootstrap }}
-raftBootstrap \
{{- end }}
{{- if .Values.master.electionTimeout }}
-electionTimeout={{ .Values.master.electionTimeout }} \
{{- end }}
{{- if .Values.master.heartbeatInterval }}
-heartbeatInterval={{ .Values.master.heartbeatInterval }} \
{{- if .Values.master.pulseSeconds }}
-pulseSeconds={{ .Values.master.pulseSeconds }} \
{{- end }}
{{- if .Values.master.garbageThreshold }}
-garbageThreshold={{ .Values.master.garbageThreshold }} \
{{- end }}
-ip=${POD_NAME}.${SEAWEEDFS_FULLNAME}-master.{{ .Release.Namespace }} \
-peers={{ range $index := until (.Values.master.replicas | int) }}${SEAWEEDFS_FULLNAME}-master-{{ $index }}.${SEAWEEDFS_FULLNAME}-master.{{ $.Release.Namespace }}:{{ $.Values.master.port }}{{ if lt $index (sub ($.Values.master.replicas | int) 1) }},{{ end }}{{ end }} \
{{- range .Values.master.extraArgs }}
{{ . }} \
{{- end }}
-peers={{ range $index := until (.Values.master.replicas | int) }}${SEAWEEDFS_FULLNAME}-master-{{ $index }}.${SEAWEEDFS_FULLNAME}-master.{{ $.Release.Namespace }}:{{ $.Values.master.port }}{{ if lt $index (sub ($.Values.master.replicas | int) 1) }},{{ end }}{{ end }}
volumeMounts:
- name : data-{{ .Release.Namespace }}
mountPath: /data
{{- if or (eq .Values.master.logs.type "hostPath") (eq .Values.master.logs.type "persistentVolumeClaim") (eq .Values.master.logs.type "emptyDir") }}
- name: seaweedfs-master-log-volume
mountPath: "/logs/"
{{- end }}
- name: master-config
readOnly: true
mountPath: /etc/seaweedfs/master.toml
subPath: master.toml
{{- if .Values.global.enableSecurity }}
- name: security-config
readOnly: true
@ -230,40 +171,32 @@ spec:
{{- end }}
- containerPort: {{ .Values.master.grpcPort }}
#name: swfs-master-grpc
{{- if .Values.master.readinessProbe.enabled }}
readinessProbe:
httpGet:
path: {{ .Values.master.readinessProbe.httpGet.path }}
path: /cluster/status
port: {{ .Values.master.port }}
scheme: {{ .Values.master.readinessProbe.scheme }}
initialDelaySeconds: {{ .Values.master.readinessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.master.readinessProbe.periodSeconds }}
successThreshold: {{ .Values.master.readinessProbe.successThreshold }}
failureThreshold: {{ .Values.master.readinessProbe.failureThreshold }}
timeoutSeconds: {{ .Values.master.readinessProbe.timeoutSeconds }}
{{- end }}
{{- if .Values.master.livenessProbe.enabled }}
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 45
successThreshold: 2
failureThreshold: 100
timeoutSeconds: 10
livenessProbe:
httpGet:
path: {{ .Values.master.livenessProbe.httpGet.path }}
path: /cluster/status
port: {{ .Values.master.port }}
scheme: {{ .Values.master.livenessProbe.scheme }}
initialDelaySeconds: {{ .Values.master.livenessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.master.livenessProbe.periodSeconds }}
successThreshold: {{ .Values.master.livenessProbe.successThreshold }}
failureThreshold: {{ .Values.master.livenessProbe.failureThreshold }}
timeoutSeconds: {{ .Values.master.livenessProbe.timeoutSeconds }}
{{- end }}
{{- with .Values.master.resources }}
scheme: HTTP
initialDelaySeconds: 20
periodSeconds: 30
successThreshold: 1
failureThreshold: 4
timeoutSeconds: 10
{{- if .Values.master.resources }}
resources:
{{- toYaml . | nindent 12 }}
{{ tpl .Values.master.resources . | nindent 12 | trim }}
{{- end }}
{{- if .Values.master.containerSecurityContext.enabled }}
securityContext: {{- omit .Values.master.containerSecurityContext "enabled" | toYaml | nindent 12 }}
{{- end }}
{{- if .Values.master.sidecars }}
{{- include "common.tplvalues.render" (dict "value" .Values.master.sidecars "context" $) | nindent 8 }}
{{- end }}
{{- $hostpath_exists := include "master.hostpath_exists" . -}}
{{- if $hostpath_exists }}
volumes:
{{- if eq .Values.master.logs.type "hostPath" }}
- name: seaweedfs-master-log-volume
@ -271,33 +204,12 @@ spec:
path: {{ .Values.master.logs.hostPathPrefix }}/logs/seaweedfs/master
type: DirectoryOrCreate
{{- end }}
{{- if eq .Values.master.logs.type "existingClaim" }}
- name: seaweedfs-master-log-volume
persistentVolumeClaim:
claimName: {{ .Values.master.logs.claimName }}
{{- end }}
{{- if eq .Values.master.logs.type "emptyDir" }}
- name: seaweedfs-master-log-volume
emptyDir: {}
{{- end }}
{{- if eq .Values.master.data.type "hostPath" }}
- name: data-{{ .Release.Namespace }}
hostPath:
path: {{ .Values.master.data.hostPathPrefix }}/seaweed-master/
type: DirectoryOrCreate
{{- end }}
{{- if eq .Values.master.data.type "existingClaim" }}
- name: data-{{ .Release.Namespace }}
persistentVolumeClaim:
claimName: {{ .Values.master.data.claimName }}
{{- end }}
{{- if eq .Values.master.data.type "emptyDir" }}
- name: data-{{ .Release.Namespace }}
emptyDir: {}
{{- end }}
- name: master-config
configMap:
name: {{ template "seaweedfs.name" . }}-master-config
{{- if .Values.global.enableSecurity }}
- name: security-config
configMap:
@ -319,20 +231,17 @@ spec:
secretName: {{ template "seaweedfs.name" . }}-client-cert
{{- end }}
{{ tpl .Values.master.extraVolumes . | indent 8 | trim }}
{{- end }}
{{- if .Values.master.nodeSelector }}
nodeSelector:
{{ tpl .Values.master.nodeSelector . | indent 8 | trim }}
{{- end }}
{{- $pvc_exists := include "master.pvc_exists" . -}}
{{- $pvc_exists := include "volume.pvc_exists" . -}}
{{- if $pvc_exists }}
volumeClaimTemplates:
{{- if eq .Values.master.data.type "persistentVolumeClaim"}}
- metadata:
name: data-{{ .Release.Namespace }}
{{- with .Values.master.data.annotations }}
annotations:
{{- toYaml . | nindent 10 }}
{{- end }}
spec:
accessModes: [ "ReadWriteOnce" ]
storageClassName: {{ .Values.master.data.storageClass }}
@ -343,10 +252,6 @@ spec:
{{- if eq .Values.master.logs.type "persistentVolumeClaim"}}
- metadata:
name: seaweedfs-master-log-volume
{{- with .Values.master.logs.annotations }}
annotations:
{{- toYaml . | nindent 10 }}
{{- end }}
spec:
accessModes: [ "ReadWriteOnce" ]
storageClassName: {{ .Values.master.logs.storageClass }}

View file

@ -1,19 +0,0 @@
{{- if and .Values.filer.enabled .Values.filer.notificationConfig }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "seaweedfs.name" . }}-notification-config
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: {{ template "seaweedfs.name" . }}
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- if .Values.filer.annotations }}
annotations:
{{- toYaml .Values.filer.annotations | nindent 4 }}
{{- end }}
data:
notification.toml: |-
{{ .Values.filer.notificationConfig | nindent 4 }}
{{- end }}

View file

@ -1,122 +0,0 @@
{{- if .Values.master.enabled }}
{{- if .Values.filer.s3.enabled }}
{{- if .Values.filer.s3.createBuckets }}
---
apiVersion: batch/v1
kind: Job
metadata:
name: "{{ $.Release.Name }}-bucket-hook"
labels:
app.kubernetes.io/managed-by: {{ .Release.Service | quote }}
app.kubernetes.io/instance: {{ .Release.Name | quote }}
annotations:
"helm.sh/hook": post-install
"helm.sh/hook-weight": "-5"
"helm.sh/hook-delete-policy": hook-succeeded
spec:
template:
metadata:
name: "{{ .Release.Name }}"
labels:
app.kubernetes.io/managed-by: {{ .Release.Service | quote }}
app.kubernetes.io/instance: {{ .Release.Name | quote }}
spec:
restartPolicy: Never
{{- if .Values.filer.podSecurityContext.enabled }}
securityContext: {{- omit .Values.filer.podSecurityContext "enabled" | toYaml | nindent 8 }}
{{- end }}
containers:
- name: post-install-job
image: {{ template "master.image" . }}
env:
- name: WEED_CLUSTER_DEFAULT
value: "sw"
- name: WEED_CLUSTER_SW_MASTER
value: "{{ template "seaweedfs.name" . }}-master.{{ .Release.Namespace }}:{{ .Values.master.port }}"
- name: WEED_CLUSTER_SW_FILER
value: "{{ template "seaweedfs.name" . }}-filer-client.{{ .Release.Namespace }}:{{ .Values.filer.port }}"
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: SEAWEEDFS_FULLNAME
value: "{{ template "seaweedfs.name" . }}"
command:
- "/bin/sh"
- "-ec"
- |
wait_for_service() {
local url=$1
local max_attempts=60 # 5 minutes total (5s * 60)
local attempt=1
echo "Waiting for service at $url..."
while [ $attempt -le $max_attempts ]; do
if wget -q --spider "$url" >/dev/null 2>&1; then
echo "Service at $url is up!"
return 0
fi
echo "Attempt $attempt: Service not ready yet, retrying in 5s..."
sleep 5
attempt=$((attempt + 1))
done
echo "Service at $url failed to become ready within 5 minutes"
exit 1
}
wait_for_service "http://$WEED_CLUSTER_SW_MASTER{{ .Values.master.readinessProbe.httpGet.path }}"
wait_for_service "http://$WEED_CLUSTER_SW_FILER{{ .Values.filer.readinessProbe.httpGet.path }}"
{{- range $reg, $props := $.Values.filer.s3.createBuckets }}
exec /bin/echo \
"s3.bucket.create --name {{ $props.name }}" |\
/usr/bin/weed shell
{{- end }}
{{- range $reg, $props := $.Values.filer.s3.createBuckets }}
{{- if $props.anonymousRead }}
exec /bin/echo \
"s3.configure --user anonymous \
--buckets {{ $props.name }} \
--actions Read \
--apply true" |\
/usr/bin/weed shell
{{- end }}
{{- end }}
{{- if .Values.filer.s3.enableAuth }}
volumeMounts:
- name: config-users
mountPath: /etc/sw
readOnly: true
{{- end }}
ports:
- containerPort: {{ .Values.master.port }}
name: swfs-master
{{- if and .Values.global.monitoring.enabled .Values.master.metricsPort }}
- containerPort: {{ .Values.master.metricsPort }}
name: metrics
{{- end }}
- containerPort: {{ .Values.master.grpcPort }}
#name: swfs-master-grpc
{{- if .Values.filer.containerSecurityContext.enabled }}
securityContext: {{- omit .Values.filer.containerSecurityContext "enabled" | toYaml | nindent 12 }}
{{- end }}
{{- if .Values.filer.s3.enableAuth }}
volumes:
- name: config-users
secret:
defaultMode: 420
{{- if not (empty .Values.filer.s3.existingConfigSecret) }}
secretName: {{ .Values.filer.s3.existingConfigSecret }}
{{- else }}
secretName: seaweedfs-s3-secret
{{- end }}
{{- end }}{{/** if .Values.filer.s3.enableAuth **/}}
{{- end }}{{/** if .Values.master.enabled **/}}
{{- end }}{{/** if .Values.filer.s3.enabled **/}}
{{- end }}{{/** if .Values.filer.s3.createBuckets **/}}
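A hedged example of the filer.s3.createBuckets list that this post-install hook iterates over. The name and anonymousRead fields match what the loop reads; the bucket names themselves are placeholders.

filer:
  s3:
    enabled: true
    enableAuth: false
    createBuckets:
      - name: example-public
        anonymousRead: true
      - name: example-private
        anonymousRead: false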


@ -5,72 +5,44 @@ metadata:
name: {{ template "seaweedfs.name" . }}-s3
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: {{ template "seaweedfs.name" . }}
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: s3
{{- if .Values.s3.annotations }}
annotations:
{{- toYaml .Values.s3.annotations | nindent 4 }}
{{- end }}
app: {{ template "seaweedfs.name" . }}
chart: {{ template "seaweedfs.chart" . }}
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
spec:
replicas: {{ .Values.s3.replicas }}
selector:
matchLabels:
app.kubernetes.io/name: {{ template "seaweedfs.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: s3
app: {{ template "seaweedfs.name" . }}
chart: {{ template "seaweedfs.chart" . }}
release: {{ .Release.Name }}
component: s3
template:
metadata:
labels:
app.kubernetes.io/name: {{ template "seaweedfs.name" . }}
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: s3
{{ with .Values.podLabels }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.s3.podLabels }}
{{- toYaml . | nindent 8 }}
{{- end }}
annotations:
{{ with .Values.podAnnotations }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.s3.podAnnotations }}
{{- toYaml . | nindent 8 }}
{{- end }}
app: {{ template "seaweedfs.name" . }}
chart: {{ template "seaweedfs.chart" . }}
release: {{ .Release.Name }}
component: s3
spec:
restartPolicy: {{ default .Values.global.restartPolicy .Values.s3.restartPolicy }}
{{- if .Values.s3.affinity }}
affinity:
{{ tpl .Values.s3.affinity . | nindent 8 | trim }}
{{- end }}
{{- if .Values.s3.topologySpreadConstraints }}
topologySpreadConstraints:
{{ tpl .Values.s3.topologySpreadConstraints . | nindent 8 | trim }}
{{- end }}
{{- if .Values.s3.tolerations }}
tolerations:
{{ tpl .Values.s3.tolerations . | nindent 8 | trim }}
{{- end }}
{{- include "seaweedfs.imagePullSecrets" . | nindent 6 }}
{{- if .Values.global.imagePullSecrets }}
imagePullSecrets:
- name: {{ .Values.global.imagePullSecrets }}
{{- end }}
terminationGracePeriodSeconds: 10
{{- if .Values.s3.priorityClassName }}
priorityClassName: {{ .Values.s3.priorityClassName | quote }}
{{- end }}
enableServiceLinks: false
{{- if .Values.s3.serviceAccountName }}
serviceAccountName: {{ .Values.s3.serviceAccountName | quote }}
{{- end }}
{{- if .Values.s3.initContainers }}
initContainers:
{{ tpl .Values.s3.initContainers . | nindent 8 | trim }}
{{- end }}
{{- if .Values.s3.podSecurityContext.enabled }}
securityContext: {{- omit .Values.s3.podSecurityContext "enabled" | toYaml | nindent 8 }}
{{- end }}
containers:
- name: seaweedfs
image: {{ template "s3.image" . }}
@@ -90,26 +62,10 @@ spec:
fieldPath: metadata.namespace
- name: SEAWEEDFS_FULLNAME
value: "{{ template "seaweedfs.name" . }}"
{{- if .Values.s3.extraEnvironmentVars }}
{{- range $key, $value := .Values.s3.extraEnvironmentVars }}
- name: {{ $key }}
{{- if kindIs "string" $value }}
value: {{ $value | quote }}
{{- else }}
valueFrom:
{{ toYaml $value | nindent 16 | trim }}
{{- end -}}
{{- end }}
{{- end }}
{{- if .Values.global.extraEnvironmentVars }}
{{- range $key, $value := .Values.global.extraEnvironmentVars }}
- name: {{ $key }}
{{- if kindIs "string" $value }}
value: {{ $value | quote }}
{{- else }}
valueFrom:
{{ toYaml $value | nindent 16 | trim }}
{{- end -}}
{{- end }}
{{- end }}
command:
@@ -117,7 +73,7 @@ spec:
- "-ec"
- |
exec /usr/bin/weed \
{{- if or (eq .Values.s3.logs.type "hostPath") (eq .Values.s3.logs.type "emptyDir") }}
{{- if eq .Values.s3.logs.type "hostPath" }}
-logdir=/logs \
{{- else }}
-logtostderr=true \
@@ -134,16 +90,13 @@ spec:
-metricsPort {{ .Values.s3.metricsPort }} \
{{- end }}
{{- if .Values.global.enableSecurity }}
{{- if .Values.s3.httpsPort }}
-port.https={{ .Values.s3.httpsPort }} \
{{- end }}
-cert.file=/usr/local/share/ca-certificates/client/tls.crt \
-key.file=/usr/local/share/ca-certificates/client/tls.key \
{{- end }}
{{- if .Values.s3.domainName }}
-domainName={{ .Values.s3.domainName }} \
{{- end }}
{{- if eq (typeOf .Values.s3.allowEmptyFolder) "bool" }}
{{- if .Values.s3.allowEmptyFolder }}
-allowEmptyFolder={{ .Values.s3.allowEmptyFolder }} \
{{- end }}
{{- if .Values.s3.enableAuth }}
@@ -154,15 +107,11 @@ spec:
{{- end }}
-filer={{ template "seaweedfs.name" . }}-filer-client.{{ .Release.Namespace }}:{{ .Values.filer.port }}
volumeMounts:
{{- if or (eq .Values.s3.logs.type "hostPath") (eq .Values.s3.logs.type "emptyDir") }}
- name: logs
mountPath: "/logs/"
{{- end }}
{{- if .Values.s3.enableAuth }}
- mountPath: /etc/sw
name: config-users
readOnly: true
{{- end }}
{{- if .Values.global.enableSecurity }}
- name: security-config
readOnly: true
@@ -188,69 +137,45 @@ spec:
ports:
- containerPort: {{ .Values.s3.port }}
name: swfs-s3
{{- if .Values.s3.httpsPort }}
- containerPort: {{ .Values.s3.httpsPort }}
name: swfs-s3-tls
{{- end }}
{{- if .Values.s3.metricsPort }}
- containerPort: {{ .Values.s3.metricsPort }}
name: metrics
name: "metrics"
{{- end }}
{{- if .Values.s3.readinessProbe.enabled }}
readinessProbe:
httpGet:
path: {{ .Values.s3.readinessProbe.httpGet.path }}
path: /status
port: {{ .Values.s3.port }}
scheme: {{ .Values.s3.readinessProbe.scheme }}
initialDelaySeconds: {{ .Values.s3.readinessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.s3.readinessProbe.periodSeconds }}
successThreshold: {{ .Values.s3.readinessProbe.successThreshold }}
failureThreshold: {{ .Values.s3.readinessProbe.failureThreshold }}
timeoutSeconds: {{ .Values.s3.readinessProbe.timeoutSeconds }}
{{- end }}
{{- if .Values.s3.livenessProbe.enabled }}
scheme: HTTP
initialDelaySeconds: 15
periodSeconds: 15
successThreshold: 1
failureThreshold: 100
timeoutSeconds: 10
livenessProbe:
httpGet:
path: {{ .Values.s3.livenessProbe.httpGet.path }}
path: /status
port: {{ .Values.s3.port }}
scheme: {{ .Values.s3.livenessProbe.scheme }}
initialDelaySeconds: {{ .Values.s3.livenessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.s3.livenessProbe.periodSeconds }}
successThreshold: {{ .Values.s3.livenessProbe.successThreshold }}
failureThreshold: {{ .Values.s3.livenessProbe.failureThreshold }}
timeoutSeconds: {{ .Values.s3.livenessProbe.timeoutSeconds }}
{{- end }}
{{- with .Values.s3.resources }}
scheme: HTTP
initialDelaySeconds: 20
periodSeconds: 60
successThreshold: 1
failureThreshold: 20
timeoutSeconds: 10
{{- if .Values.s3.resources }}
resources:
{{- toYaml . | nindent 12 }}
{{ tpl .Values.s3.resources . | nindent 12 | trim }}
{{- end }}
{{- if .Values.s3.containerSecurityContext.enabled }}
securityContext: {{- omit .Values.s3.containerSecurityContext "enabled" | toYaml | nindent 12 }}
{{- end }}
{{- if .Values.s3.sidecars }}
{{- include "common.tplvalues.render" (dict "value" .Values.s3.sidecars "context" $) | nindent 8 }}
{{- end }}
volumes:
{{- if .Values.s3.enableAuth }}
- name: config-users
secret:
defaultMode: 420
{{- if .Values.s3.existingConfigSecret }}
secretName: {{ .Values.s3.existingConfigSecret }}
{{- else }}
secretName: seaweedfs-s3-secret
{{- end }}
{{- end }}
{{- if eq .Values.s3.logs.type "hostPath" }}
- name: logs
hostPath:
path: {{ .Values.s3.logs.hostPathPrefix }}/logs/seaweedfs/s3
type: DirectoryOrCreate
{{- end }}
{{- if eq .Values.s3.logs.type "emptyDir" }}
- name: logs
emptyDir: {}
{{- end }}
{{- if .Values.global.enableSecurity }}
- name: security-config
configMap:


@@ -1,46 +0,0 @@
{{- if .Values.s3.ingress.enabled }}
{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion }}
apiVersion: networking.k8s.io/v1
{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion }}
apiVersion: networking.k8s.io/v1beta1
{{- else }}
apiVersion: extensions/v1beta1
{{- end }}
kind: Ingress
metadata:
name: ingress-{{ template "seaweedfs.name" . }}-s3
namespace: {{ .Release.Namespace }}
{{- with .Values.s3.ingress.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
labels:
app.kubernetes.io/name: {{ template "seaweedfs.name" . }}
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: s3
spec:
ingressClassName: {{ .Values.s3.ingress.className | quote }}
tls:
{{ .Values.s3.ingress.tls | default list | toYaml | nindent 6}}
rules:
- http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion }}
service:
name: {{ template "seaweedfs.name" . }}-s3
port:
number: {{ .Values.s3.port }}
#name:
{{- else }}
serviceName: {{ template "seaweedfs.name" . }}-s3
servicePort: {{ .Values.s3.port }}
{{- end }}
{{- if .Values.s3.ingress.host }}
host: {{ .Values.s3.ingress.host }}
{{- end }}
{{- end }}
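The ingress template above is toggled through the s3.ingress values; an illustrative snippet, assuming a hypothetical nginx ingress class and a placeholder host:

s3:
  ingress:
    enabled: true
    className: "nginx"               # illustrative class name
    host: s3.example.internal        # illustrative host
    annotations: {}
    tls: []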


@@ -1,35 +0,0 @@
{{- if or (and (or .Values.s3.enabled .Values.allInOne.enabled) .Values.s3.enableAuth (not .Values.s3.existingConfigSecret)) (and .Values.filer.s3.enabled .Values.filer.s3.enableAuth (not .Values.filer.s3.existingConfigSecret)) }}
{{- $access_key_admin := include "getOrGeneratePassword" (dict "namespace" .Release.Namespace "secretName" "seaweedfs-s3-secret" "key" "admin_access_key_id" "length" 20) -}}
{{- $secret_key_admin := include "getOrGeneratePassword" (dict "namespace" .Release.Namespace "secretName" "seaweedfs-s3-secret" "key" "admin_secret_access_key" "length" 40) -}}
{{- $access_key_read := include "getOrGeneratePassword" (dict "namespace" .Release.Namespace "secretName" "seaweedfs-s3-secret" "key" "read_access_key_id" "length" 20) -}}
{{- $secret_key_read := include "getOrGeneratePassword" (dict "namespace" .Release.Namespace "secretName" "seaweedfs-s3-secret" "key" "read_secret_access_key" "length" 40) -}}
apiVersion: v1
kind: Secret
type: Opaque
metadata:
name: seaweedfs-s3-secret
namespace: {{ .Release.Namespace }}
annotations:
"helm.sh/resource-policy": keep
"helm.sh/hook": "pre-install,pre-upgrade"
labels:
app.kubernetes.io/name: {{ template "seaweedfs.name" . }}
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: s3
stringData:
admin_access_key_id: {{ $access_key_admin }}
admin_secret_access_key: {{ $secret_key_admin }}
read_access_key_id: {{ $access_key_read }}
read_secret_access_key: {{ $secret_key_read }}
seaweedfs_s3_config: '{"identities":[{"name":"anvAdmin","credentials":[{"accessKey":"{{ $access_key_admin }}","secretKey":"{{ $secret_key_admin }}"}],"actions":["Admin","Read","Write"]},{"name":"anvReadOnly","credentials":[{"accessKey":"{{ $access_key_read }}","secretKey":"{{ $secret_key_read }}"}],"actions":["Read"]}]}'
{{- if .Values.filer.s3.auditLogConfig }}
filer_s3_auditLogConfig.json: |
{{ toJson .Values.filer.s3.auditLogConfig | nindent 4 }}
{{- end }}
{{- if .Values.s3.auditLogConfig }}
s3_auditLogConfig.json: |
{{ toJson .Values.s3.auditLogConfig | nindent 4 }}
{{- end }}
{{- end }}
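As an alternative to the generated secret above, the chart can be pointed at a pre-created secret through s3.existingConfigSecret / filer.s3.existingConfigSecret (see the volume definitions earlier in this diff). A minimal values sketch, assuming a hypothetical secret named my-seaweedfs-s3 that carries the identities JSON under the same seaweedfs_s3_config key used above:

s3:
  enableAuth: true
  existingConfigSecret: my-seaweedfs-s3    # hypothetical; must already exist in the release namespace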

Some files were not shown because too many files have changed in this diff.