Mirror of https://github.com/chrislusf/seaweedfs, synced 2025-07-30 23:42:48 +02:00
Compare commits
No commits in common. "master" and "1.81" have entirely different histories.
1645 changed files with 36629 additions and 374437 deletions
4 .github/FUNDING.yml vendored
@@ -1,4 +0,0 @@
# These are supported funding model platforms

github: chrislusf
patreon: seaweedfs
@@ -9,12 +9,11 @@ assignees: ''

 Sponsors SeaweedFS via Patreon https://www.patreon.com/seaweedfs
 Report issues here. Ask questions here https://stackoverflow.com/questions/tagged/seaweedfs
-Please ask questions in https://github.com/seaweedfs/seaweedfs/discussions

 example of a good issue report:
-https://github.com/seaweedfs/seaweedfs/issues/1005
+https://github.com/chrislusf/seaweedfs/issues/1005
 example of a bad issue report:
-https://github.com/seaweedfs/seaweedfs/issues/1008
+https://github.com/chrislusf/seaweedfs/issues/1008

 **Describe the bug**
 A clear and concise description of what the bug is.
10 .github/dependabot.yml vendored
@@ -1,10 +0,0 @@
version: 2
updates:
  - package-ecosystem: "github-actions"
    directory: "/"
    schedule:
      interval: "weekly"
  - package-ecosystem: gomod
    directory: "/"
    schedule:
      interval: weekly
15 .github/pull_request_template.md vendored
@@ -1,15 +0,0 @@
# What problem are we solving?



# How are we solving the problem?



# How is the PR tested?



# Checks
- [ ] I have added unit tests if possible.
- [ ] I will add related wiki document changes and link to this PR after merging.
124 .github/workflows/binaries_dev.yml vendored
@@ -1,124 +0,0 @@
name: "go: build dev binaries"

on:
  push:
    branches: [ master ]

permissions:
  contents: read

jobs:

  cleanup:
    permissions:
      contents: write # for mknejp/delete-release-assets to delete release assets
    runs-on: ubuntu-latest

    steps:

      - name: Delete old release assets
        uses: mknejp/delete-release-assets@v1
        with:
          token: ${{ github.token }}
          tag: dev
          fail-if-no-assets: false
          assets: |
            weed-*

  build_dev_linux_windows:
    permissions:
      contents: write # for wangyoucao577/go-release-action to upload release assets
    needs: cleanup
    runs-on: ubuntu-latest
    strategy:
      matrix:
        goos: [linux, windows]
        goarch: [amd64]

    steps:

      - name: Check out code into the Go module directory
        uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v2

      - name: Set BUILD_TIME env
        run: echo BUILD_TIME=$(date -u +%Y%m%d-%H%M) >> ${GITHUB_ENV}

      - name: Go Release Binaries Large Disk
        uses: wangyoucao577/go-release-action@481a2c1a0f1be199722e3e9b74d7199acafc30a8 # v1.22
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          goos: ${{ matrix.goos }}
          goarch: ${{ matrix.goarch }}
          release_tag: dev
          overwrite: true
          pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
          build_flags: -tags 5BytesOffset # optional, default is
          ldflags: -s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=${{github.sha}}
          # Where to run `go build .`
          project_path: weed
          binary_name: weed-large-disk
          asset_name: "weed-large-disk-${{ env.BUILD_TIME }}-${{ matrix.goos }}-${{ matrix.goarch }}"

      - name: Go Release Binaries Normal Volume Size
        uses: wangyoucao577/go-release-action@481a2c1a0f1be199722e3e9b74d7199acafc30a8 # v1.22
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          goos: ${{ matrix.goos }}
          goarch: ${{ matrix.goarch }}
          release_tag: dev
          overwrite: true
          pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
          ldflags: -s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=${{github.sha}}
          # Where to run `go build .`
          project_path: weed
          binary_name: weed-normal-disk
          asset_name: "weed-${{ env.BUILD_TIME }}-${{ matrix.goos }}-${{ matrix.goarch }}"

  build_dev_darwin:
    permissions:
      contents: write # for wangyoucao577/go-release-action to upload release assets
    needs: build_dev_linux_windows
    runs-on: ubuntu-latest
    strategy:
      matrix:
        goos: [darwin]
        goarch: [amd64, arm64]

    steps:

      - name: Check out code into the Go module directory
        uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v2

      - name: Set BUILD_TIME env
        run: echo BUILD_TIME=$(date -u +%Y%m%d-%H%M) >> ${GITHUB_ENV}

      - name: Go Release Binaries Large Disk
        uses: wangyoucao577/go-release-action@481a2c1a0f1be199722e3e9b74d7199acafc30a8 # v1.22
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          goos: ${{ matrix.goos }}
          goarch: ${{ matrix.goarch }}
          release_tag: dev
          overwrite: true
          pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
          build_flags: -tags 5BytesOffset # optional, default is
          ldflags: -s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=${{github.sha}}
          # Where to run `go build .`
          project_path: weed
          binary_name: weed-large-disk
          asset_name: "weed-large-disk-${{ env.BUILD_TIME }}-${{ matrix.goos }}-${{ matrix.goarch }}"

      - name: Go Release Binaries Normal Volume Size
        uses: wangyoucao577/go-release-action@481a2c1a0f1be199722e3e9b74d7199acafc30a8 # v1.22
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          goos: ${{ matrix.goos }}
          goarch: ${{ matrix.goarch }}
          release_tag: dev
          overwrite: true
          pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
          ldflags: -s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=${{github.sha}}
          # Where to run `go build .`
          project_path: weed
          binary_name: weed-normal-disk
          asset_name: "weed-${{ env.BUILD_TIME }}-${{ matrix.goos }}-${{ matrix.goarch }}"
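Every build step above stamps the commit hash into the binary through the ldflag -X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=${{github.sha}}. As a minimal sketch of the Go side of that mechanism (the real weed/util/version package has more fields than shown here):

// Minimal sketch of the variable targeted by the workflows' ldflags;
// everything beyond COMMIT is illustrative, not SeaweedFS's exact code.
package version

import "fmt"

// COMMIT must be a package-level string var (not a const) so that
// `go build -ldflags "-X <module>/weed/util/version.COMMIT=<sha>"`
// can overwrite it at link time.
var COMMIT = "unknown"

// VersionString reports the linker-injected commit alongside a base version.
func VersionString(base string) string {
	return fmt.Sprintf("%s commit %s", base, COMMIT)
}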
59 .github/workflows/binaries_release0.yml vendored
@@ -1,59 +0,0 @@
# This is a basic workflow to help you get started with Actions

name: "go: build versioned binaries for windows"

on:
  push:
    tags:
      - '*'

  # Allows you to run this workflow manually from the Actions tab
  workflow_dispatch:

# A workflow run is made up of one or more jobs that can run sequentially or in parallel
permissions:
  contents: read

jobs:

  build-release-binaries_windows:
    permissions:
      contents: write # for wangyoucao577/go-release-action to upload release assets
    runs-on: ubuntu-latest
    strategy:
      matrix:
        goos: [windows]
        goarch: [amd64]

    # Steps represent a sequence of tasks that will be executed as part of the job
    steps:
      # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
      - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v2
      - name: Go Release Binaries Normal Volume Size
        uses: wangyoucao577/go-release-action@481a2c1a0f1be199722e3e9b74d7199acafc30a8 # v1.22
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          goos: ${{ matrix.goos }}
          goarch: ${{ matrix.goarch }}
          overwrite: true
          pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
          # build_flags: -tags 5BytesOffset # optional, default is
          ldflags: -s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=${{github.sha}}
          # Where to run `go build .`
          project_path: weed
          binary_name: weed
          asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}"
      - name: Go Release Large Disk Binaries
        uses: wangyoucao577/go-release-action@481a2c1a0f1be199722e3e9b74d7199acafc30a8 # v1.22
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          goos: ${{ matrix.goos }}
          goarch: ${{ matrix.goarch }}
          overwrite: true
          pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
          build_flags: -tags 5BytesOffset # optional, default is
          ldflags: -s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=${{github.sha}}
          # Where to run `go build .`
          project_path: weed
          binary_name: weed
          asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}_large_disk"
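The normal and large-disk variants in these release workflows differ only in build_flags: -tags 5BytesOffset. A hedged sketch of how such a build tag typically switches a compile-time constant (the file name and exact values are illustrative; SeaweedFS's real offset types live elsewhere in weed/storage):

//go:build 5BytesOffset

// offset_5bytes.go (illustrative name): compiled only when the binary is
// built with `go build -tags 5BytesOffset`. A sibling file guarded by
// //go:build !5BytesOffset would define OffsetSize = 4 for the default build.
package types

// A 5-byte needle offset, multiplied by the 8-byte alignment unit, lets a
// single volume address roughly 8 * 2^40 bytes instead of 8 * 2^32.
const OffsetSize = 5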
59 .github/workflows/binaries_release1.yml vendored
@@ -1,59 +0,0 @@
# This is a basic workflow to help you get started with Actions

name: "go: build versioned binaries for linux"

on:
  push:
    tags:
      - '*'

  # Allows you to run this workflow manually from the Actions tab
  workflow_dispatch:

# A workflow run is made up of one or more jobs that can run sequentially or in parallel
permissions:
  contents: read

jobs:

  build-release-binaries_linux:
    permissions:
      contents: write # for wangyoucao577/go-release-action to upload release assets
    runs-on: ubuntu-latest
    strategy:
      matrix:
        goos: [linux]
        goarch: [amd64, arm, arm64]

    # Steps represent a sequence of tasks that will be executed as part of the job
    steps:
      # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
      - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v2
      - name: Go Release Binaries Normal Volume Size
        uses: wangyoucao577/go-release-action@481a2c1a0f1be199722e3e9b74d7199acafc30a8 # v1.22
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          goos: ${{ matrix.goos }}
          goarch: ${{ matrix.goarch }}
          overwrite: true
          pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
          # build_flags: -tags 5BytesOffset # optional, default is
          ldflags: -s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=${{github.sha}}
          # Where to run `go build .`
          project_path: weed
          binary_name: weed
          asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}"
      - name: Go Release Large Disk Binaries
        uses: wangyoucao577/go-release-action@481a2c1a0f1be199722e3e9b74d7199acafc30a8 # v1.22
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          goos: ${{ matrix.goos }}
          goarch: ${{ matrix.goarch }}
          overwrite: true
          pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
          build_flags: -tags 5BytesOffset # optional, default is
          ldflags: -s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=${{github.sha}}
          # Where to run `go build .`
          project_path: weed
          binary_name: weed
          asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}_large_disk"
59 .github/workflows/binaries_release2.yml vendored
@@ -1,59 +0,0 @@
# This is a basic workflow to help you get started with Actions

name: "go: build versioned binaries for darwin"

on:
  push:
    tags:
      - '*'

  # Allows you to run this workflow manually from the Actions tab
  workflow_dispatch:

# A workflow run is made up of one or more jobs that can run sequentially or in parallel
permissions:
  contents: read

jobs:

  build-release-binaries_darwin:
    permissions:
      contents: write # for wangyoucao577/go-release-action to upload release assets
    runs-on: ubuntu-latest
    strategy:
      matrix:
        goos: [darwin]
        goarch: [amd64, arm64]

    # Steps represent a sequence of tasks that will be executed as part of the job
    steps:
      # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
      - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v2
      - name: Go Release Binaries Normal Volume Size
        uses: wangyoucao577/go-release-action@481a2c1a0f1be199722e3e9b74d7199acafc30a8 # v1.22
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          goos: ${{ matrix.goos }}
          goarch: ${{ matrix.goarch }}
          overwrite: true
          pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
          # build_flags: -tags 5BytesOffset # optional, default is
          ldflags: -s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=${{github.sha}}
          # Where to run `go build .`
          project_path: weed
          binary_name: weed
          asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}"
      - name: Go Release Large Disk Binaries
        uses: wangyoucao577/go-release-action@481a2c1a0f1be199722e3e9b74d7199acafc30a8 # v1.22
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          goos: ${{ matrix.goos }}
          goarch: ${{ matrix.goarch }}
          overwrite: true
          pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
          build_flags: -tags 5BytesOffset # optional, default is
          ldflags: -s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=${{github.sha}}
          # Where to run `go build .`
          project_path: weed
          binary_name: weed
          asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}_large_disk"
59 .github/workflows/binaries_release3.yml vendored
@@ -1,59 +0,0 @@
# This is a basic workflow to help you get started with Actions

name: "go: build versioned binaries for freebsd"

on:
  push:
    tags:
      - '*'

  # Allows you to run this workflow manually from the Actions tab
  workflow_dispatch:

# A workflow run is made up of one or more jobs that can run sequentially or in parallel
permissions:
  contents: read

jobs:

  build-release-binaries_freebsd:
    permissions:
      contents: write # for wangyoucao577/go-release-action to upload release assets
    runs-on: ubuntu-latest
    strategy:
      matrix:
        goos: [freebsd]
        goarch: [amd64, arm, arm64]

    # Steps represent a sequence of tasks that will be executed as part of the job
    steps:
      # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
      - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v2
      - name: Go Release Binaries Normal Volume Size
        uses: wangyoucao577/go-release-action@481a2c1a0f1be199722e3e9b74d7199acafc30a8 # v1.22
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          goos: ${{ matrix.goos }}
          goarch: ${{ matrix.goarch }}
          overwrite: true
          pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
          # build_flags: -tags 5BytesOffset # optional, default is
          ldflags: -s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=${{github.sha}}
          # Where to run `go build .`
          project_path: weed
          binary_name: weed
          asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}"
      - name: Go Release Large Disk Binaries
        uses: wangyoucao577/go-release-action@481a2c1a0f1be199722e3e9b74d7199acafc30a8 # v1.22
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          goos: ${{ matrix.goos }}
          goarch: ${{ matrix.goarch }}
          overwrite: true
          pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
          build_flags: -tags 5BytesOffset # optional, default is
          ldflags: -s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=${{github.sha}}
          # Where to run `go build .`
          project_path: weed
          binary_name: weed
          asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}_large_disk"
60 .github/workflows/binaries_release4.yml vendored
@@ -1,60 +0,0 @@
# This is a basic workflow to help you get started with Actions

name: "go: build versioned binaries for linux with all tags"

on:
  push:
    tags:
      - '*'

  # Allows you to run this workflow manually from the Actions tab
  workflow_dispatch:

# A workflow run is made up of one or more jobs that can run sequentially or in parallel
permissions:
  contents: read

jobs:

  build-release-binaries_linux:
    permissions:
      contents: write # for wangyoucao577/go-release-action to upload release assets
    runs-on: ubuntu-latest
    strategy:
      matrix:
        goos: [linux]
        goarch: [amd64]

    # Steps represent a sequence of tasks that will be executed as part of the job
    steps:
      # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
      - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v2
      - name: Go Release Binaries Normal Volume Size
        uses: wangyoucao577/go-release-action@481a2c1a0f1be199722e3e9b74d7199acafc30a8 # v1.22
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          goos: ${{ matrix.goos }}
          goarch: ${{ matrix.goarch }}
          overwrite: true
          build_flags: -tags elastic,gocdk,rclone,sqlite,tarantool,tikv,ydb
          pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
          # build_flags: -tags 5BytesOffset # optional, default is
          ldflags: -s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=${{github.sha}}
          # Where to run `go build .`
          project_path: weed
          binary_name: weed
          asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}_full"
      - name: Go Release Large Disk Binaries
        uses: wangyoucao577/go-release-action@481a2c1a0f1be199722e3e9b74d7199acafc30a8 # v1.22
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          goos: ${{ matrix.goos }}
          goarch: ${{ matrix.goarch }}
          overwrite: true
          pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
          build_flags: -tags 5BytesOffset,elastic,gocdk,rclone,sqlite,tarantool,tikv,ydb
          ldflags: -s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=${{github.sha}}
          # Where to run `go build .`
          project_path: weed
          binary_name: weed
          asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}_full_large_disk"
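The _full variants above compile optional backends in by listing feature tags in -tags. In Go, a tag like sqlite usually gates a small file whose only job is to link the extra store in via a blank import; a sketch of the pattern under that assumption (the package and import path are illustrative, not necessarily SeaweedFS's exact layout):

//go:build sqlite

// Compiled only for `go build -tags sqlite`, or for a tag list containing
// it, as in -tags elastic,gocdk,rclone,sqlite,tarantool,tikv,ydb above.
// The blank import runs the store's init(), which registers the backend;
// default builds skip this file and its dependency tree entirely.
package command

import (
	_ "github.com/seaweedfs/seaweedfs/weed/filer/sqlite" // illustrative path
)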
59 .github/workflows/binaries_release5.yml vendored
@@ -1,59 +0,0 @@
# This is a basic workflow to help you get started with Actions

name: "go: build versioned binaries for openbsd"

on:
  push:
    tags:
      - '*'

  # Allows you to run this workflow manually from the Actions tab
  workflow_dispatch:

# A workflow run is made up of one or more jobs that can run sequentially or in parallel
permissions:
  contents: read

jobs:

  build-release-binaries_openbsd:
    permissions:
      contents: write # for wangyoucao577/go-release-action to upload release assets
    runs-on: ubuntu-latest
    strategy:
      matrix:
        goos: [openbsd]
        goarch: [amd64, arm, arm64]

    # Steps represent a sequence of tasks that will be executed as part of the job
    steps:
      # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
      - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v2
      - name: Go Release Binaries Normal Volume Size
        uses: wangyoucao577/go-release-action@481a2c1a0f1be199722e3e9b74d7199acafc30a8 # v1.22
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          goos: ${{ matrix.goos }}
          goarch: ${{ matrix.goarch }}
          overwrite: true
          pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
          # build_flags: -tags 5BytesOffset # optional, default is
          ldflags: -s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=${{github.sha}}
          # Where to run `go build .`
          project_path: weed
          binary_name: weed
          asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}"
      - name: Go Release Large Disk Binaries
        uses: wangyoucao577/go-release-action@481a2c1a0f1be199722e3e9b74d7199acafc30a8 # v1.22
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          goos: ${{ matrix.goos }}
          goarch: ${{ matrix.goarch }}
          overwrite: true
          pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
          build_flags: -tags 5BytesOffset # optional, default is
          ldflags: -s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=${{github.sha}}
          # Where to run `go build .`
          project_path: weed
          binary_name: weed
          asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}_large_disk"
47 .github/workflows/codeql.yml vendored
@@ -1,47 +0,0 @@
name: "Code Scanning - Action"

on:
  pull_request:

concurrency:
  group: ${{ github.head_ref }}/codeql
  cancel-in-progress: true

jobs:
  CodeQL-Build:
    # CodeQL runs on ubuntu-latest, windows-latest, and macos-latest
    runs-on: ubuntu-latest

    permissions:
      # required for all workflows
      security-events: write

    steps:
      - name: Checkout repository
        uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633

      # Initializes the CodeQL tools for scanning.
      - name: Initialize CodeQL
        uses: github/codeql-action/init@v3
        # Override language selection by uncommenting this and choosing your languages
        with:
          languages: go

      # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
      # If this step fails, then you should remove it and run the build manually (see below).
      - name: Autobuild
        uses: github/codeql-action/autobuild@v3

      # ℹ️ Command-line programs to run using the OS shell.
      # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun

      # ✏️ If the Autobuild fails above, remove it and uncomment the following
      # three lines and modify them (or add more) to build your code if your
      # project uses a compiled language

      #- run: |
      #   make bootstrap
      #   make release

      - name: Perform CodeQL Analysis
        uses: github/codeql-action/analyze@v3
66 .github/workflows/container_dev.yml vendored
@@ -1,66 +0,0 @@
name: "docker: build dev containers"

on:
  push:
    branches: [ master ]
  workflow_dispatch: {}

permissions:
  contents: read

jobs:

  build-dev-containers:
    runs-on: [ubuntu-latest]

    steps:
      -
        name: Checkout
        uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v2
      -
        name: Docker meta
        id: docker_meta
        uses: docker/metadata-action@902fa8ec7d6ecbf8d84d538b9b233a880e428804 # v3
        with:
          images: |
            chrislusf/seaweedfs
            ghcr.io/chrislusf/seaweedfs
          tags: |
            type=raw,value=dev
          labels: |
            org.opencontainers.image.title=seaweedfs
            org.opencontainers.image.description=SeaweedFS is a distributed storage system for blobs, objects, files, and data lake, to store and serve billions of files fast!
            org.opencontainers.image.vendor=Chris Lu
      -
        name: Set up QEMU
        uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v1
      -
        name: Set up Docker Buildx
        uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v1
        with:
          buildkitd-flags: "--debug"
      -
        name: Login to Docker Hub
        if: github.event_name != 'pull_request'
        uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v1
        with:
          username: ${{ secrets.DOCKER_USERNAME }}
          password: ${{ secrets.DOCKER_PASSWORD }}
      -
        name: Login to GHCR
        if: github.event_name != 'pull_request'
        uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v1
        with:
          registry: ghcr.io
          username: ${{ secrets.GHCR_USERNAME }}
          password: ${{ secrets.GHCR_TOKEN }}
      -
        name: Build
        uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v2
        with:
          context: ./docker
          push: ${{ github.event_name != 'pull_request' }}
          file: ./docker/Dockerfile.go_build
          platforms: linux/amd64, linux/arm64
          tags: ${{ steps.docker_meta.outputs.tags }}
          labels: ${{ steps.docker_meta.outputs.labels }}
67 .github/workflows/container_latest.yml vendored
@@ -1,67 +0,0 @@
name: "docker: build latest container"

on:
  push:
    tags:
      - '*'
  workflow_dispatch: {}

permissions:
  contents: read

jobs:

  build-latest-container:
    runs-on: [ubuntu-latest]

    steps:
      -
        name: Checkout
        uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v2
      -
        name: Docker meta
        id: docker_meta
        uses: docker/metadata-action@902fa8ec7d6ecbf8d84d538b9b233a880e428804 # v3
        with:
          images: |
            chrislusf/seaweedfs
            ghcr.io/chrislusf/seaweedfs
          tags: |
            type=raw,value=latest
          labels: |
            org.opencontainers.image.title=seaweedfs
            org.opencontainers.image.description=SeaweedFS is a distributed storage system for blobs, objects, files, and data lake, to store and serve billions of files fast!
            org.opencontainers.image.vendor=Chris Lu
      -
        name: Set up QEMU
        uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v1
      -
        name: Set up Docker Buildx
        uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v1
        with:
          buildkitd-flags: "--debug"
      -
        name: Login to Docker Hub
        if: github.event_name != 'pull_request'
        uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v1
        with:
          username: ${{ secrets.DOCKER_USERNAME }}
          password: ${{ secrets.DOCKER_PASSWORD }}
      -
        name: Login to GHCR
        if: github.event_name != 'pull_request'
        uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v1
        with:
          registry: ghcr.io
          username: ${{ secrets.GHCR_USERNAME }}
          password: ${{ secrets.GHCR_TOKEN }}
      -
        name: Build
        uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v2
        with:
          context: ./docker
          push: ${{ github.event_name != 'pull_request' }}
          file: ./docker/Dockerfile.go_build
          platforms: linux/amd64, linux/arm, linux/arm64, linux/386
          tags: ${{ steps.docker_meta.outputs.tags }}
          labels: ${{ steps.docker_meta.outputs.labels }}
57 .github/workflows/container_release1.yml vendored
@@ -1,57 +0,0 @@
name: "docker: build release containers for normal volume"

on:
  push:
    tags:
      - '*'
  workflow_dispatch: {}

permissions:
  contents: read

jobs:
  build-default-release-container:
    runs-on: [ubuntu-latest]

    steps:
      -
        name: Checkout
        uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v2
      -
        name: Docker meta
        id: docker_meta
        uses: docker/metadata-action@902fa8ec7d6ecbf8d84d538b9b233a880e428804 # v3
        with:
          images: |
            chrislusf/seaweedfs
          tags: |
            type=ref,event=tag
          flavor: |
            latest=false
          labels: |
            org.opencontainers.image.title=seaweedfs
            org.opencontainers.image.description=SeaweedFS is a distributed storage system for blobs, objects, files, and data lake, to store and serve billions of files fast!
            org.opencontainers.image.vendor=Chris Lu
      -
        name: Set up QEMU
        uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v1
      -
        name: Set up Docker Buildx
        uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v1
      -
        name: Login to Docker Hub
        if: github.event_name != 'pull_request'
        uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v1
        with:
          username: ${{ secrets.DOCKER_USERNAME }}
          password: ${{ secrets.DOCKER_PASSWORD }}
      -
        name: Build
        uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v2
        with:
          context: ./docker
          push: ${{ github.event_name != 'pull_request' }}
          file: ./docker/Dockerfile.go_build
          platforms: linux/amd64, linux/arm, linux/arm64, linux/386
          tags: ${{ steps.docker_meta.outputs.tags }}
          labels: ${{ steps.docker_meta.outputs.labels }}
59 .github/workflows/container_release2.yml vendored
@@ -1,59 +0,0 @@
name: "docker: build release containers for large volume"

on:
  push:
    tags:
      - '*'
  workflow_dispatch: {}

permissions:
  contents: read

jobs:

  build-large-release-container:
    runs-on: [ubuntu-latest]

    steps:
      -
        name: Checkout
        uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v2
      -
        name: Docker meta
        id: docker_meta
        uses: docker/metadata-action@902fa8ec7d6ecbf8d84d538b9b233a880e428804 # v3
        with:
          images: |
            chrislusf/seaweedfs
          tags: |
            type=ref,event=tag,suffix=_large_disk
          flavor: |
            latest=false
          labels: |
            org.opencontainers.image.title=seaweedfs
            org.opencontainers.image.description=SeaweedFS is a distributed storage system for blobs, objects, files, and data lake, to store and serve billions of files fast!
            org.opencontainers.image.vendor=Chris Lu
      -
        name: Set up QEMU
        uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v1
      -
        name: Set up Docker Buildx
        uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v1
      -
        name: Login to Docker Hub
        if: github.event_name != 'pull_request'
        uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v1
        with:
          username: ${{ secrets.DOCKER_USERNAME }}
          password: ${{ secrets.DOCKER_PASSWORD }}
      -
        name: Build
        uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v2
        with:
          context: ./docker
          push: ${{ github.event_name != 'pull_request' }}
          file: ./docker/Dockerfile.go_build
          build-args: TAGS=5BytesOffset
          platforms: linux/amd64, linux/arm, linux/arm64, linux/386
          tags: ${{ steps.docker_meta.outputs.tags }}
          labels: ${{ steps.docker_meta.outputs.labels }}
58 .github/workflows/container_release3.yml vendored
@@ -1,58 +0,0 @@
name: "docker: build release containers for rocksdb"

on:
  push:
    tags:
      - '*'
  workflow_dispatch: {}

permissions:
  contents: read

jobs:

  build-large-release-container_rocksdb:
    runs-on: [ubuntu-latest]

    steps:
      -
        name: Checkout
        uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v2
      -
        name: Docker meta
        id: docker_meta
        uses: docker/metadata-action@902fa8ec7d6ecbf8d84d538b9b233a880e428804 # v3
        with:
          images: |
            chrislusf/seaweedfs
          tags: |
            type=ref,event=tag,suffix=_large_disk_rocksdb
          flavor: |
            latest=false
          labels: |
            org.opencontainers.image.title=seaweedfs
            org.opencontainers.image.description=SeaweedFS is a distributed storage system for blobs, objects, files, and data lake, to store and serve billions of files fast!
            org.opencontainers.image.vendor=Chris Lu
      -
        name: Set up QEMU
        uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v1
      -
        name: Set up Docker Buildx
        uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v1
      -
        name: Login to Docker Hub
        if: github.event_name != 'pull_request'
        uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v1
        with:
          username: ${{ secrets.DOCKER_USERNAME }}
          password: ${{ secrets.DOCKER_PASSWORD }}
      -
        name: Build
        uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v2
        with:
          context: ./docker
          push: ${{ github.event_name != 'pull_request' }}
          file: ./docker/Dockerfile.rocksdb_large
          platforms: linux/amd64
          tags: ${{ steps.docker_meta.outputs.tags }}
          labels: ${{ steps.docker_meta.outputs.labels }}
58 .github/workflows/container_release4.yml vendored
@@ -1,58 +0,0 @@
name: "docker: build release containers for all tags"

on:
  push:
    tags:
      - '*'
  workflow_dispatch: {}

permissions:
  contents: read

jobs:
  build-default-release-container:
    runs-on: [ubuntu-latest]

    steps:
      -
        name: Checkout
        uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v2
      -
        name: Docker meta
        id: docker_meta
        uses: docker/metadata-action@902fa8ec7d6ecbf8d84d538b9b233a880e428804 # v3
        with:
          images: |
            chrislusf/seaweedfs
          tags: |
            type=ref,event=tag,suffix=_full
          flavor: |
            latest=false
          labels: |
            org.opencontainers.image.title=seaweedfs
            org.opencontainers.image.description=SeaweedFS is a distributed storage system for blobs, objects, files, and data lake, to store and serve billions of files fast!
            org.opencontainers.image.vendor=Chris Lu
      -
        name: Set up QEMU
        uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v1
      -
        name: Set up Docker Buildx
        uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v1
      -
        name: Login to Docker Hub
        if: github.event_name != 'pull_request'
        uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v1
        with:
          username: ${{ secrets.DOCKER_USERNAME }}
          password: ${{ secrets.DOCKER_PASSWORD }}
      -
        name: Build
        uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v2
        with:
          context: ./docker
          push: ${{ github.event_name != 'pull_request' }}
          file: ./docker/Dockerfile.go_build
          build-args: TAGS=elastic,gocdk,rclone,sqlite,tarantool,tikv,ydb
          platforms: linux/amd64
          tags: ${{ steps.docker_meta.outputs.tags }}
          labels: ${{ steps.docker_meta.outputs.labels }}
58 .github/workflows/container_release5.yml vendored
@@ -1,58 +0,0 @@
name: "docker: build release containers for all tags and large volume"

on:
  push:
    tags:
      - '*'
  workflow_dispatch: {}

permissions:
  contents: read

jobs:
  build-default-release-container:
    runs-on: [ubuntu-latest]

    steps:
      -
        name: Checkout
        uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v2
      -
        name: Docker meta
        id: docker_meta
        uses: docker/metadata-action@902fa8ec7d6ecbf8d84d538b9b233a880e428804 # v3
        with:
          images: |
            chrislusf/seaweedfs
          tags: |
            type=ref,event=tag,suffix=_large_disk_full
          flavor: |
            latest=false
          labels: |
            org.opencontainers.image.title=seaweedfs
            org.opencontainers.image.description=SeaweedFS is a distributed storage system for blobs, objects, files, and data lake, to store and serve billions of files fast!
            org.opencontainers.image.vendor=Chris Lu
      -
        name: Set up QEMU
        uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v1
      -
        name: Set up Docker Buildx
        uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v1
      -
        name: Login to Docker Hub
        if: github.event_name != 'pull_request'
        uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v1
        with:
          username: ${{ secrets.DOCKER_USERNAME }}
          password: ${{ secrets.DOCKER_PASSWORD }}
      -
        name: Build
        uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v2
        with:
          context: ./docker
          push: ${{ github.event_name != 'pull_request' }}
          file: ./docker/Dockerfile.go_build
          build-args: TAGS=5BytesOffset,elastic,gocdk,rclone,sqlite,tarantool,tikv,ydb
          platforms: linux/amd64
          tags: ${{ steps.docker_meta.outputs.tags }}
          labels: ${{ steps.docker_meta.outputs.labels }}
171 .github/workflows/deploy_telemetry.yml vendored
@@ -1,171 +0,0 @@
# This workflow will build and deploy the SeaweedFS telemetry server
# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-go

name: Deploy Telemetry Server

on:
  workflow_dispatch:
    inputs:
      setup:
        description: 'Run first-time server setup'
        required: true
        type: boolean
        default: false
      deploy:
        description: 'Deploy telemetry server to remote server'
        required: true
        type: boolean
        default: false

jobs:
  deploy:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4

      - name: Set up Go
        uses: actions/setup-go@v5
        with:
          go-version: '1.24'

      - name: Build Telemetry Server
        if: github.event_name == 'workflow_dispatch' && inputs.deploy
        run: |
          go mod tidy
          echo "Building telemetry server..."
          GOOS=linux GOARCH=amd64 go build -o telemetry-server ./telemetry/server/main.go
          ls -la telemetry-server
          echo "Build completed successfully"

      - name: First-time Server Setup
        if: github.event_name == 'workflow_dispatch' && inputs.setup
        env:
          SSH_PRIVATE_KEY: ${{ secrets.TELEMETRY_SSH_PRIVATE_KEY }}
          REMOTE_HOST: ${{ secrets.TELEMETRY_HOST }}
          REMOTE_USER: ${{ secrets.TELEMETRY_USER }}
        run: |
          mkdir -p ~/.ssh
          echo "$SSH_PRIVATE_KEY" > ~/.ssh/deploy_key
          chmod 600 ~/.ssh/deploy_key
          echo "Host *" > ~/.ssh/config
          echo "  StrictHostKeyChecking no" >> ~/.ssh/config

          # Create all required directories with proper permissions
          ssh -i ~/.ssh/deploy_key $REMOTE_USER@$REMOTE_HOST "
            mkdir -p ~/seaweedfs-telemetry/bin ~/seaweedfs-telemetry/logs ~/seaweedfs-telemetry/data ~/seaweedfs-telemetry/tmp && \
            chmod 755 ~/seaweedfs-telemetry/logs && \
            chmod 755 ~/seaweedfs-telemetry/data && \
            touch ~/seaweedfs-telemetry/logs/telemetry.log ~/seaweedfs-telemetry/logs/telemetry.error.log && \
            chmod 644 ~/seaweedfs-telemetry/logs/*.log"

          # Create systemd service file
          echo "
          [Unit]
          Description=SeaweedFS Telemetry Server
          After=network.target

          [Service]
          Type=simple
          User=$REMOTE_USER
          WorkingDirectory=/home/$REMOTE_USER/seaweedfs-telemetry
          ExecStart=/home/$REMOTE_USER/seaweedfs-telemetry/bin/telemetry-server -port=8353
          Restart=always
          RestartSec=5
          StandardOutput=append:/home/$REMOTE_USER/seaweedfs-telemetry/logs/telemetry.log
          StandardError=append:/home/$REMOTE_USER/seaweedfs-telemetry/logs/telemetry.error.log

          [Install]
          WantedBy=multi-user.target" > telemetry.service

          # Setup logrotate configuration
          echo "# SeaweedFS Telemetry service log rotation
          /home/$REMOTE_USER/seaweedfs-telemetry/logs/*.log {
              daily
              rotate 30
              compress
              delaycompress
              missingok
              notifempty
              create 644 $REMOTE_USER $REMOTE_USER
              postrotate
                  systemctl restart telemetry.service
              endscript
          }" > telemetry_logrotate

          # Copy configuration files
          scp -i ~/.ssh/deploy_key telemetry/grafana-dashboard.json $REMOTE_USER@$REMOTE_HOST:~/seaweedfs-telemetry/
          scp -i ~/.ssh/deploy_key telemetry/prometheus.yml $REMOTE_USER@$REMOTE_HOST:~/seaweedfs-telemetry/

          # Copy and install service and logrotate files
          scp -i ~/.ssh/deploy_key telemetry.service telemetry_logrotate $REMOTE_USER@$REMOTE_HOST:~/seaweedfs-telemetry/
          ssh -i ~/.ssh/deploy_key $REMOTE_USER@$REMOTE_HOST "
            sudo mv ~/seaweedfs-telemetry/telemetry.service /etc/systemd/system/ && \
            sudo mv ~/seaweedfs-telemetry/telemetry_logrotate /etc/logrotate.d/seaweedfs-telemetry && \
            sudo systemctl daemon-reload && \
            sudo systemctl enable telemetry.service"

          echo "✅ First-time setup completed successfully!"
          echo "📋 Next step: Run the deployment to install the telemetry server binary"
          echo "   1. Go to GitHub Actions → Deploy Telemetry Server"
          echo "   2. Click 'Run workflow'"
          echo "   3. Check 'Deploy telemetry server to remote server'"
          echo "   4. Click 'Run workflow'"

          rm -f ~/.ssh/deploy_key

      - name: Deploy Telemetry Server to Remote Server
        if: github.event_name == 'workflow_dispatch' && inputs.deploy
        env:
          SSH_PRIVATE_KEY: ${{ secrets.TELEMETRY_SSH_PRIVATE_KEY }}
          REMOTE_HOST: ${{ secrets.TELEMETRY_HOST }}
          REMOTE_USER: ${{ secrets.TELEMETRY_USER }}
        run: |
          mkdir -p ~/.ssh
          echo "$SSH_PRIVATE_KEY" > ~/.ssh/deploy_key
          chmod 600 ~/.ssh/deploy_key
          echo "Host *" > ~/.ssh/config
          echo "  StrictHostKeyChecking no" >> ~/.ssh/config

          # Create temp directory and copy binary
          ssh -i ~/.ssh/deploy_key $REMOTE_USER@$REMOTE_HOST "mkdir -p ~/seaweedfs-telemetry/tmp"
          scp -i ~/.ssh/deploy_key telemetry-server $REMOTE_USER@$REMOTE_HOST:~/seaweedfs-telemetry/tmp/

          # Copy updated configuration files
          scp -i ~/.ssh/deploy_key telemetry/grafana-dashboard.json $REMOTE_USER@$REMOTE_HOST:~/seaweedfs-telemetry/
          scp -i ~/.ssh/deploy_key telemetry/prometheus.yml $REMOTE_USER@$REMOTE_HOST:~/seaweedfs-telemetry/

          # Check if service exists and deploy accordingly
          ssh -i ~/.ssh/deploy_key $REMOTE_USER@$REMOTE_HOST "
            if systemctl list-unit-files telemetry.service >/dev/null 2>&1; then
              echo 'Service exists, performing update...'
              sudo systemctl stop telemetry.service
              mkdir -p ~/seaweedfs-telemetry/bin
              mv ~/seaweedfs-telemetry/tmp/telemetry-server ~/seaweedfs-telemetry/bin/
              chmod +x ~/seaweedfs-telemetry/bin/telemetry-server
              sudo systemctl start telemetry.service
              sudo systemctl status telemetry.service
            else
              echo 'ERROR: telemetry.service not found!'
              echo 'Please run the first-time setup before deploying.'
              echo 'Go to GitHub Actions → Deploy Telemetry Server → Run workflow → Check \"Run first-time server setup\"'
              exit 1
            fi"

          # Verify deployment
          ssh -i ~/.ssh/deploy_key $REMOTE_USER@$REMOTE_HOST "
            echo 'Waiting for service to start...'
            sleep 5
            curl -f http://localhost:8353/health || echo 'Health check failed'"

          rm -f ~/.ssh/deploy_key

      - name: Notify Deployment Status
        if: always()
        run: |
          if [ "${{ job.status }}" == "success" ]; then
            echo "✅ Telemetry server deployment successful"
            echo "Dashboard: http://${{ secrets.TELEMETRY_HOST }}:8353"
            echo "Metrics: http://${{ secrets.TELEMETRY_HOST }}:8353/metrics"
          else
            echo "❌ Telemetry server deployment failed"
          fi
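The deploy step verifies the service with curl -f http://localhost:8353/health. A minimal sketch of the handler that check implies (the real server built from telemetry/server/main.go serves more than this; the body text is an assumption):

// Minimal sketch of a /health endpoint on port 8353; illustrative only.
package main

import (
	"log"
	"net/http"
)

func main() {
	http.HandleFunc("/health", func(w http.ResponseWriter, r *http.Request) {
		// `curl -f` treats any non-2xx status as failure, so 200 is enough.
		w.WriteHeader(http.StatusOK)
		w.Write([]byte("ok"))
	})
	log.Fatal(http.ListenAndServe(":8353", nil))
}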
14 .github/workflows/depsreview.yml vendored
@@ -1,14 +0,0 @@
name: 'Dependency Review'
on: [pull_request]

permissions:
  contents: read

jobs:
  dependency-review:
    runs-on: ubuntu-latest
    steps:
      - name: 'Checkout Repository'
        uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
      - name: 'Dependency Review'
        uses: actions/dependency-review-action@da24556b548a50705dd671f47852072ea4c105d9
104 .github/workflows/e2e.yml vendored
@@ -1,104 +0,0 @@
name: "End to End"

on:
  push:
    branches: [ master ]
  pull_request:
    branches: [ master ]

concurrency:
  group: ${{ github.head_ref }}/e2e
  cancel-in-progress: true

permissions:
  contents: read

defaults:
  run:
    working-directory: docker

jobs:
  e2e:
    name: FUSE Mount
    runs-on: ubuntu-22.04
    timeout-minutes: 30
    steps:
      - name: Set up Go 1.x
        uses: actions/setup-go@8e57b58e57be52ac95949151e2777ffda8501267 # v2
        with:
          go-version: ^1.13
        id: go

      - name: Check out code into the Go module directory
        uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v2

      - name: Install dependencies
        run: |
          sudo apt-get update
          sudo apt-get install -y fuse

      - name: Start SeaweedFS
        timeout-minutes: 5
        run: make build_e2e && docker compose -f ./compose/e2e-mount.yml up --wait

      - name: Run FIO 4k
        timeout-minutes: 15
        run: |
          echo "Starting FIO at: $(date)"
          # Concurrent r/w
          echo 'Run randrw with size=16M bs=4k'
          docker compose -f ./compose/e2e-mount.yml exec mount timeout -k5 60 fio --name=fiotest --filename=/mnt/seaweedfs/fiotest --size=16M --rw=randrw --bs=4k --direct=1 --numjobs=8 --ioengine=libaio --group_reporting --runtime=30 --time_based=1

          echo "Verify FIO at: $(date)"
          # Verified write
          echo 'Run randwrite with size=16M bs=4k'
          docker compose -f ./compose/e2e-mount.yml exec mount timeout -k5 60 fio --name=fiotest --filename=/mnt/seaweedfs/fiotest --size=16M --rw=randwrite --bs=4k --direct=1 --numjobs=8 --ioengine=libaio --iodepth=32 --group_reporting --runtime=30 --time_based=1 --do_verify=0 --verify=crc32c --verify_backlog=1

      - name: Run FIO 128k
        timeout-minutes: 15
        run: |
          echo "Starting FIO at: $(date)"
          # Concurrent r/w
          echo 'Run randrw with size=16M bs=128k'
          docker compose -f ./compose/e2e-mount.yml exec mount timeout -k5 60 fio --name=fiotest --filename=/mnt/seaweedfs/fiotest --size=16M --rw=randrw --bs=128k --direct=1 --numjobs=8 --ioengine=libaio --iodepth=32 --group_reporting --runtime=30 --time_based=1

          echo "Verify FIO at: $(date)"
          # Verified write
          echo 'Run randwrite with size=16M bs=128k'
          docker compose -f ./compose/e2e-mount.yml exec mount timeout -k5 60 fio --name=fiotest --filename=/mnt/seaweedfs/fiotest --size=16M --rw=randwrite --bs=128k --direct=1 --numjobs=8 --ioengine=libaio --iodepth=32 --group_reporting --runtime=30 --time_based=1 --do_verify=0 --verify=crc32c --verify_backlog=1

      - name: Run FIO 1MB
        timeout-minutes: 15
        run: |
          echo "Starting FIO at: $(date)"
          # Concurrent r/w
          echo 'Run randrw with size=16M bs=1m'
          docker compose -f ./compose/e2e-mount.yml exec mount timeout -k5 60 fio --name=fiotest --filename=/mnt/seaweedfs/fiotest --size=16M --rw=randrw --bs=1m --direct=1 --numjobs=8 --ioengine=libaio --iodepth=32 --group_reporting --runtime=30 --time_based=1

          echo "Verify FIO at: $(date)"
          # Verified write
          echo 'Run randwrite with size=16M bs=1m'
          docker compose -f ./compose/e2e-mount.yml exec mount timeout -k5 60 fio --name=fiotest --filename=/mnt/seaweedfs/fiotest --size=16M --rw=randwrite --bs=1m --direct=1 --numjobs=8 --ioengine=libaio --iodepth=32 --group_reporting --runtime=30 --time_based=1 --do_verify=0 --verify=crc32c --verify_backlog=1

      - name: Save logs
        if: always()
        run: |
          docker compose -f ./compose/e2e-mount.yml logs > output.log
          echo 'Showing last 500 log lines of mount service:'
          docker compose -f ./compose/e2e-mount.yml logs --tail 500 mount

      - name: Check for data races
        if: always()
        continue-on-error: true # TODO: remove this comment to enable build failure on data races (after all are fixed)
        run: grep -A50 'DATA RACE' output.log && exit 1 || exit 0

      - name: Archive logs
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: output-logs
          path: docker/output.log

      - name: Cleanup
        if: always()
        run: docker compose -f ./compose/e2e-mount.yml down --volumes --remove-orphans --rmi all
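The "Check for data races" step greps the service logs for "DATA RACE", the banner Go's race detector prints when a binary built with -race observes conflicting accesses (the e2e images presumably build with that flag). As a minimal illustration of the kind of bug it reports:

// Illustrative data race: an unsynchronized write in a goroutine racing
// with a read in main. Built and run with `go run -race`, this prints a
// "WARNING: DATA RACE" report, which is what the grep above matches.
package main

import "fmt"

func main() {
	counter := 0
	done := make(chan struct{})
	go func() {
		counter++ // write without synchronization
		close(done)
	}()
	fmt.Println(counter) // concurrent read: flagged by the race detector
	<-done
}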
234 .github/workflows/fuse-integration.yml vendored
@@ -1,234 +0,0 @@
name: "FUSE Integration Tests"

on:
  push:
    branches: [ master, main ]
    paths:
      - 'weed/**'
      - 'test/fuse_integration/**'
      - '.github/workflows/fuse-integration.yml'
  pull_request:
    branches: [ master, main ]
    paths:
      - 'weed/**'
      - 'test/fuse_integration/**'
      - '.github/workflows/fuse-integration.yml'

concurrency:
  group: ${{ github.head_ref }}/fuse-integration
  cancel-in-progress: true

permissions:
  contents: read

env:
  GO_VERSION: '1.21'
  TEST_TIMEOUT: '45m'

jobs:
  fuse-integration:
    name: FUSE Integration Testing
    runs-on: ubuntu-22.04
    timeout-minutes: 50

    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Set up Go ${{ env.GO_VERSION }}
        uses: actions/setup-go@v5
        with:
          go-version: ${{ env.GO_VERSION }}

      - name: Install FUSE and dependencies
        run: |
          sudo apt-get update
          sudo apt-get install -y fuse libfuse-dev
          # Verify FUSE installation
          fusermount --version || true
          ls -la /dev/fuse || true

      - name: Build SeaweedFS
        run: |
          cd weed
          go build -tags "elastic gocdk sqlite ydb tarantool tikv rclone" -v .
          chmod +x weed
          # Verify binary
          ./weed version

      - name: Prepare FUSE Integration Tests
        run: |
          # Create isolated test directory to avoid Go module conflicts
          mkdir -p /tmp/seaweedfs-fuse-tests

          # Copy only the working test files to avoid Go module conflicts
          # These are the files we've verified work without package name issues
          cp test/fuse_integration/simple_test.go /tmp/seaweedfs-fuse-tests/ 2>/dev/null || echo "⚠️ simple_test.go not found"
          cp test/fuse_integration/working_demo_test.go /tmp/seaweedfs-fuse-tests/ 2>/dev/null || echo "⚠️ working_demo_test.go not found"

          # Note: Other test files (framework.go, basic_operations_test.go, etc.)
          # have Go module conflicts and are skipped until resolved

          echo "📁 Working test files copied:"
          ls -la /tmp/seaweedfs-fuse-tests/*.go 2>/dev/null || echo "ℹ️ No test files found"

          # Initialize Go module in isolated directory
          cd /tmp/seaweedfs-fuse-tests
          go mod init seaweedfs-fuse-tests
          go mod tidy

          # Verify setup
          echo "✅ FUSE integration test environment prepared"
          ls -la /tmp/seaweedfs-fuse-tests/

          echo ""
          echo "ℹ️ Current Status: Running working subset of FUSE tests"
          echo "   • simple_test.go: Package structure verification"
          echo "   • working_demo_test.go: Framework capability demonstration"
          echo "   • Full framework: Available in test/fuse_integration/ (module conflicts pending resolution)"

      - name: Run FUSE Integration Tests
        run: |
          cd /tmp/seaweedfs-fuse-tests

          echo "🧪 Running FUSE integration tests..."
          echo "============================================"

          # Run available working test files
          TESTS_RUN=0

          if [ -f "simple_test.go" ]; then
            echo "📋 Running simple_test.go..."
            go test -v -timeout=${{ env.TEST_TIMEOUT }} simple_test.go
            TESTS_RUN=$((TESTS_RUN + 1))
          fi

          if [ -f "working_demo_test.go" ]; then
            echo "📋 Running working_demo_test.go..."
            go test -v -timeout=${{ env.TEST_TIMEOUT }} working_demo_test.go
            TESTS_RUN=$((TESTS_RUN + 1))
          fi

          # Run combined test if multiple files exist
          if [ -f "simple_test.go" ] && [ -f "working_demo_test.go" ]; then
            echo "📋 Running combined tests..."
            go test -v -timeout=${{ env.TEST_TIMEOUT }} simple_test.go working_demo_test.go
          fi

          if [ $TESTS_RUN -eq 0 ]; then
            echo "⚠️ No working test files found, running module verification only"
            go version
            go mod verify
          else
            echo "✅ Successfully ran $TESTS_RUN test file(s)"
          fi

          echo "============================================"
          echo "✅ FUSE integration tests completed"

      - name: Run Extended Framework Validation
        run: |
          cd /tmp/seaweedfs-fuse-tests

          echo "🔍 Running extended framework validation..."
          echo "============================================"

          # Test individual components (only run tests that exist)
          if [ -f "simple_test.go" ]; then
            echo "Testing simple verification..."
            go test -v simple_test.go
          fi

          if [ -f "working_demo_test.go" ]; then
            echo "Testing framework demo..."
            go test -v working_demo_test.go
          fi

          # Test combined execution if both files exist
          if [ -f "simple_test.go" ] && [ -f "working_demo_test.go" ]; then
            echo "Testing combined execution..."
            go test -v simple_test.go working_demo_test.go
          elif [ -f "simple_test.go" ] || [ -f "working_demo_test.go" ]; then
            echo "✅ Individual tests already validated above"
          else
            echo "⚠️ No working test files found for combined testing"
          fi

          echo "============================================"
          echo "✅ Extended validation completed"

      - name: Generate Test Coverage Report
        run: |
          cd /tmp/seaweedfs-fuse-tests

          echo "📊 Generating test coverage report..."
          go test -v -coverprofile=coverage.out .
          go tool cover -html=coverage.out -o coverage.html

          echo "Coverage report generated: coverage.html"

      - name: Verify SeaweedFS Binary Integration
        run: |
          # Test that SeaweedFS binary is accessible from test environment
          WEED_BINARY=$(pwd)/weed/weed

          if [ -f "$WEED_BINARY" ]; then
            echo "✅ SeaweedFS binary found at: $WEED_BINARY"
            $WEED_BINARY version
            echo "Binary is ready for full integration testing"
          else
            echo "❌ SeaweedFS binary not found"
            exit 1
          fi

      - name: Upload Test Artifacts
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: fuse-integration-test-results
          path: |
            /tmp/seaweedfs-fuse-tests/coverage.out
            /tmp/seaweedfs-fuse-tests/coverage.html
            /tmp/seaweedfs-fuse-tests/*.log
          retention-days: 7

      - name: Test Summary
        if: always()
        run: |
          echo "## 🚀 FUSE Integration Test Summary" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "### Framework Status" >> $GITHUB_STEP_SUMMARY
          echo "- ✅ **Framework Design**: Complete and validated" >> $GITHUB_STEP_SUMMARY
          echo "- ✅ **Working Tests**: Core framework demonstration functional" >> $GITHUB_STEP_SUMMARY
          echo "- ⚠️ **Full Framework**: Available but requires Go module resolution" >> $GITHUB_STEP_SUMMARY
          echo "- ✅ **CI/CD Integration**: Automated testing pipeline established" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "### Test Capabilities" >> $GITHUB_STEP_SUMMARY
          echo "- 📁 **File Operations**: Create, read, write, delete, permissions" >> $GITHUB_STEP_SUMMARY
          echo "- 📂 **Directory Operations**: Create, list, delete, nested structures" >> $GITHUB_STEP_SUMMARY
          echo "- 📊 **Large Files**: Multi-megabyte file handling" >> $GITHUB_STEP_SUMMARY
          echo "- 🔄 **Concurrent Operations**: Multi-threaded stress testing" >> $GITHUB_STEP_SUMMARY
          echo "- ⚠️ **Error Scenarios**: Comprehensive error handling validation" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "### Comparison with Current Tests" >> $GITHUB_STEP_SUMMARY
          echo "| Aspect | Current (FIO) | This Framework |" >> $GITHUB_STEP_SUMMARY
          echo "|--------|---------------|----------------|" >> $GITHUB_STEP_SUMMARY
          echo "| **Scope** | Performance only | Functional + Performance |" >> $GITHUB_STEP_SUMMARY
          echo "| **Operations** | Read/Write only | All FUSE operations |" >> $GITHUB_STEP_SUMMARY
          echo "| **Concurrency** | Single-threaded | Multi-threaded stress tests |" >> $GITHUB_STEP_SUMMARY
          echo "| **Automation** | Manual setup | Fully automated |" >> $GITHUB_STEP_SUMMARY
          echo "| **Validation** | Speed metrics | Correctness + Performance |" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "### Current Working Tests" >> $GITHUB_STEP_SUMMARY
          echo "- ✅ **Framework Structure**: Package and module verification" >> $GITHUB_STEP_SUMMARY
          echo "- ✅ **Configuration Management**: Test config validation" >> $GITHUB_STEP_SUMMARY
          echo "- ✅ **File Operations Demo**: Basic file create/read/write simulation" >> $GITHUB_STEP_SUMMARY
          echo "- ✅ **Large File Handling**: 1MB+ file processing demonstration" >> $GITHUB_STEP_SUMMARY
          echo "- ✅ **Concurrency Simulation**: Multi-file operation testing" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "### Next Steps" >> $GITHUB_STEP_SUMMARY
          echo "1. **Module Resolution**: Fix Go package conflicts for full framework" >> $GITHUB_STEP_SUMMARY
          echo "2. **SeaweedFS Integration**: Connect with real cluster for end-to-end testing" >> $GITHUB_STEP_SUMMARY
|
||||
echo "3. **Performance Benchmarks**: Add performance regression testing" >> $GITHUB_STEP_SUMMARY
|
||||
echo "" >> $GITHUB_STEP_SUMMARY
|
||||
echo "📈 **Total Framework Size**: ~1,500 lines of comprehensive testing infrastructure" >> $GITHUB_STEP_SUMMARY
|
.github/workflows/go.yml (21 changed lines, vendored)

@@ -1,4 +1,4 @@
-name: "go: build binary"
+name: Go

 on:
   push:
@@ -6,13 +6,6 @@ on:
   pull_request:
     branches: [ master ]

-concurrency:
-  group: ${{ github.head_ref }}/go
-  cancel-in-progress: true
-
-permissions:
-  contents: read
-
 jobs:

   build:
@@ -21,20 +14,24 @@ jobs:
     steps:

     - name: Set up Go 1.x
-      uses: actions/setup-go@8e57b58e57be52ac95949151e2777ffda8501267 # v2
+      uses: actions/setup-go@v2
       with:
         go-version: ^1.13
       id: go

     - name: Check out code into the Go module directory
-      uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v2
+      uses: actions/checkout@v2

     - name: Get dependencies
       run: |
         cd weed; go get -v -t -d ./...
+        if [ -f Gopkg.toml ]; then
+          curl https://raw.githubusercontent.com/golang/dep/master/install.sh | sh
+          dep ensure
+        fi

     - name: Build
-      run: cd weed; go build -tags "elastic gocdk sqlite ydb tarantool tikv rclone" -v .
+      run: cd weed; go build -v .

     - name: Test
-      run: cd weed; go test -tags "elastic gocdk sqlite ydb tarantool tikv rclone" -v ./...
+      run: cd weed; go test -v .
.github/workflows/helm_chart_release.yml (23 changed lines, vendored)

@@ -1,23 +0,0 @@
name: "helm: publish charts"
on:
  push:
    tags:
      - '*'

permissions:
  contents: write
  pages: write

jobs:
  release:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
      - name: Publish Helm charts
        uses: stefanprodan/helm-gh-pages@master
        with:
          token: ${{ secrets.GITHUB_TOKEN }}
          charts_dir: k8s/charts
          target_dir: helm
          branch: gh-pages
          helm_version: v3.18.4
.github/workflows/helm_ci.yml (51 changed lines, vendored)

@@ -1,51 +0,0 @@
name: "helm: lint and test charts"

on:
  push:
    branches: [ master ]
    paths: ['k8s/**']
  pull_request:
    branches: [ master ]
    paths: ['k8s/**']

permissions:
  contents: read

jobs:
  lint-test:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
        with:
          fetch-depth: 0

      - name: Set up Helm
        uses: azure/setup-helm@v4
        with:
          version: v3.18.4

      - uses: actions/setup-python@v5
        with:
          python-version: '3.9'
          check-latest: true

      - name: Set up chart-testing
        uses: helm/chart-testing-action@v2.7.0

      - name: Run chart-testing (list-changed)
        id: list-changed
        run: |
          changed=$(ct list-changed --target-branch ${{ github.event.repository.default_branch }} --chart-dirs k8s/charts)
          if [[ -n "$changed" ]]; then
            echo "::set-output name=changed::true"
          fi

      - name: Run chart-testing (lint)
        run: ct lint --target-branch ${{ github.event.repository.default_branch }} --all --validate-maintainers=false --chart-dirs k8s/charts

      - name: Create kind cluster
        uses: helm/kind-action@v1.12.0

      - name: Run chart-testing (install)
        run: ct install --target-branch ${{ github.event.repository.default_branch }} --all --chart-dirs k8s/charts
.github/workflows/s3-go-tests.yml (412 changed lines, vendored)

@@ -1,412 +0,0 @@
name: "S3 Go Tests"

on:
  pull_request:

concurrency:
  group: ${{ github.head_ref }}/s3-go-tests
  cancel-in-progress: true

permissions:
  contents: read

defaults:
  run:
    working-directory: weed

jobs:
  s3-versioning-tests:
    name: S3 Versioning Tests
    runs-on: ubuntu-22.04
    timeout-minutes: 30
    strategy:
      matrix:
        test-type: ["quick", "comprehensive"]

    steps:
      - name: Check out code
        uses: actions/checkout@v4

      - name: Set up Go
        uses: actions/setup-go@v5
        with:
          go-version-file: 'go.mod'
        id: go

      - name: Install SeaweedFS
        run: |
          go install -buildvcs=false

      - name: Run S3 Versioning Tests - ${{ matrix.test-type }}
        timeout-minutes: 25
        working-directory: test/s3/versioning
        run: |
          set -x
          echo "=== System Information ==="
          uname -a
          free -h
          df -h
          echo "=== Starting Tests ==="

          # Run tests with automatic server management
          # The test-with-server target handles server startup/shutdown automatically
          if [ "${{ matrix.test-type }}" = "quick" ]; then
            # Override TEST_PATTERN for quick tests only
            make test-with-server TEST_PATTERN="TestBucketListReturnDataVersioning|TestVersioningBasicWorkflow|TestVersioningDeleteMarkers"
          else
            # Run all versioning tests
            make test-with-server
          fi

      - name: Show server logs on failure
        if: failure()
        working-directory: test/s3/versioning
        run: |
          echo "=== Server Logs ==="
          if [ -f weed-test.log ]; then
            echo "Last 100 lines of server logs:"
            tail -100 weed-test.log
          else
            echo "No server log file found"
          fi

          echo "=== Test Environment ==="
          ps aux | grep -E "(weed|test)" || true
          netstat -tlnp | grep -E "(8333|9333|8080)" || true

      - name: Upload test logs on failure
        if: failure()
        uses: actions/upload-artifact@v4
        with:
          name: s3-versioning-test-logs-${{ matrix.test-type }}
          path: test/s3/versioning/weed-test*.log
          retention-days: 3

  s3-versioning-compatibility:
    name: S3 Versioning Compatibility Test
    runs-on: ubuntu-22.04
    timeout-minutes: 20

    steps:
      - name: Check out code
        uses: actions/checkout@v4

      - name: Set up Go
        uses: actions/setup-go@v5
        with:
          go-version-file: 'go.mod'
        id: go

      - name: Install SeaweedFS
        run: |
          go install -buildvcs=false

      - name: Run Core Versioning Test (Python s3tests equivalent)
        timeout-minutes: 15
        working-directory: test/s3/versioning
        run: |
          set -x
          echo "=== System Information ==="
          uname -a
          free -h

          # Run the specific test that is equivalent to the Python s3tests
          make test-with-server || {
            echo "❌ Test failed, checking logs..."
            if [ -f weed-test.log ]; then
              echo "=== Server logs ==="
              tail -100 weed-test.log
            fi
            echo "=== Process information ==="
            ps aux | grep -E "(weed|test)" || true
            exit 1
          }

      - name: Upload server logs on failure
        if: failure()
        uses: actions/upload-artifact@v4
        with:
          name: s3-versioning-compatibility-logs
          path: test/s3/versioning/weed-test*.log
          retention-days: 3

  s3-cors-compatibility:
    name: S3 CORS Compatibility Test
    runs-on: ubuntu-22.04
    timeout-minutes: 20

    steps:
      - name: Check out code
        uses: actions/checkout@v4

      - name: Set up Go
        uses: actions/setup-go@v5
        with:
          go-version-file: 'go.mod'
        id: go

      - name: Install SeaweedFS
        run: |
          go install -buildvcs=false

      - name: Run Core CORS Test (AWS S3 compatible)
        timeout-minutes: 15
        working-directory: test/s3/cors
        run: |
          set -x
          echo "=== System Information ==="
          uname -a
          free -h

          # Run the specific test that is equivalent to AWS S3 CORS behavior
          make test-with-server || {
            echo "❌ Test failed, checking logs..."
            if [ -f weed-test.log ]; then
              echo "=== Server logs ==="
              tail -100 weed-test.log
            fi
            echo "=== Process information ==="
            ps aux | grep -E "(weed|test)" || true
            exit 1
          }

      - name: Upload server logs on failure
        if: failure()
        uses: actions/upload-artifact@v4
        with:
          name: s3-cors-compatibility-logs
          path: test/s3/cors/weed-test*.log
          retention-days: 3

  s3-retention-tests:
    name: S3 Retention Tests
    runs-on: ubuntu-22.04
    timeout-minutes: 30
    strategy:
      matrix:
        test-type: ["quick", "comprehensive"]

    steps:
      - name: Check out code
        uses: actions/checkout@v4

      - name: Set up Go
        uses: actions/setup-go@v5
        with:
          go-version-file: 'go.mod'
        id: go

      - name: Install SeaweedFS
        run: |
          go install -buildvcs=false

      - name: Run S3 Retention Tests - ${{ matrix.test-type }}
        timeout-minutes: 25
        working-directory: test/s3/retention
        run: |
          set -x
          echo "=== System Information ==="
          uname -a
          free -h
          df -h
          echo "=== Starting Tests ==="

          # Run tests with automatic server management
          # The test-with-server target handles server startup/shutdown automatically
          if [ "${{ matrix.test-type }}" = "quick" ]; then
            # Override TEST_PATTERN for quick tests only
            make test-with-server TEST_PATTERN="TestBasicRetentionWorkflow|TestRetentionModeCompliance|TestLegalHoldWorkflow"
          else
            # Run all retention tests
            make test-with-server
          fi

      - name: Show server logs on failure
        if: failure()
        working-directory: test/s3/retention
        run: |
          echo "=== Server Logs ==="
          if [ -f weed-test.log ]; then
            echo "Last 100 lines of server logs:"
            tail -100 weed-test.log
          else
            echo "No server log file found"
          fi

          echo "=== Test Environment ==="
          ps aux | grep -E "(weed|test)" || true
          netstat -tlnp | grep -E "(8333|9333|8080)" || true

      - name: Upload test logs on failure
        if: failure()
        uses: actions/upload-artifact@v4
        with:
          name: s3-retention-test-logs-${{ matrix.test-type }}
          path: test/s3/retention/weed-test*.log
          retention-days: 3

  s3-cors-tests:
    name: S3 CORS Tests
    runs-on: ubuntu-22.04
    timeout-minutes: 30
    strategy:
      matrix:
        test-type: ["quick", "comprehensive"]

    steps:
      - name: Check out code
        uses: actions/checkout@v4

      - name: Set up Go
        uses: actions/setup-go@v5
        with:
          go-version-file: 'go.mod'
        id: go

      - name: Install SeaweedFS
        run: |
          go install -buildvcs=false

      - name: Run S3 CORS Tests - ${{ matrix.test-type }}
        timeout-minutes: 25
        working-directory: test/s3/cors
        run: |
          set -x
          echo "=== System Information ==="
          uname -a
          free -h
          df -h
          echo "=== Starting Tests ==="

          # Run tests with automatic server management
          # The test-with-server target handles server startup/shutdown automatically
          if [ "${{ matrix.test-type }}" = "quick" ]; then
            # Override TEST_PATTERN for quick tests only
            make test-with-server TEST_PATTERN="TestCORSConfigurationManagement|TestServiceLevelCORS|TestCORSBasicWorkflow"
          else
            # Run all CORS tests
            make test-with-server
          fi

      - name: Show server logs on failure
        if: failure()
        working-directory: test/s3/cors
        run: |
          echo "=== Server Logs ==="
          if [ -f weed-test.log ]; then
            echo "Last 100 lines of server logs:"
            tail -100 weed-test.log
          else
            echo "No server log file found"
          fi

          echo "=== Test Environment ==="
          ps aux | grep -E "(weed|test)" || true
          netstat -tlnp | grep -E "(8333|9333|8080)" || true

      - name: Upload test logs on failure
        if: failure()
        uses: actions/upload-artifact@v4
        with:
          name: s3-cors-test-logs-${{ matrix.test-type }}
          path: test/s3/cors/weed-test*.log
          retention-days: 3

  s3-retention-worm:
    name: S3 Retention WORM Integration Test
    runs-on: ubuntu-22.04
    timeout-minutes: 20

    steps:
      - name: Check out code
        uses: actions/checkout@v4

      - name: Set up Go
        uses: actions/setup-go@v5
        with:
          go-version-file: 'go.mod'
        id: go

      - name: Install SeaweedFS
        run: |
          go install -buildvcs=false

      - name: Run WORM Integration Tests
        timeout-minutes: 15
        working-directory: test/s3/retention
        run: |
          set -x
          echo "=== System Information ==="
          uname -a
          free -h

          # Run the WORM integration tests with automatic server management
          # The test-with-server target handles server startup/shutdown automatically
          make test-with-server TEST_PATTERN="TestWORM|TestRetentionExtendedAttributes|TestRetentionConcurrentOperations" || {
            echo "❌ WORM integration test failed, checking logs..."
            if [ -f weed-test.log ]; then
              echo "=== Server logs ==="
              tail -100 weed-test.log
            fi
            echo "=== Process information ==="
            ps aux | grep -E "(weed|test)" || true
            exit 1
          }

      - name: Upload server logs on failure
        if: failure()
        uses: actions/upload-artifact@v4
        with:
          name: s3-retention-worm-logs
          path: test/s3/retention/weed-test*.log
          retention-days: 3

  s3-versioning-stress:
    name: S3 Versioning Stress Test
    runs-on: ubuntu-22.04
    timeout-minutes: 35
    # Only run stress tests on master branch pushes to avoid overloading PR testing
    if: github.event_name == 'push' && github.ref == 'refs/heads/master'

    steps:
      - name: Check out code
        uses: actions/checkout@v4

      - name: Set up Go
        uses: actions/setup-go@v5
        with:
          go-version-file: 'go.mod'
        id: go

      - name: Install SeaweedFS
        run: |
          go install -buildvcs=false

      - name: Run S3 Versioning Stress Tests
        timeout-minutes: 30
        working-directory: test/s3/versioning
        run: |
          set -x
          echo "=== System Information ==="
          uname -a
          free -h

          # Run stress tests (concurrent operations)
          make test-versioning-stress || {
            echo "❌ Stress test failed, checking logs..."
            if [ -f weed-test.log ]; then
              echo "=== Server logs ==="
              tail -200 weed-test.log
            fi
            make clean
            exit 1
          }
          make clean

      - name: Upload stress test logs
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: s3-versioning-stress-logs
          path: test/s3/versioning/weed-test*.log
          retention-days: 7
.github/workflows/s3tests.yml (1083 changed lines, vendored)

File diff suppressed because it is too large.

@@ -1,79 +0,0 @@
name: "test s3 over https using aws-cli"

on:
  push:
    branches: [master, test-https-s3-awscli]
  pull_request:
    branches: [master, test-https-s3-awscli]

env:
  AWS_ACCESS_KEY_ID: some_access_key1
  AWS_SECRET_ACCESS_KEY: some_secret_key1
  AWS_ENDPOINT_URL: https://localhost:8443

defaults:
  run:
    working-directory: weed

jobs:
  awscli-tests:
    runs-on: ubuntu-latest
    timeout-minutes: 5
    steps:
      - uses: actions/checkout@v4

      - uses: actions/setup-go@v5
        with:
          go-version: ^1.24

      - name: Build SeaweedFS
        run: |
          go build

      - name: Start SeaweedFS
        run: |
          set -e
          mkdir -p /tmp/data
          ./weed server -s3 -dir=/tmp/data -s3.config=../docker/compose/s3.json &
          until curl -s http://localhost:8333/ > /dev/null; do sleep 1; done

      - name: Setup Caddy
        run: |
          curl -fsSL "https://caddyserver.com/api/download?os=linux&arch=amd64" -o caddy
          chmod +x caddy
          ./caddy version
          echo "{
            auto_https disable_redirects
            local_certs
          }
          localhost:8443 {
            tls internal
            reverse_proxy localhost:8333
          }" > Caddyfile

      - name: Start Caddy
        run: |
          ./caddy start
          until curl -fsS --insecure https://localhost:8443 > /dev/null; do sleep 1; done

      - name: Create Bucket
        run: |
          aws --no-verify-ssl s3api create-bucket --bucket bucket

      - name: Test PutObject
        run: |
          set -e
          dd if=/dev/urandom of=generated bs=1M count=2
          aws --no-verify-ssl s3api put-object --bucket bucket --key test-putobject --body generated
          aws --no-verify-ssl s3api get-object --bucket bucket --key test-putobject downloaded
          diff -q generated downloaded
          rm -f generated downloaded

      - name: Test Multi-part Upload
        run: |
          set -e
          dd if=/dev/urandom of=generated bs=1M count=32
          aws --no-verify-ssl s3 cp --no-progress generated s3://bucket/test-multipart
          aws --no-verify-ssl s3 cp --no-progress s3://bucket/test-multipart downloaded
          diff -q generated downloaded
          rm -f generated downloaded
.gitignore (32 changed lines, vendored)

@@ -55,8 +55,6 @@ Temporary Items
 # Mongo Explorer plugin:
 # .idea/mongoSettings.xml

-## vscode
-.vscode
 ## File-based project format:
 *.ipr
 *.iws
@@ -77,8 +75,6 @@ com_crashlytics_export_strings.xml
 crashlytics.properties
 crashlytics-build.properties

-workspace/
-
 test_data
 build
 target
@@ -87,31 +83,3 @@ other/java/hdfs/dependency-reduced-pom.xml

 # binary file
 weed/weed
-docker/weed
-
-# test generated files
-weed/*/*.jpg
-docker/weed_sub
-docker/weed_pub
-weed/mq/schema/example.parquet
-docker/agent_sub_record
-test/mq/bin/consumer
-test/mq/bin/producer
-test/producer
-bin/weed
-weed_binary
-/test/s3/copying/filerldb2
-/filerldb2
-/test/s3/retention/test-volume-data
-test/s3/cors/weed-test.log
-test/s3/cors/weed-server.pid
-/test/s3/cors/test-volume-data
-test/s3/cors/cors.test
-/test/s3/retention/filerldb2
-test/s3/retention/weed-server.pid
-test/s3/retention/weed-test.log
-/test/s3/versioning/test-volume-data
-test/s3/versioning/weed-test.log
-/docker/admin_integration/data
-docker/agent_pub_record
-docker/admin_integration/weed-local
.travis.yml (48 lines, new file)

@@ -0,0 +1,48 @@
sudo: false
language: go
go:
  - 1.12.x
  - 1.13.x
  - 1.14.x

before_install:
  - export PATH=/home/travis/gopath/bin:$PATH

install:
  - export CGO_ENABLED="0"
  - go env

script:
  - env GO111MODULE=on go test ./weed/...

before_deploy:
  - make release
deploy:
  provider: releases
  skip_cleanup: true
  api_key:
    secure: ERL986+ncQ8lwAJUYDrQ8s2/FxF/cyNIwJIFCqspnWxQgGNNyokET9HapmlPSxjpFRF0q6L2WCg9OY3mSVRq4oI6hg1igOQ12KlLyN71XSJ3c8w0Ay5ho48TQ9l3f3Iu97mntBCe9l0R9pnT8wj1VI8YJxloXwUMG2yeTjA9aBI=
  file:
    - build/linux_arm.tar.gz
    - build/linux_arm64.tar.gz
    - build/linux_386.tar.gz
    - build/linux_amd64.tar.gz
    - build/linux_amd64_large_disk.tar.gz
    - build/darwin_amd64.tar.gz
    - build/darwin_amd64_large_disk.tar.gz
    - build/windows_386.zip
    - build/windows_amd64.zip
    - build/windows_amd64_large_disk.zip
    - build/freebsd_arm.tar.gz
    - build/freebsd_amd64.tar.gz
    - build/freebsd_386.tar.gz
    - build/netbsd_arm.tar.gz
    - build/netbsd_amd64.tar.gz
    - build/netbsd_386.tar.gz
    - build/openbsd_arm.tar.gz
    - build/openbsd_amd64.tar.gz
    - build/openbsd_386.tar.gz
  on:
    tags: true
    repo: chrislusf/seaweedfs
    go: 1.14.x
@@ -1,74 +0,0 @@
# Contributor Covenant Code of Conduct

## Our Pledge

In the interest of fostering an open and welcoming environment, we as
contributors and maintainers pledge to make participation in our project and
our community a harassment-free experience for everyone, regardless of age, body
size, disability, ethnicity, gender identity and expression, level of experience,
nationality, personal appearance, race, religion, or sexual identity and
orientation.

## Our Standards

Examples of behavior that contributes to creating a positive environment
include:

- Using welcoming and inclusive language
- Being respectful of differing viewpoints and experiences
- Gracefully accepting constructive criticism
- Focusing on what is best for the community
- Showing empathy towards other community members

Examples of unacceptable behavior by participants include:

- The use of sexualized language or imagery and unwelcome sexual attention or
  advances
- Trolling, insulting/derogatory comments, and personal or political attacks
- Public or private harassment
- Publishing others' private information, such as a physical or electronic
  address, without explicit permission
- Other conduct which could reasonably be considered inappropriate in a
  professional setting

## Our Responsibilities

Project maintainers are responsible for clarifying the standards of acceptable
behavior and are expected to take appropriate and fair corrective action in
response to any instances of unacceptable behavior.

Project maintainers have the right and responsibility to remove, edit, or
reject comments, commits, code, wiki edits, issues, and other contributions
that are not aligned to this Code of Conduct, or to ban temporarily or
permanently any contributor for other behaviors that they deem inappropriate,
threatening, offensive, or harmful.

## Scope

This Code of Conduct applies both within project spaces and in public spaces
when an individual is representing the project or its community. Examples of
representing a project or community include using an official project e-mail
address, posting via an official social media account, or acting as an appointed
representative at an online or offline event. Representation of a project may be
further defined and clarified by project maintainers.

## Enforcement

Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported by contacting the project team at <enteremailhere>. All
complaints will be reviewed and investigated and will result in a response that
is deemed necessary and appropriate to the circumstances. The project team is
obligated to maintain confidentiality with regard to the reporter of an incident.
Further details of specific enforcement policies may be posted separately.

Project maintainers who do not follow or enforce the Code of Conduct in good
faith may face temporary or permanent repercussions as determined by other
members of the project's leadership.

## Attribution

This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
available at [http://contributor-covenant.org/version/1/4][version]

[homepage]: http://contributor-covenant.org
[version]: http://contributor-covenant.org/version/1/4/
DESIGN.md (413 lines removed)

@@ -1,413 +0,0 @@
# SeaweedFS Task Distribution System Design

## Overview

This document describes the design of a distributed task management system for SeaweedFS that handles Erasure Coding (EC) and vacuum operations through a scalable admin server and worker process architecture.

## System Architecture

### High-Level Components

```
┌─────────────────┐    ┌──────────────────┐    ┌─────────────────┐
│     Master      │◄──►│   Admin Server   │◄──►│     Workers     │
│                 │    │                  │    │                 │
│ - Volume Info   │    │ - Task Discovery │    │ - Task Exec     │
│ - Shard Status  │    │ - Task Assign    │    │ - Progress      │
│ - Heartbeats    │    │ - Progress Track │    │ - Error Report  │
└─────────────────┘    └──────────────────┘    └─────────────────┘
        │                       │                       │
        │                       │                       │
        ▼                       ▼                       ▼
┌─────────────────┐    ┌──────────────────┐    ┌─────────────────┐
│  Volume Servers │    │  Volume Monitor  │    │ Task Execution  │
│                 │    │                  │    │                 │
│ - Store Volumes │    │ - Health Check   │    │ - EC Convert    │
│ - EC Shards     │    │ - Usage Stats    │    │ - Vacuum Clean  │
│ - Report Status │    │ - State Sync     │    │ - Status Report │
└─────────────────┘    └──────────────────┘    └─────────────────┘
```

## 1. Admin Server Design

### 1.1 Core Responsibilities

- **Task Discovery**: Scan volumes to identify EC and vacuum candidates
- **Worker Management**: Track available workers and their capabilities
- **Task Assignment**: Match tasks to optimal workers
- **Progress Tracking**: Monitor in-progress tasks for capacity planning
- **State Reconciliation**: Sync with master server for volume state updates

### 1.2 Task Discovery Engine

```go
type TaskDiscoveryEngine struct {
    masterClient  MasterClient
    volumeScanner VolumeScanner
    taskDetectors map[TaskType]TaskDetector
    scanInterval  time.Duration
}

type VolumeCandidate struct {
    VolumeID   uint32
    Server     string
    Collection string
    TaskType   TaskType
    Priority   TaskPriority
    Reason     string
    DetectedAt time.Time
    Parameters map[string]interface{}
}
```

**EC Detection Logic** (see the sketch after these lists):
- Find volumes >= 95% full and idle for > 1 hour
- Exclude volumes already in EC format
- Exclude volumes with ongoing operations
- Prioritize by collection and age

**Vacuum Detection Logic**:
- Find volumes with garbage ratio > 30%
- Exclude read-only volumes
- Exclude volumes with recent vacuum operations
- Prioritize by garbage percentage
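The two rule sets above can be read as pure predicates over per-volume statistics. A minimal, self-contained sketch of that reading follows; the `VolumeStat` type and its field names are assumptions for illustration, not part of the design:

```go
package main

import (
	"fmt"
	"time"
)

// VolumeStat is an assumed, simplified view of the volume metrics the
// discovery engine would read from the master; field names are illustrative.
type VolumeStat struct {
	FullRatio    float64       // used bytes / volume size limit
	GarbageRatio float64       // deleted bytes / used bytes
	IdleFor      time.Duration // time since the last write
	IsEC         bool          // already erasure coded
	ReadOnly     bool
	Busy         bool // has an ongoing operation (EC, vacuum, copy, ...)
}

// isECCandidate mirrors the EC detection bullets: >= 95% full,
// idle for over an hour, not already EC, and not busy.
func isECCandidate(v VolumeStat) bool {
	return v.FullRatio >= 0.95 && v.IdleFor > time.Hour && !v.IsEC && !v.Busy
}

// isVacuumCandidate mirrors the vacuum detection bullets: > 30% garbage,
// writable, and not busy (recent-vacuum tracking is omitted here).
func isVacuumCandidate(v VolumeStat) bool {
	return v.GarbageRatio > 0.3 && !v.ReadOnly && !v.Busy
}

func main() {
	v := VolumeStat{FullRatio: 0.97, GarbageRatio: 0.1, IdleFor: 2 * time.Hour}
	fmt.Println(isECCandidate(v), isVacuumCandidate(v)) // true false
}
```

Prioritization (by collection/age or garbage percentage) would then order the surviving candidates before they enter the task queue.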
### 1.3 Worker Registry & Management

```go
type WorkerRegistry struct {
    workers        map[string]*Worker
    capabilities   map[TaskType][]*Worker
    lastHeartbeat  map[string]time.Time
    taskAssignment map[string]*Task
    mutex          sync.RWMutex
}

type Worker struct {
    ID            string
    Address       string
    Capabilities  []TaskType
    MaxConcurrent int
    CurrentLoad   int
    Status        WorkerStatus
    LastSeen      time.Time
    Performance   WorkerMetrics
}
```

### 1.4 Task Assignment Algorithm

```go
type TaskScheduler struct {
    registry           *WorkerRegistry
    taskQueue          *PriorityQueue
    inProgressTasks    map[string]*InProgressTask
    volumeReservations map[uint32]*VolumeReservation
}

// Worker Selection Criteria:
// 1. Has required capability (EC or Vacuum)
// 2. Available capacity (CurrentLoad < MaxConcurrent)
// 3. Best performance history for task type
// 4. Lowest current load
// 5. Geographically close to volume server (optional)
```
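Read literally, the selection criteria describe a filter followed by a ranking. A hedged sketch of one scheduler step, built on the `Worker` and `TaskType` definitions from 1.3 (it is not the actual implementation, and criterion 3 is reduced to the load comparison):

```go
// selectWorker sketches the selection criteria above: filter by capability
// and spare capacity, then prefer the least-loaded worker. A fuller version
// would also weigh w.Performance for this task type (criterion 3) and
// locality (criterion 5).
func selectWorker(workers []*Worker, taskType TaskType) *Worker {
	var best *Worker
	for _, w := range workers {
		if !hasCapability(w, taskType) {
			continue // criterion 1: must support the task type
		}
		if w.CurrentLoad >= w.MaxConcurrent {
			continue // criterion 2: must have spare capacity
		}
		if best == nil || w.CurrentLoad < best.CurrentLoad {
			best = w // criterion 4: lowest current load wins
		}
	}
	return best // nil means no eligible worker; the task stays queued
}

func hasCapability(w *Worker, t TaskType) bool {
	for _, c := range w.Capabilities {
		if c == t {
			return true
		}
	}
	return false
}
```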
## 2. Worker Process Design

### 2.1 Worker Architecture

```go
type MaintenanceWorker struct {
    id              string
    config          *WorkerConfig
    adminClient     AdminClient
    taskExecutors   map[TaskType]TaskExecutor
    currentTasks    map[string]*RunningTask
    registry        *TaskRegistry
    heartbeatTicker *time.Ticker
    requestTicker   *time.Ticker
}
```

### 2.2 Task Execution Framework

```go
type TaskExecutor interface {
    Execute(ctx context.Context, task *Task) error
    EstimateTime(task *Task) time.Duration
    ValidateResources(task *Task) error
    GetProgress() float64
    Cancel() error
}

type ErasureCodingExecutor struct {
    volumeClient VolumeServerClient
    progress     float64
    cancelled    bool
}

type VacuumExecutor struct {
    volumeClient VolumeServerClient
    progress     float64
    cancelled    bool
}
```

### 2.3 Worker Capabilities & Registration

```go
type WorkerCapabilities struct {
    SupportedTasks   []TaskType
    MaxConcurrent    int
    ResourceLimits   ResourceLimits
    PreferredServers []string // Affinity for specific volume servers
}

type ResourceLimits struct {
    MaxMemoryMB    int64
    MaxDiskSpaceMB int64
    MaxNetworkMbps int64
    MaxCPUPercent  float64
}
```
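One way to picture how the `heartbeatTicker` and `requestTicker` fields from 2.1 drive the worker is a two-ticker main loop. This is an illustrative sketch only; the `Heartbeat` and `RequestTask` methods on `AdminClient`, and the `MaxConcurrent` config field, are assumptions rather than an existing API:

```go
// run sketches the worker main loop implied by 2.1: one ticker reports
// liveness so the registry can keep lastHeartbeat fresh (see 1.3), the
// other asks the admin server for new work when capacity allows.
func (w *MaintenanceWorker) run(ctx context.Context) {
	for {
		select {
		case <-ctx.Done():
			return
		case <-w.heartbeatTicker.C:
			w.adminClient.Heartbeat(w.id, len(w.currentTasks)) // assumed method
		case <-w.requestTicker.C:
			if len(w.currentTasks) >= w.config.MaxConcurrent { // assumed field
				continue // at capacity; do not request more work
			}
			if task := w.adminClient.RequestTask(w.id); task != nil { // assumed method
				go w.execute(task) // dispatch to the matching TaskExecutor
			}
		}
	}
}
```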
## 3. Task Lifecycle Management

### 3.1 Task States

```go
type TaskState string

const (
    TaskStatePending    TaskState = "pending"
    TaskStateAssigned   TaskState = "assigned"
    TaskStateInProgress TaskState = "in_progress"
    TaskStateCompleted  TaskState = "completed"
    TaskStateFailed     TaskState = "failed"
    TaskStateCancelled  TaskState = "cancelled"
    TaskStateStuck      TaskState = "stuck"     // Taking too long
    TaskStateDuplicate  TaskState = "duplicate" // Detected duplicate
)
```

### 3.2 Progress Tracking & Monitoring

```go
type InProgressTask struct {
    Task           *Task
    WorkerID       string
    StartedAt      time.Time
    LastUpdate     time.Time
    Progress       float64
    EstimatedEnd   time.Time
    VolumeReserved bool // Reserved for capacity planning
}

type TaskMonitor struct {
    inProgressTasks  map[string]*InProgressTask
    timeoutChecker   *time.Ticker
    stuckDetector    *time.Ticker
    duplicateChecker *time.Ticker
}
```
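The `stuckDetector` ticker suggests a periodic sweep over `inProgressTasks`. A possible shape for that sweep, using the 2x-estimate threshold defined later in 5.2 (the `markStuck` helper is hypothetical):

```go
// sweepStuckTasks sketches what a stuckDetector tick might do: flag any
// task whose last progress update is older than twice its estimated
// duration, matching the threshold in section 5.2.
func (m *TaskMonitor) sweepStuckTasks(now time.Time, estimate func(*Task) time.Duration) {
	for id, t := range m.inProgressTasks {
		if now.Sub(t.LastUpdate) > 2*estimate(t.Task) {
			markStuck(id, t) // hypothetical: transitions the task to TaskStateStuck
		}
	}
}
```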
## 4. Volume Capacity Reconciliation

### 4.1 Volume State Tracking

```go
type VolumeStateManager struct {
    masterClient      MasterClient
    inProgressTasks   map[uint32]*InProgressTask // VolumeID -> Task
    committedChanges  map[uint32]*VolumeChange   // Changes not yet in master
    reconcileInterval time.Duration
}

type VolumeChange struct {
    VolumeID         uint32
    ChangeType       ChangeType // "ec_encoding", "vacuum_completed"
    OldCapacity      int64
    NewCapacity      int64
    TaskID           string
    CompletedAt      time.Time
    ReportedToMaster bool
}
```

### 4.2 Shard Assignment Integration

When the master needs to assign shards, it must consider:
1. **Current volume state** from its own records
2. **In-progress capacity changes** from admin server
3. **Committed but unreported changes** from admin server

```go
type CapacityOracle struct {
    adminServer AdminServerClient
    masterState *MasterVolumeState
    updateFreq  time.Duration
}

func (o *CapacityOracle) GetAdjustedCapacity(volumeID uint32) int64 {
    baseCapacity := o.masterState.GetCapacity(volumeID)

    // Adjust for in-progress tasks
    if task := o.adminServer.GetInProgressTask(volumeID); task != nil {
        switch task.Type {
        case TaskTypeErasureCoding:
            // EC reduces effective capacity
            return baseCapacity / 2 // Simplified
        case TaskTypeVacuum:
            // Vacuum may increase available space
            return baseCapacity + int64(float64(baseCapacity)*0.3)
        }
    }

    // Adjust for completed but unreported changes
    if change := o.adminServer.GetPendingChange(volumeID); change != nil {
        return change.NewCapacity
    }

    return baseCapacity
}
```
## 5. Error Handling & Recovery

### 5.1 Worker Failure Scenarios

```go
type FailureHandler struct {
    taskRescheduler *TaskRescheduler
    workerMonitor   *WorkerMonitor
    alertManager    *AlertManager
}

// Failure Scenarios:
// 1. Worker becomes unresponsive (heartbeat timeout)
// 2. Task execution fails (reported by worker)
// 3. Task gets stuck (progress timeout)
// 4. Duplicate task detection
// 5. Resource exhaustion
```

### 5.2 Recovery Strategies

**Worker Timeout Recovery** (a sketch follows these lists):
- Mark worker as inactive after 3 missed heartbeats
- Reschedule all assigned tasks to other workers
- Cleanup any partial state

**Task Stuck Recovery**:
- Detect tasks with no progress for > 2x estimated time
- Cancel stuck task and mark volume for cleanup
- Reschedule if retry count < max_retries
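A compact sketch of the worker-timeout rule above, expressed against the `FailureHandler` and `WorkerRegistry` types (the `RescheduleAllFrom` method and `WorkerStatusInactive` value are assumed names, not a defined API):

```go
// checkWorkerTimeouts sketches worker-timeout recovery: a worker that has
// missed three heartbeat intervals is marked inactive and its assigned
// tasks are sent back to the scheduler for reassignment.
func (h *FailureHandler) checkWorkerTimeouts(reg *WorkerRegistry, interval time.Duration, now time.Time) {
	for id, last := range reg.lastHeartbeat {
		if now.Sub(last) > 3*interval {
			reg.workers[id].Status = WorkerStatusInactive // assumed status value
			h.taskRescheduler.RescheduleAllFrom(id)       // assumed method
		}
	}
}
```

Stuck-task recovery reuses the monitor sweep from 3.2, followed by a cancel-and-reschedule step bounded by the retry count.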
**Duplicate Task Prevention**:
```go
type DuplicateDetector struct {
    activeFingerprints map[string]bool // VolumeID+TaskType
    recentCompleted    *LRUCache       // Recently completed tasks
}

func (d *DuplicateDetector) IsTaskDuplicate(task *Task) bool {
    fingerprint := fmt.Sprintf("%d-%s", task.VolumeID, task.Type)
    return d.activeFingerprints[fingerprint] ||
        d.recentCompleted.Contains(fingerprint)
}
```

## 6. Simulation & Testing Framework

### 6.1 Failure Simulation

```go
type TaskSimulator struct {
    scenarios map[string]SimulationScenario
}

type SimulationScenario struct {
    Name            string
    WorkerCount     int
    VolumeCount     int
    FailurePatterns []FailurePattern
    Duration        time.Duration
}

type FailurePattern struct {
    Type        FailureType // "worker_timeout", "task_stuck", "duplicate"
    Probability float64     // 0.0 to 1.0
    Timing      TimingSpec  // When during task execution
    Duration    time.Duration
}
```
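As a concrete reading of these structures, the first test scenario in 6.2 could be encoded roughly as the literal below. The values come from the scenario description; the `FailureTypeWorkerTimeout` constant and the empty `TimingSpec` are assumptions for illustration:

```go
// A sketch of "Scenario 1: Worker Timeout During EC" (see 6.2) expressed
// with the simulation types above.
var workerTimeoutDuringEC = SimulationScenario{
	Name:        "worker-timeout-during-ec",
	WorkerCount: 3,
	VolumeCount: 1, // one 30GB volume being erasure coded
	FailurePatterns: []FailurePattern{{
		Type:        FailureTypeWorkerTimeout, // assumed constant for "worker_timeout"
		Probability: 1.0,                      // always fire, to keep the test deterministic
		Timing:      TimingSpec{},             // "at 50% progress" in the scenario text
		Duration:    5 * time.Minute,
	}},
	Duration: 30 * time.Minute,
}
```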
### 6.2 Test Scenarios

**Scenario 1: Worker Timeout During EC**
- Start EC task on 30GB volume
- Kill worker at 50% progress
- Verify task reassignment
- Verify no duplicate EC operations

**Scenario 2: Stuck Vacuum Task**
- Start vacuum on high-garbage volume
- Simulate worker hanging at 75% progress
- Verify timeout detection and cleanup
- Verify volume state consistency

**Scenario 3: Duplicate Task Prevention**
- Submit same EC task from multiple sources
- Verify only one task executes
- Verify proper conflict resolution

**Scenario 4: Master-Admin State Divergence**
- Create in-progress EC task
- Simulate master restart
- Verify state reconciliation
- Verify shard assignment accounts for in-progress work

## 7. Performance & Scalability

### 7.1 Metrics & Monitoring

```go
type SystemMetrics struct {
    TasksPerSecond    float64
    WorkerUtilization float64
    AverageTaskTime   time.Duration
    FailureRate       float64
    QueueDepth        int
    VolumeStatesSync  bool
}
```

### 7.2 Scalability Considerations

- **Horizontal Worker Scaling**: Add workers without admin server changes
- **Admin Server HA**: Master-slave admin servers for fault tolerance
- **Task Partitioning**: Partition tasks by collection or datacenter
- **Batch Operations**: Group similar tasks for efficiency

## 8. Implementation Plan

### Phase 1: Core Infrastructure
1. Admin server basic framework
2. Worker registration and heartbeat
3. Simple task assignment
4. Basic progress tracking

### Phase 2: Advanced Features
1. Volume state reconciliation
2. Sophisticated worker selection
3. Failure detection and recovery
4. Duplicate prevention

### Phase 3: Optimization & Monitoring
1. Performance metrics
2. Load balancing algorithms
3. Capacity planning integration
4. Comprehensive monitoring

This design provides a robust, scalable foundation for distributed task management in SeaweedFS while maintaining consistency with the existing architecture patterns.
LICENSE (2 changed lines)

@@ -186,7 +186,7 @@
       same "printed page" as the copyright notice for easier
       identification within third-party archives.

-   Copyright 2025 Chris Lu
+   Copyright 2016 Chris Lu

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
Makefile (168 changed lines)

@@ -1,71 +1,133 @@
-.PHONY: test admin-generate admin-build admin-clean admin-dev admin-run admin-test admin-fmt admin-help
-
-BINARY = weed/weed
-ADMIN_DIR = weed/admin
-SOURCE_DIR = ./weed/
-debug ?= 0
-
-all: install
-
-install: admin-generate
-	cd weed; go install
-
-warp_install:
-	go install github.com/minio/warp@v0.7.6
-
-full_install: admin-generate
-	cd weed; go install -tags "elastic gocdk sqlite ydb tarantool tikv rclone"
-
-server: install
-	weed -v 0 server -s3 -filer -filer.maxMB=64 -volume.max=0 -master.volumeSizeLimitMB=100 -volume.preStopSeconds=1 -s3.port=8000 -s3.allowEmptyFolder=false -s3.allowDeleteBucketNotEmpty=true -s3.config=./docker/compose/s3.json -metricsPort=9324
-
-benchmark: install warp_install
-	pkill weed || true
-	pkill warp || true
-	weed server -debug=$(debug) -s3 -filer -volume.max=0 -master.volumeSizeLimitMB=100 -volume.preStopSeconds=1 -s3.port=8000 -s3.allowEmptyFolder=false -s3.allowDeleteBucketNotEmpty=false -s3.config=./docker/compose/s3.json &
-	warp client &
-	while ! nc -z localhost 8000 ; do sleep 1 ; done
-	warp mixed --host=127.0.0.1:8000 --access-key=some_access_key1 --secret-key=some_secret_key1 --autoterm
-	pkill warp
-	pkill weed
-
-# curl -o profile "http://127.0.0.1:6060/debug/pprof/profile?debug=1"
-benchmark_with_pprof: debug = 1
-benchmark_with_pprof: benchmark
-
-test: admin-generate
-	cd weed; go test -tags "elastic gocdk sqlite ydb tarantool tikv rclone" -v ./...
-
-# Admin component targets
-admin-generate:
-	@echo "Generating admin component templates..."
-	@cd $(ADMIN_DIR) && $(MAKE) generate
-
-admin-build: admin-generate
-	@echo "Building admin component..."
-	@cd $(ADMIN_DIR) && $(MAKE) build
-
-admin-clean:
-	@echo "Cleaning admin component..."
-	@cd $(ADMIN_DIR) && $(MAKE) clean
-
-admin-dev:
-	@echo "Starting admin development server..."
-	@cd $(ADMIN_DIR) && $(MAKE) dev
-
-admin-run:
-	@echo "Running admin server..."
-	@cd $(ADMIN_DIR) && $(MAKE) run
-
-admin-test:
-	@echo "Testing admin component..."
-	@cd $(ADMIN_DIR) && $(MAKE) test
-
-admin-fmt:
-	@echo "Formatting admin component..."
-	@cd $(ADMIN_DIR) && $(MAKE) fmt
-
-admin-help:
-	@echo "Admin component help..."
-	@cd $(ADMIN_DIR) && $(MAKE) help
+BINARY = weed
+package = github.com/chrislusf/seaweedfs/weed
+
+GO_FLAGS = #-v
+SOURCE_DIR = .
+
+appname := weed
+sources := $(wildcard *.go)
+
+COMMIT ?= $(shell git rev-parse --short HEAD)
+LDFLAGS ?= -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${COMMIT}
+
+build = CGO_ENABLED=0 GOOS=$(1) GOARCH=$(2) go build -ldflags "-extldflags -static $(LDFLAGS)" -o build/$(appname)$(3) $(SOURCE_DIR)
+tar = cd build && tar -cvzf $(1)_$(2).tar.gz $(appname)$(3) && rm $(appname)$(3)
+zip = cd build && zip $(1)_$(2).zip $(appname)$(3) && rm $(appname)$(3)
+
+build_large = CGO_ENABLED=0 GOOS=$(1) GOARCH=$(2) go build -tags 5BytesOffset -ldflags "-extldflags -static $(LDFLAGS)" -o build/$(appname)$(3) $(SOURCE_DIR)
+tar_large = cd build && tar -cvzf $(1)_$(2)_large_disk.tar.gz $(appname)$(3) && rm $(appname)$(3)
+zip_large = cd build && zip $(1)_$(2)_large_disk.zip $(appname)$(3) && rm $(appname)$(3)
+
+all: build
+
+.PHONY : clean deps build linux release windows_build darwin_build linux_build bsd_build clean
+
+clean:
+	go clean -i $(GO_FLAGS) $(SOURCE_DIR)
+	rm -f $(BINARY)
+	rm -rf build/
+
+deps:
+	go get $(GO_FLAGS) -d $(SOURCE_DIR)
+	rm -rf /home/travis/gopath/src/github.com/coreos/etcd/vendor/golang.org/x/net/trace
+	rm -rf /home/travis/gopath/src/go.etcd.io/etcd/vendor/golang.org/x/net/trace
+
+build: deps
+	go build $(GO_FLAGS) -ldflags "$(LDFLAGS)" -o $(BINARY) $(SOURCE_DIR)
+
+linux: deps
+	mkdir -p linux
+	GOOS=linux GOARCH=amd64 go build $(GO_FLAGS) -ldflags "$(LDFLAGS)" -o linux/$(BINARY) $(SOURCE_DIR)
+
+release: deps windows_build darwin_build linux_build bsd_build 5_byte_linux_build 5_byte_darwin_build 5_byte_windows_build
+
+##### LINUX BUILDS #####
+5_byte_linux_build:
+	$(call build_large,linux,amd64,)
+	$(call tar_large,linux,amd64)
+
+5_byte_darwin_build:
+	$(call build_large,darwin,amd64,)
+	$(call tar_large,darwin,amd64)
+
+5_byte_windows_build:
+	$(call build_large,windows,amd64,.exe)
+	$(call zip_large,windows,amd64,.exe)
+
+linux_build: build/linux_arm.tar.gz build/linux_arm64.tar.gz build/linux_386.tar.gz build/linux_amd64.tar.gz
+
+build/linux_386.tar.gz: $(sources)
+	$(call build,linux,386,)
+	$(call tar,linux,386)
+
+build/linux_amd64.tar.gz: $(sources)
+	$(call build,linux,amd64,)
+	$(call tar,linux,amd64)
+
+build/linux_arm.tar.gz: $(sources)
+	$(call build,linux,arm,)
+	$(call tar,linux,arm)
+
+build/linux_arm64.tar.gz: $(sources)
+	$(call build,linux,arm64,)
+	$(call tar,linux,arm64)
+
+##### DARWIN (MAC) BUILDS #####
+darwin_build: build/darwin_amd64.tar.gz
+
+build/darwin_amd64.tar.gz: $(sources)
+	$(call build,darwin,amd64,)
+	$(call tar,darwin,amd64)
+
+##### WINDOWS BUILDS #####
+windows_build: build/windows_386.zip build/windows_amd64.zip
+
+build/windows_386.zip: $(sources)
+	$(call build,windows,386,.exe)
+	$(call zip,windows,386,.exe)
+
+build/windows_amd64.zip: $(sources)
+	$(call build,windows,amd64,.exe)
+	$(call zip,windows,amd64,.exe)
+
+##### BSD BUILDS #####
+bsd_build: build/freebsd_arm.tar.gz build/freebsd_386.tar.gz build/freebsd_amd64.tar.gz \
+	build/netbsd_arm.tar.gz build/netbsd_386.tar.gz build/netbsd_amd64.tar.gz \
+	build/openbsd_arm.tar.gz build/openbsd_386.tar.gz build/openbsd_amd64.tar.gz
+
+build/freebsd_386.tar.gz: $(sources)
+	$(call build,freebsd,386,)
+	$(call tar,freebsd,386)
+
+build/freebsd_amd64.tar.gz: $(sources)
+	$(call build,freebsd,amd64,)
+	$(call tar,freebsd,amd64)
+
+build/freebsd_arm.tar.gz: $(sources)
+	$(call build,freebsd,arm,)
+	$(call tar,freebsd,arm)
+
+build/netbsd_386.tar.gz: $(sources)
+	$(call build,netbsd,386,)
+	$(call tar,netbsd,386)
+
+build/netbsd_amd64.tar.gz: $(sources)
+	$(call build,netbsd,amd64,)
+	$(call tar,netbsd,amd64)
+
+build/netbsd_arm.tar.gz: $(sources)
+	$(call build,netbsd,arm,)
+	$(call tar,netbsd,arm)
+
+build/openbsd_386.tar.gz: $(sources)
+	$(call build,openbsd,386,)
+	$(call tar,openbsd,386)
+
+build/openbsd_amd64.tar.gz: $(sources)
+	$(call build,openbsd,amd64,)
+	$(call tar,openbsd,amd64)
+
+build/openbsd_arm.tar.gz: $(sources)
+	$(call build,openbsd,arm,)
+	$(call tar,openbsd,arm)
346
README.md
346
README.md
|
@ -1,26 +1,24 @@
|
|||
# SeaweedFS
|
||||
|
||||
|
||||
[](https://join.slack.com/t/seaweedfs/shared_invite/enQtMzI4MTMwMjU2MzA3LTEyYzZmZWYzOGQ3MDJlZWMzYmI0OTE4OTJiZjJjODBmMzUxNmYwODg0YjY3MTNlMjBmZDQ1NzQ5NDJhZWI2ZmY)
|
||||
[](https://twitter.com/intent/follow?screen_name=seaweedfs)
|
||||
[](https://github.com/seaweedfs/seaweedfs/actions/workflows/go.yml)
|
||||
[](https://godoc.org/github.com/seaweedfs/seaweedfs/weed)
|
||||
[](https://github.com/seaweedfs/seaweedfs/wiki)
|
||||
[](https://hub.docker.com/r/chrislusf/seaweedfs/)
|
||||
[](https://search.maven.org/search?q=g:com.github.chrislusf)
|
||||
[](https://artifacthub.io/packages/search?repo=seaweedfs)
|
||||
[](https://travis-ci.org/chrislusf/seaweedfs)
|
||||
[](https://godoc.org/github.com/chrislusf/seaweedfs/weed)
|
||||
[](https://github.com/chrislusf/seaweedfs/wiki)
|
||||
[](https://hub.docker.com/r/chrislusf/seaweedfs/)
|
||||
|
||||

|
||||

|
||||
|
||||
<h2 align="center"><a href="https://www.patreon.com/seaweedfs">Sponsor SeaweedFS via Patreon</a></h2>
|
||||
<h2 align="center">Supporting SeaweedFS</h2>
|
||||
|
||||
SeaweedFS is an independent Apache-licensed open source project with its ongoing development made
|
||||
possible entirely thanks to the support of these awesome [backers](https://github.com/seaweedfs/seaweedfs/blob/master/backers.md).
|
||||
possible entirely thanks to the support of these awesome [backers](https://github.com/chrislusf/seaweedfs/blob/master/backers.md).
|
||||
If you'd like to grow SeaweedFS even stronger, please consider joining our
|
||||
<a href="https://www.patreon.com/seaweedfs">sponsors on Patreon</a>.
|
||||
|
||||
Your support will be really appreciated by me and other supporters!
|
||||
|
||||
<h3 align="center"><a href="https://www.patreon.com/seaweedfs">Sponsor SeaweedFS via Patreon</a></h3>
|
||||
|
||||
<!--
|
||||
<h4 align="center">Platinum</h4>
|
||||
|
||||
|
@ -29,168 +27,115 @@ Your support will be really appreciated by me and other supporters!
|
|||
Add your name or icon here
|
||||
</a>
|
||||
</p>
|
||||
-->
|
||||
|
||||
### Gold Sponsors
|
||||
[](https://www.nodion.com)
|
||||
[](https://www.piknik.com)
|
||||
[](https://www.keepsec.ca)
|
||||
<h4 align="center">Gold</h4>
|
||||
|
||||
<table>
|
||||
<tbody>
|
||||
<tr>
|
||||
<td align="center" valign="middle">
|
||||
<a href="" target="_blank">
|
||||
Add your name or icon here
|
||||
</a>
|
||||
</td>
|
||||
</tr>
|
||||
<tr></tr>
|
||||
</tbody>
|
||||
</table>
|
||||
|
||||
-->
|
||||
|
||||
---
|
||||
|
||||
- [Download Binaries for different platforms](https://github.com/seaweedfs/seaweedfs/releases/latest)
|
||||
|
||||
- [Download Binaries for different platforms](https://github.com/chrislusf/seaweedfs/releases/latest)
|
||||
- [SeaweedFS on Slack](https://join.slack.com/t/seaweedfs/shared_invite/enQtMzI4MTMwMjU2MzA3LTEyYzZmZWYzOGQ3MDJlZWMzYmI0OTE4OTJiZjJjODBmMzUxNmYwODg0YjY3MTNlMjBmZDQ1NzQ5NDJhZWI2ZmY)
|
||||
- [SeaweedFS on Twitter](https://twitter.com/SeaweedFS)
|
||||
- [SeaweedFS on Telegram](https://t.me/Seaweedfs)
|
||||
- [SeaweedFS on Reddit](https://www.reddit.com/r/SeaweedFS/)
|
||||
- [SeaweedFS Mailing List](https://groups.google.com/d/forum/seaweedfs)
|
||||
- [Wiki Documentation](https://github.com/seaweedfs/seaweedfs/wiki)
|
||||
- [SeaweedFS White Paper](https://github.com/seaweedfs/seaweedfs/wiki/SeaweedFS_Architecture.pdf)
|
||||
- [SeaweedFS Introduction Slides 2025.5](https://docs.google.com/presentation/d/1tdkp45J01oRV68dIm4yoTXKJDof-EhainlA0LMXexQE/edit?usp=sharing)
|
||||
- [SeaweedFS Introduction Slides 2021.5](https://docs.google.com/presentation/d/1DcxKWlINc-HNCjhYeERkpGXXm6nTCES8mi2W5G0Z4Ts/edit?usp=sharing)
|
||||
- [SeaweedFS Introduction Slides 2019.3](https://www.slideshare.net/chrislusf/seaweedfs-introduction)
|
||||
- [Wiki Documentation](https://github.com/chrislusf/seaweedfs/wiki)
|
||||
- [SeaweedFS Introduction Slides](https://www.slideshare.net/chrislusf/seaweedfs-introduction)
|
||||
|
||||
Table of Contents
|
||||
=================
|
||||
|
||||
* [Quick Start](#quick-start)
|
||||
* [Quick Start for S3 API on Docker](#quick-start-for-s3-api-on-docker)
|
||||
* [Quick Start with Single Binary](#quick-start-with-single-binary)
|
||||
* [Quick Start SeaweedFS S3 on AWS](#quick-start-seaweedfs-s3-on-aws)
|
||||
* [Introduction](#introduction)
|
||||
* [Features](#features)
|
||||
* [Additional Features](#additional-features)
|
||||
* [Filer Features](#filer-features)
|
||||
* [Example: Using Seaweed Object Store](#example-using-seaweed-object-store)
|
||||
* [Architecture](#object-store-architecture)
|
||||
* [Example Usage](#example-usage)
|
||||
* [Architecture](#architecture)
|
||||
* [Compared to Other File Systems](#compared-to-other-file-systems)
|
||||
* [Compared to HDFS](#compared-to-hdfs)
|
||||
* [Compared to GlusterFS, Ceph](#compared-to-glusterfs-ceph)
|
||||
* [Compared to GlusterFS](#compared-to-glusterfs)
|
||||
* [Compared to Ceph](#compared-to-ceph)
|
||||
* [Compared to Minio](#compared-to-minio)
|
||||
* [Dev Plan](#dev-plan)
|
||||
* [Installation Guide](#installation-guide)
|
||||
* [Disk Related Topics](#disk-related-topics)
|
||||
* [Benchmark](#benchmark)
|
||||
* [Enterprise](#enterprise)
|
||||
* [Benchmark](#Benchmark)
|
||||
* [License](#license)
|
||||
|
||||
# Quick Start #
|
||||
|
||||
## Quick Start for S3 API on Docker ##
|
||||
|
||||
`docker run -p 8333:8333 chrislusf/seaweedfs server -s3`
|
||||
|
||||
## Quick Start with Single Binary ##
|
||||
* Download the latest binary from https://github.com/seaweedfs/seaweedfs/releases and unzip a single binary file `weed` or `weed.exe`. Or run `go install github.com/seaweedfs/seaweedfs/weed@latest`.
|
||||
* `export AWS_ACCESS_KEY_ID=admin ; export AWS_SECRET_ACCESS_KEY=key` as the admin credentials to access the object store.
|
||||
* Run `weed server -dir=/some/data/dir -s3` to start one master, one volume server, one filer, and one S3 gateway.
|
||||
|
||||
Also, to increase capacity, just add more volume servers by running `weed volume -dir="/some/data/dir2" -mserver="<master_host>:9333" -port=8081` locally, or on a different machine, or on thousands of machines. That is it!
|
||||
|
||||
## Quick Start SeaweedFS S3 on AWS ##
|
||||
* Setup fast production-ready [SeaweedFS S3 on AWS with cloudformation](https://aws.amazon.com/marketplace/pp/prodview-nzelz5gprlrjc)
|
||||
|
||||
# Introduction #

SeaweedFS is a simple and highly scalable distributed file system. There are two objectives:

1. to store billions of files!
2. to serve the files fast!

SeaweedFS started as an Object Store to handle small files efficiently.
Instead of managing all file metadata in a central master,
the central master only manages volumes on volume servers,
and these volume servers manage files and their metadata.
This relieves concurrency pressure from the central master and spreads file metadata into volume servers,
allowing faster file access (O(1), usually just one disk read operation).

There is only 40 bytes of disk storage overhead for each file's metadata.
It is so simple with O(1) disk reads that you are welcome to challenge the performance with your actual use cases.

SeaweedFS started by implementing [Facebook's Haystack design paper](http://www.usenix.org/event/osdi10/tech/full_papers/Beaver.pdf).
Also, SeaweedFS implements erasure coding with ideas from
[f4: Facebook’s Warm BLOB Storage System](https://www.usenix.org/system/files/conference/osdi14/osdi14-paper-muralidhar.pdf), and has a lot of similarities with [Facebook’s Tectonic Filesystem](https://www.usenix.org/system/files/fast21-pan.pdf).

On top of the object store, the optional [Filer] can support directories and POSIX attributes.
Filer is a separate linearly-scalable stateless server with customizable metadata stores,
e.g., MySql, Postgres, Redis, Cassandra, HBase, Mongodb, Elastic Search, LevelDB, RocksDB, Sqlite, MemSql, TiDB, Etcd, CockroachDB, YDB, etc.

For any distributed key-value store, large values can be offloaded to SeaweedFS.
With its fast access speed and linearly scalable capacity,
SeaweedFS can work as a distributed [Key-Large-Value store][KeyLargeValueStore].

SeaweedFS can transparently integrate with the cloud.
With hot data on the local cluster, and warm data on the cloud with O(1) access time,
SeaweedFS can achieve both fast local access time and elastic cloud storage capacity.
What's more, the cloud storage access API cost is minimized.
Faster and cheaper than direct cloud storage!

[Back to TOC](#table-of-contents)
# Features #

## Additional Features ##

* Can choose no replication or different replication levels, rack and data center aware.
* Automatic master servers failover - no single point of failure (SPOF).
* Automatic Gzip compression depending on file MIME type.
* Automatic compaction to reclaim disk space after deletion or update.
* [Automatic entry TTL expiration][VolumeServerTTL].
* Any server with some disk space can add to the total storage space.
* Adding/Removing servers does **not** cause any data re-balancing unless triggered by admin commands.
* Optional picture resizing.
* Support ETag, Accept-Range, Last-Modified, etc.
* Support in-memory/leveldb/readonly mode tuning for memory/performance balance.
* Support rebalancing the writable and readonly volumes.
* [Customizable Multiple Storage Tiers][TieredStorage]: Customizable storage disk types to balance performance and cost.
* [Transparent cloud integration][CloudTier]: unlimited capacity via tiered cloud storage for warm data.
* [Erasure Coding for warm storage][ErasureCoding]: Rack-aware 10.4 erasure coding reduces storage cost and increases availability.

[Back to TOC](#table-of-contents)
## Filer Features ##

* [Filer server][Filer] provides "normal" directories and files via HTTP.
* [File TTL][FilerTTL] automatically expires file metadata and actual file data.
* [Mount filer][Mount] reads and writes files directly as a local directory via FUSE.
* [Filer Store Replication][FilerStoreReplication] enables HA for filer meta data stores.
* [Active-Active Replication][ActiveActiveAsyncReplication] enables asynchronous one-way or two-way cross-cluster continuous replication.
* [Amazon S3 compatible API][AmazonS3API] accesses files with S3 tooling.
* [Hadoop Compatible File System][Hadoop] accesses files from Hadoop/Spark/Flink/etc or even runs HBase.
* [Async Replication To Cloud][BackupToCloud] has extremely fast local access and backups to Amazon S3, Google Cloud Storage, Azure, BackBlaze.
* [WebDAV] accesses as a mapped drive on Mac and Windows, or from mobile devices.
* [AES256-GCM Encrypted Storage][FilerDataEncryption] safely stores the encrypted data.
* [Super Large Files][SuperLargeFiles] stores large or super large files in tens of TB.
* [Cloud Drive][CloudDrive] mounts cloud storage to the local cluster, cached for fast read and write with asynchronous write back.
* [Gateway to Remote Object Store][GatewayToRemoteObjectStore] mirrors bucket operations to remote object storage, in addition to [Cloud Drive][CloudDrive].

## Kubernetes ##

* [Kubernetes CSI Driver][SeaweedFsCsiDriver] A Container Storage Interface (CSI) Driver. ([Docker Hub](https://hub.docker.com/r/chrislusf/seaweedfs-csi-driver/))
* [SeaweedFS Operator](https://github.com/seaweedfs/seaweedfs-operator)
[Filer]: https://github.com/seaweedfs/seaweedfs/wiki/Directories-and-Files
[SuperLargeFiles]: https://github.com/seaweedfs/seaweedfs/wiki/Data-Structure-for-Large-Files
[Mount]: https://github.com/seaweedfs/seaweedfs/wiki/FUSE-Mount
[AmazonS3API]: https://github.com/seaweedfs/seaweedfs/wiki/Amazon-S3-API
[BackupToCloud]: https://github.com/seaweedfs/seaweedfs/wiki/Async-Replication-to-Cloud
[Hadoop]: https://github.com/seaweedfs/seaweedfs/wiki/Hadoop-Compatible-File-System
[WebDAV]: https://github.com/seaweedfs/seaweedfs/wiki/WebDAV
[ErasureCoding]: https://github.com/seaweedfs/seaweedfs/wiki/Erasure-coding-for-warm-storage
[TieredStorage]: https://github.com/seaweedfs/seaweedfs/wiki/Tiered-Storage
[CloudTier]: https://github.com/seaweedfs/seaweedfs/wiki/Cloud-Tier
[FilerDataEncryption]: https://github.com/seaweedfs/seaweedfs/wiki/Filer-Data-Encryption
[FilerTTL]: https://github.com/seaweedfs/seaweedfs/wiki/Filer-Stores
[VolumeServerTTL]: https://github.com/seaweedfs/seaweedfs/wiki/Store-file-with-a-Time-To-Live
[SeaweedFsCsiDriver]: https://github.com/seaweedfs/seaweedfs-csi-driver
[ActiveActiveAsyncReplication]: https://github.com/seaweedfs/seaweedfs/wiki/Filer-Active-Active-cross-cluster-continuous-synchronization
[FilerStoreReplication]: https://github.com/seaweedfs/seaweedfs/wiki/Filer-Store-Replication
[KeyLargeValueStore]: https://github.com/seaweedfs/seaweedfs/wiki/Filer-as-a-Key-Large-Value-Store
[CloudDrive]: https://github.com/seaweedfs/seaweedfs/wiki/Cloud-Drive-Architecture
[GatewayToRemoteObjectStore]: https://github.com/seaweedfs/seaweedfs/wiki/Gateway-to-Remote-Object-Storage

[Back to TOC](#table-of-contents)
## Example: Using Seaweed Object Store ##

By default, the master node runs on port 9333, and the volume nodes run on port 8080.
Let's start one master node, and two volume nodes on port 8080 and 8081. Ideally, they should be started from different machines. We'll use localhost as an example.
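A minimal local sketch of starting them (the data directories and `-max` volume counts are just example values):

```
> weed master
> weed volume -dir="/tmp/data1" -max=5  -mserver="localhost:9333" -port=8080 &
> weed volume -dir="/tmp/data2" -max=10 -mserver="localhost:9333" -port=8081 &
```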
SeaweedFS uses HTTP REST operations to read, write, and delete. The responses are in JSON or JSONP format.

### Write File ###

To upload a file: first, send a HTTP POST, PUT, or GET request to `/dir/assign` to get an `fid` and a volume server URL:

```
> curl http://localhost:9333/dir/assign
```
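The assign call returns the generated `fid` and the volume server to write to; the exact values below are only illustrative:

```
{"count":1,"fid":"3,01637037d6","url":"127.0.0.1:8080","publicUrl":"localhost:8080"}
```

Second, to store the file content, send a HTTP multi-part POST to `url + '/' + fid` from the response:

```
> curl -F file=@/path/to/myphoto.jpg http://127.0.0.1:8080/3,01637037d6
{"name":"myphoto.jpg","size":43234,"eTag":"1f0edd09"}
```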
### Read File ###

First look up the volume server's URLs by the file's volumeId. Since (usually) there are not too many volume servers, and volumes don't move often, you can cache the results most of the time. Depending on the replication type, one volume can have multiple replica locations. Just randomly pick one location to read.
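A lookup request against the master looks like this (response values are illustrative):

```
> curl "http://localhost:9333/dir/lookup?volumeId=3"
{"volumeId":"3","locations":[{"publicUrl":"localhost:8080","url":"localhost:8080"}]}
```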
Now you can take the public URL, render the URL, or directly read from the volume server via the URL:

```
http://localhost:8080/3,01637037d6.jpg
```
The replication parameter options are described in detail [on the wiki][Replication].

[Replication]: https://github.com/seaweedfs/seaweedfs/wiki/Replication

You can also set the default replication strategy when starting the master server.
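For example, to default every write to one extra copy on a different rack in the same data center, a sketch (check `weed master -h` for the flag in your version):

```
> weed master -defaultReplication=010
```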
When requesting a file key, an optional "dataCenter" parameter can limit the assigned volume to the specified data center.
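For example, assuming a data center named `dc1` exists in your topology:

```
> curl "http://localhost:9333/dir/assign?dataCenter=dc1"
```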
* [Chunking large files][feat-3]
* [Collection as a Simple Name Space][feat-4]

[feat-1]: https://github.com/seaweedfs/seaweedfs/wiki/Failover-Master-Server
[feat-2]: https://github.com/seaweedfs/seaweedfs/wiki/Optimization#insert-with-your-own-keys
[feat-3]: https://github.com/seaweedfs/seaweedfs/wiki/Optimization#upload-large-files
[feat-4]: https://github.com/seaweedfs/seaweedfs/wiki/Optimization#collection-as-a-simple-name-space

[Back to TOC](#table-of-contents)
## Object Store Architecture ##

Usually distributed file systems split each file into chunks; a central master keeps a mapping of filenames, chunk indices to chunk handles, and also which chunks each chunk server has.
On each write request, the master server also generates a file key, which is a growing 64-bit unsigned integer.

### Write and Read files ###

When a client sends a write request, the master server returns (volume id, file key, file cookie, volume node URL) for the file. The client then contacts the volume node and POSTs the file content.

When a client needs to read a file based on (volume id, file key, file cookie), it asks the master server by the volume id for the (volume node URL, volume node public URL), or retrieves this from a cache. Then the client can GET the content, or just render the URL on web pages and let browsers fetch the content.

Please see the example for details on the write-read process.
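As a concrete example, in the file id `3,01637037d6` from the example above, `3` before the comma is the volume id, and the remainder is the file key plus the file cookie, both encoded in hex.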
Each individual file size is limited to the volume size.

### Saving memory ###

All file meta information stored on a volume server is readable from memory without disk access. Each file takes just a 16-byte map entry of <64bit key, 32bit offset, 32bit size>. Of course, each map entry has its own space cost for the map. But usually the disk space runs out before the memory does.
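A quick back-of-the-envelope check: 100 million files on one volume server take about 100M × 16 bytes ≈ 1.6 GB of memory for the index entries themselves, plus the map's own overhead.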
### Tiered Storage to the cloud ###

Usually hot data are fresh and warm data are old. SeaweedFS puts the newly created volumes on local servers, and optionally uploads the older volumes to the cloud.

With the O(1) access time, the network latency cost is kept at a minimum.

If the hot/warm data is split as 20/80, with 20 servers you can achieve the storage capacity of 100 servers. That's a cost saving of 80%! Or you can repurpose the 80 servers to store new data as well, and get 5X storage throughput.

[Back to TOC](#table-of-contents)
## Compared to Other File Systems ##

Most other distributed file systems seem more complicated than necessary.

SeaweedFS is meant to be fast and simple, in both setup and operation. If you do not understand how it works by the time you reach here, we've failed! Please raise an issue with any questions, or update this file with clarifications.

SeaweedFS is constantly moving forward. Same with other systems. These comparisons can be outdated quickly. Please help to keep them updated.

[Back to TOC](#table-of-contents)
### Compared to HDFS ###

HDFS uses the chunk approach for each file, and is ideal for storing large files. SeaweedFS is ideal for serving relatively smaller files quickly and concurrently.

### Compared to GlusterFS, Ceph ###

The architectures are mostly the same. SeaweedFS aims to store and read files fast, with a simple and flat architecture. The main differences are:

* SeaweedFS optimizes for small files, ensuring O(1) disk seek operation, and can also handle large files.
* SeaweedFS statically assigns a volume id for a file. Locating file content becomes just a lookup of the volume id, which can be easily cached.
* SeaweedFS Filer metadata store can be any well-known and proven data store, e.g., Redis, Cassandra, HBase, Mongodb, Elastic Search, MySql, Postgres, Sqlite, MemSql, TiDB, CockroachDB, Etcd, YDB, etc., and is easy to customize.
* SeaweedFS Volume server also communicates directly with clients via HTTP, supporting range queries, direct uploads, etc.

| System          | File Metadata                    | File Content Read | POSIX     | REST API | Optimized for large number of small files |
| --------------- | -------------------------------- | ----------------- | --------- | -------- | ----------------------------------------- |
| SeaweedFS       | lookup volume id, cacheable      | O(1) disk seek    |           | Yes      | Yes                                        |
| SeaweedFS Filer | Linearly Scalable, Customizable  | O(1) disk seek    | FUSE      | Yes      | Yes                                        |
| GlusterFS       | hashing                          |                   | FUSE, NFS |          |                                            |
| Ceph            | hashing + rules                  |                   | FUSE      | Yes      |                                            |
| MooseFS         | in memory                        |                   | FUSE      |          | No                                         |
| MinIO           | separate meta file for each file |                   |           | Yes      | No                                         |

[Back to TOC](#table-of-contents)
|
@ -450,17 +392,17 @@ MooseFS Master Server keeps all meta data in memory. Same issue as HDFS namenode
|
|||
|
||||
### Compared to Ceph ###
|
||||
|
||||
Ceph can be setup similar to SeaweedFS as a key->blob store. It is much more complicated, with the need to support layers on top of it. [Here is a more detailed comparison](https://github.com/seaweedfs/seaweedfs/issues/120)
|
||||
Ceph can be setup similar to SeaweedFS as a key->blob store. It is much more complicated, with the need to support layers on top of it. [Here is a more detailed comparison](https://github.com/chrislusf/seaweedfs/issues/120)
|
||||
|
||||
SeaweedFS has a centralized master group to look up free volumes, while Ceph uses hashing and metadata servers to locate its objects. Having a centralized master makes it easy to code and manage.
|
||||
|
||||
Ceph, like SeaweedFS, is based on the object store RADOS. Ceph is rather complicated with mixed reviews.
|
||||
Same as SeaweedFS, Ceph is also based on the object store RADOS. Ceph is rather complicated with mixed reviews.
|
||||
|
||||
Ceph uses CRUSH hashing to automatically manage data placement, which is efficient to locate the data. But the data has to be placed according to the CRUSH algorithm. Any wrong configuration would cause data loss. Topology changes, such as adding new servers to increase capacity, will cause data migration with high IO cost to fit the CRUSH algorithm. SeaweedFS places data by assigning them to any writable volumes. If writes to one volume failed, just pick another volume to write. Adding more volumes is also as simple as it can be.
|
||||
Ceph uses CRUSH hashing to automatically manage the data placement. SeaweedFS places data by assigned volumes.
|
||||
|
||||
SeaweedFS is optimized for small files. Small files are stored as one continuous block of content, with at most 8 unused bytes between files. Small file access is O(1) disk read.
|
||||
|
||||
SeaweedFS Filer uses off-the-shelf stores, such as MySql, Postgres, Sqlite, Mongodb, Redis, Elastic Search, Cassandra, HBase, MemSql, TiDB, CockroachCB, Etcd, YDB, to manage file directories. These stores are proven, scalable, and easier to manage.
|
||||
SeaweedFS Filer uses off-the-shelf stores, such as MySql, Postgres, Mongodb, Redis, Etcd, Cassandra, MemSql, TiDB, CockroachCB, to manage file directories. These stores are proven, scalable, and easier to manage.
|
||||
|
||||
| SeaweedFS | comparable to Ceph | advantage |
|
||||
| ------------- | ------------- | ---------------- |
|
||||
|
@ -470,31 +412,19 @@ SeaweedFS Filer uses off-the-shelf stores, such as MySql, Postgres, Sqlite, Mong
|
|||
|
||||
[Back to TOC](#table-of-contents)
|
||||
|
||||
### Compared to MinIO ###

MinIO follows AWS S3 closely and is ideal for testing the S3 API. It has a good UI, policies, versioning, etc. SeaweedFS is trying to catch up here. It is also possible to put MinIO as a gateway in front of SeaweedFS later.

MinIO metadata are in simple files. Each file write incurs extra writes to the corresponding meta file.

MinIO does not have optimization for lots of small files. The files are simply stored as-is on local disks. Plus the extra meta file and shards for erasure coding, this only amplifies the LOSF problem.

MinIO requires multiple disk IOs to read one file. SeaweedFS has O(1) disk reads, even for erasure-coded files.

MinIO has full-time erasure coding. SeaweedFS uses replication on hot data for faster speed and optionally applies erasure coding on warm data.

MinIO does not have POSIX-like API support.

MinIO has specific requirements on storage layout. It is not flexible to adjust capacity. In SeaweedFS, just start one volume server pointing to the master. That's all.
## Dev Plan ##

* More tools and documentation, on how to manage and scale the system.
* Read and write stream data.
* Support structured data.

This is a super exciting project! And we need helpers and [support](https://www.patreon.com/seaweedfs)!

BTW, we suggest running the code style check script `util/gostd` before you push your branch to the remote; it will make SeaweedFS easier to review, maintain, and develop:

```
$ ./util/gostd
```

[Back to TOC](#table-of-contents)
## Installation Guide ##

Step 1: install go on your machine and set up the environment by following the instructions at:

https://golang.org/doc/install

make sure to define your $GOPATH

Step 2: checkout this repo:

```bash
git clone https://github.com/seaweedfs/seaweedfs.git
```

Step 3: download, compile, and install the project by executing the following command:

```bash
cd seaweedfs/weed && make install
```

Once this is done, you will find the executable "weed" in your `$GOPATH/bin` directory
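To verify the build, print the version (the output format varies by release):

```
$GOPATH/bin/weed version
```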
[Back to TOC](#table-of-contents)
## Disk Related Topics ##

## Benchmark ##
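The numbers below come from the built-in benchmark tool; a typical run against a local master looks like the sketch here (see `weed benchmark -h` for the full flag list):

```
weed benchmark -master=localhost:9333
```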
Write 1 million 1KB files:

```
Concurrency Level:      16
Time taken for tests:   66.753 seconds
Completed requests:     1048576
Failed requests:        0
Total transferred:      1106789009 bytes
Requests per second:    15708.23 [#/sec]
```
Randomly read 1 million files:

```
Concurrency Level:      16
Time taken for tests:   22.301 seconds
Completed requests:     1048576
Failed requests:        0
Total transferred:      1106812873 bytes
Requests per second:    47019.38 [#/sec]

Percentage of the requests served within a certain time (ms)
  100%     54.1 ms
```
### Run WARP and launch a mixed benchmark. ###

```
make benchmark
warp: Benchmark data written to "warp-mixed-2023-10-16[102354]-l70a.csv.zst"
Mixed operations.
Operation: DELETE, 10%, Concurrency: 20, Ran 4m59s.
 * Throughput: 6.19 obj/s

Operation: GET, 45%, Concurrency: 20, Ran 5m0s.
 * Throughput: 279.85 MiB/s, 27.99 obj/s

Operation: PUT, 15%, Concurrency: 20, Ran 5m0s.
 * Throughput: 89.86 MiB/s, 8.99 obj/s

Operation: STAT, 30%, Concurrency: 20, Ran 5m0s.
 * Throughput: 18.63 obj/s

Cluster Total: 369.74 MiB/s, 61.79 obj/s, 0 errors over 5m0s.
```

To see segmented request statistics, use the --analyze.v parameter.

```
warp analyze --analyze.v warp-mixed-2023-10-16[102354]-l70a.csv.zst
18642 operations loaded... Done!
Mixed operations.
----------------------------------------
Operation: DELETE - total: 1854, 10.0%, Concurrency: 20, Ran 5m0s, starting 2023-10-16 10:23:57.115 +0500 +05
 * Throughput: 6.19 obj/s

Requests considered: 1855:
 * Avg: 104ms, 50%: 30ms, 90%: 207ms, 99%: 1.355s, Fastest: 1ms, Slowest: 4.613s, StdDev: 320ms

----------------------------------------
Operation: GET - total: 8388, 45.3%, Size: 10485760 bytes. Concurrency: 20, Ran 5m0s, starting 2023-10-16 10:23:57.12 +0500 +05
 * Throughput: 279.77 MiB/s, 27.98 obj/s

Requests considered: 8389:
 * Avg: 221ms, 50%: 106ms, 90%: 492ms, 99%: 1.739s, Fastest: 8ms, Slowest: 8.633s, StdDev: 383ms
 * TTFB: Avg: 81ms, Best: 2ms, 25th: 24ms, Median: 39ms, 75th: 65ms, 90th: 171ms, 99th: 669ms, Worst: 4.783s StdDev: 163ms
 * First Access: Avg: 240ms, 50%: 105ms, 90%: 511ms, 99%: 2.08s, Fastest: 12ms, Slowest: 8.633s, StdDev: 480ms
 * First Access TTFB: Avg: 88ms, Best: 2ms, 25th: 24ms, Median: 38ms, 75th: 64ms, 90th: 179ms, 99th: 919ms, Worst: 4.783s StdDev: 199ms
 * Last Access: Avg: 219ms, 50%: 106ms, 90%: 463ms, 99%: 1.782s, Fastest: 9ms, Slowest: 8.633s, StdDev: 416ms
 * Last Access TTFB: Avg: 81ms, Best: 2ms, 25th: 24ms, Median: 39ms, 75th: 65ms, 90th: 161ms, 99th: 657ms, Worst: 4.783s StdDev: 176ms

----------------------------------------
Operation: PUT - total: 2688, 14.5%, Size: 10485760 bytes. Concurrency: 20, Ran 5m0s, starting 2023-10-16 10:23:57.115 +0500 +05
 * Throughput: 89.83 MiB/s, 8.98 obj/s

Requests considered: 2689:
 * Avg: 1.165s, 50%: 878ms, 90%: 2.015s, 99%: 5.74s, Fastest: 99ms, Slowest: 8.264s, StdDev: 968ms

----------------------------------------
Operation: STAT - total: 5586, 30.2%, Concurrency: 20, Ran 5m0s, starting 2023-10-16 10:23:57.113 +0500 +05
 * Throughput: 18.63 obj/s

Requests considered: 5587:
 * Avg: 15ms, 50%: 11ms, 90%: 34ms, 99%: 80ms, Fastest: 0s, Slowest: 245ms, StdDev: 17ms
 * First Access: Avg: 14ms, 50%: 10ms, 90%: 33ms, 99%: 69ms, Fastest: 0s, Slowest: 203ms, StdDev: 16ms
 * Last Access: Avg: 15ms, 50%: 11ms, 90%: 34ms, 99%: 74ms, Fastest: 0s, Slowest: 203ms, StdDev: 17ms

Cluster Total: 369.64 MiB/s, 61.77 obj/s, 0 errors over 5m0s.
Total Errors:0.
```

[Back to TOC](#table-of-contents)
## Enterprise ##

For enterprise users, please visit [seaweedfs.com](https://seaweedfs.com) for the SeaweedFS Enterprise Edition,
which has a self-healing storage format with better data protection.

[Back to TOC](#table-of-contents)

## License ##

The text of this page is available for modification and reuse under the terms of the Creative Commons Attribution-Sharealike 3.0 Unported License and the GNU Free Documentation License (unversioned, with no invariant sections, front-cover texts, or back-cover texts).

[Back to TOC](#table-of-contents)

## Stargazers over time ##

[![Stargazers over time](https://starchart.cc/chrislusf/seaweedfs.svg)](https://starchart.cc/chrislusf/seaweedfs)

backers.md

<h1 align="center">Sponsors &amp; Backers</h1>

- [Become a backer or sponsor on Patreon](https://www.patreon.com/seaweedfs).

<h2 align="center">Generous Backers ($50+)</h2>

- [4Sight Imaging](https://www.4sightimaging.com/)
- [Evercam Camera Management Software](https://evercam.io/)
- [Spherical Elephant GmbH](https://www.sphericalelephant.com)
- [WizardTales GmbH](https://www.wizardtales.com)
- [Nimbus Web Services](https://nimbusws.com)
- [Admiral](https://getadmiral.com)

<h2 align="center">Backers</h2>

- [ColorfulClouds Tech Co. Ltd.](https://caiyunai.com/)
- [Haravan - Ecommerce Platform](https://www.haravan.com)
- PeterCxy - Creator of Shelter App
- [Hive Games](https://playhive.com/)
- Flowm
- Yoni Nakache
- Catalin Constantin
- MingLi Yuan
- Leroy van Logchem

docker/Dockerfile

FROM frolvlad/alpine-glibc

# Supercronic install settings
ENV SUPERCRONIC_URL=https://github.com/aptible/supercronic/releases/download/v0.1.8/supercronic-linux-amd64 \
    SUPERCRONIC=supercronic-linux-amd64 \
    SUPERCRONIC_SHA1SUM=be43e64c45acd6ec4fce5831e03759c89676a0ea

# Install SeaweedFS and Supercronic (for cron job mode)
# Tried to use curl only (curl -o /tmp/linux_amd64.tar.gz ...), however it turned out that the following tar command failed with "gzip: stdin: not in gzip format"
RUN apk add --no-cache --virtual build-dependencies --update wget curl ca-certificates && \
    wget -P /tmp https://github.com/$(curl -s -L https://github.com/chrislusf/seaweedfs/releases/latest | egrep -o 'chrislusf/seaweedfs/releases/download/.*/linux_amd64.tar.gz') && \
    tar -C /usr/bin/ -xzvf /tmp/linux_amd64.tar.gz && \
    curl -fsSLO "$SUPERCRONIC_URL" && \
    echo "${SUPERCRONIC_SHA1SUM}  ${SUPERCRONIC}" | sha1sum -c - && \
    chmod +x "$SUPERCRONIC" && \
    mv "$SUPERCRONIC" "/usr/local/bin/${SUPERCRONIC}" && \
    ln -s "/usr/local/bin/${SUPERCRONIC}" /usr/local/bin/supercronic && \
    apk del build-dependencies && \
    rm -rf /tmp/*

# volume server grpc port
EXPOSE 18080
# volume server http port
EXPOSE 8080
# filer server grpc port
EXPOSE 18888
# filer server http port
EXPOSE 8888
# master server shared grpc port
EXPOSE 19333
# master server shared http port
EXPOSE 9333
# s3 server http port
EXPOSE 8333

RUN mkdir -p /data/filerldb2

VOLUME /data

COPY filer.toml /etc/seaweedfs/filer.toml
COPY entrypoint.sh /entrypoint.sh
RUN chmod +x /entrypoint.sh

ENTRYPOINT ["/entrypoint.sh"]

FROM ubuntu:22.04

LABEL author="Chris Lu"

RUN apt-get update && apt-get install -y curl fio fuse
RUN mkdir -p /etc/seaweedfs /data/filerldb2

COPY ./weed /usr/bin/
COPY ./filer.toml /etc/seaweedfs/filer.toml
COPY ./entrypoint.sh /entrypoint.sh

# volume server grpc port
EXPOSE 18080
# volume server http port
EXPOSE 8080
# filer server grpc port
EXPOSE 18888
# filer server http port
EXPOSE 8888
# master server shared grpc port
EXPOSE 19333
# master server shared http port
EXPOSE 9333

VOLUME /data
WORKDIR /data

RUN chmod +x /entrypoint.sh

ENTRYPOINT ["/entrypoint.sh"]

FROM golang:1.24-alpine as builder
RUN apk add git g++ fuse
RUN mkdir -p /go/src/github.com/seaweedfs/
RUN git clone https://github.com/seaweedfs/seaweedfs /go/src/github.com/seaweedfs/seaweedfs
ARG BRANCH=${BRANCH:-master}
ARG TAGS
RUN cd /go/src/github.com/seaweedfs/seaweedfs && git checkout $BRANCH
RUN cd /go/src/github.com/seaweedfs/seaweedfs/weed \
  && export LDFLAGS="-X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=$(git rev-parse --short HEAD)" \
  && CGO_ENABLED=0 go install -tags "$TAGS" -ldflags "-extldflags -static ${LDFLAGS}"

FROM alpine AS final
LABEL author="Chris Lu"
COPY --from=builder /go/bin/weed /usr/bin/
RUN mkdir -p /etc/seaweedfs
COPY --from=builder /go/src/github.com/seaweedfs/seaweedfs/docker/filer.toml /etc/seaweedfs/filer.toml
COPY --from=builder /go/src/github.com/seaweedfs/seaweedfs/docker/entrypoint.sh /entrypoint.sh
RUN apk add fuse # for weed mount

# volume server grpc port
EXPOSE 18080
# volume server http port
EXPOSE 8080
# filer server grpc port
EXPOSE 18888
# filer server http port
EXPOSE 8888
# master server shared grpc port
EXPOSE 19333
# master server shared http port
EXPOSE 9333
# s3 server http port
EXPOSE 8333
# webdav server http port
EXPOSE 7333

RUN mkdir -p /data/filerldb2

VOLUME /data
WORKDIR /data

RUN chmod +x /entrypoint.sh

FROM alpine AS final
LABEL author="Chris Lu"
COPY ./weed /usr/bin/
COPY ./weed_pub* /usr/bin/
COPY ./weed_sub* /usr/bin/
RUN mkdir -p /etc/seaweedfs
COPY ./filer.toml /etc/seaweedfs/filer.toml
COPY ./entrypoint.sh /entrypoint.sh
RUN apk add fuse # for weed mount
RUN apk add curl # for health checks

# volume server grpc port
EXPOSE 18080
# volume server http port
EXPOSE 8080
# filer server grpc port
EXPOSE 18888
# filer server http port
EXPOSE 8888
# master server shared grpc port
EXPOSE 19333
# master server shared http port
EXPOSE 9333
# s3 server http port
EXPOSE 8333
# webdav server http port
EXPOSE 7333

RUN mkdir -p /data/filerldb2

VOLUME /data
WORKDIR /data

RUN chmod +x /entrypoint.sh

FROM golang:1.24 as builder

RUN apt-get update
RUN apt-get install -y build-essential libsnappy-dev zlib1g-dev libbz2-dev libgflags-dev liblz4-dev libzstd-dev

ENV ROCKSDB_VERSION v10.2.1

# build RocksDB
RUN cd /tmp && \
    git clone https://github.com/facebook/rocksdb.git /tmp/rocksdb --depth 1 --single-branch --branch $ROCKSDB_VERSION && \
    cd rocksdb && \
    PORTABLE=1 make static_lib && \
    make install-static

ENV CGO_CFLAGS "-I/tmp/rocksdb/include"
ENV CGO_LDFLAGS "-L/tmp/rocksdb -lrocksdb -lstdc++ -lm -lz -lbz2 -lsnappy -llz4 -lzstd"

FROM golang:1.24 as builder

RUN apt-get update
RUN apt-get install -y build-essential libsnappy-dev zlib1g-dev libbz2-dev libgflags-dev liblz4-dev libzstd-dev

ENV ROCKSDB_VERSION v10.2.1

# build RocksDB
RUN cd /tmp && \
    git clone https://github.com/facebook/rocksdb.git /tmp/rocksdb --depth 1 --single-branch --branch $ROCKSDB_VERSION && \
    cd rocksdb && \
    PORTABLE=1 make static_lib && \
    make install-static

ENV CGO_CFLAGS "-I/tmp/rocksdb/include"
ENV CGO_LDFLAGS "-L/tmp/rocksdb -lrocksdb -lstdc++ -lm -lz -lbz2 -lsnappy -llz4 -lzstd"

# build SeaweedFS
RUN mkdir -p /go/src/github.com/seaweedfs/
RUN git clone https://github.com/seaweedfs/seaweedfs /go/src/github.com/seaweedfs/seaweedfs
ARG BRANCH=${BRANCH:-master}
RUN cd /go/src/github.com/seaweedfs/seaweedfs && git checkout $BRANCH
RUN cd /go/src/github.com/seaweedfs/seaweedfs/weed \
  && export LDFLAGS="-X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=$(git rev-parse --short HEAD)" \
  && go install -tags "5BytesOffset rocksdb" -ldflags "-extldflags -static ${LDFLAGS}"


FROM alpine AS final
LABEL author="Chris Lu"
COPY --from=builder /go/bin/weed /usr/bin/
RUN mkdir -p /etc/seaweedfs
COPY --from=builder /go/src/github.com/seaweedfs/seaweedfs/docker/filer_rocksdb.toml /etc/seaweedfs/filer.toml
COPY --from=builder /go/src/github.com/seaweedfs/seaweedfs/docker/entrypoint.sh /entrypoint.sh
RUN apk add fuse snappy gflags

# volume server grpc port
EXPOSE 18080
# volume server http port
EXPOSE 8080
# filer server grpc port
EXPOSE 18888
# filer server http port
EXPOSE 8888
# master server shared grpc port
EXPOSE 19333
# master server shared http port
EXPOSE 9333
# s3 server http port
EXPOSE 8333
# webdav server http port
EXPOSE 7333

RUN mkdir -p /data/filer_rocksdb

VOLUME /data

WORKDIR /data

RUN chmod +x /entrypoint.sh

ENTRYPOINT ["/entrypoint.sh"]

FROM chrislusf/rocksdb_dev_env as builder

# build SeaweedFS
RUN mkdir -p /go/src/github.com/seaweedfs/
ADD . /go/src/github.com/seaweedfs/seaweedfs
RUN ls -al /go/src/github.com/seaweedfs/ && \
    cd /go/src/github.com/seaweedfs/seaweedfs/weed \
    && export LDFLAGS="-X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=$(git rev-parse --short HEAD)" \
    && go install -tags "5BytesOffset rocksdb" -ldflags "-extldflags -static ${LDFLAGS}"


FROM alpine AS final
LABEL author="Chris Lu"
COPY --from=builder /go/bin/weed /usr/bin/
RUN mkdir -p /etc/seaweedfs
COPY --from=builder /go/src/github.com/seaweedfs/seaweedfs/docker/filer_rocksdb.toml /etc/seaweedfs/filer.toml
COPY --from=builder /go/src/github.com/seaweedfs/seaweedfs/docker/entrypoint.sh /entrypoint.sh
RUN apk add fuse snappy gflags tmux

# volume server grpc port
EXPOSE 18080
# volume server http port
EXPOSE 8080
# filer server grpc port
EXPOSE 18888
# filer server http port
EXPOSE 8888
# master server shared grpc port
EXPOSE 19333
# master server shared http port
EXPOSE 9333
# s3 server http port
EXPOSE 8333
# webdav server http port
EXPOSE 7333

RUN mkdir -p /data/filer_rocksdb

VOLUME /data

WORKDIR /data

RUN chmod +x /entrypoint.sh

ENTRYPOINT ["/entrypoint.sh"]

FROM ubuntu:20.04

RUN DEBIAN_FRONTEND=noninteractive apt-get update && \
    DEBIAN_FRONTEND=noninteractive apt-get upgrade -y && \
    DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
    git \
    sudo \
    debianutils \
    python3-pip \
    python3-virtualenv \
    python3-dev \
    libevent-dev \
    libffi-dev \
    libxml2-dev \
    libxslt-dev \
    zlib1g-dev && \
    DEBIAN_FRONTEND=noninteractive apt-get clean && \
    rm -rf /var/lib/apt/lists/* && \
    git clone https://github.com/ceph/s3-tests.git /opt/s3-tests

WORKDIR /opt/s3-tests
RUN ./bootstrap

ENV \
    NOSETESTS_EXCLUDE="" \
    NOSETESTS_ATTR="" \
    NOSETESTS_OPTIONS="" \
    S3TEST_CONF="/s3tests.conf"

ENTRYPOINT ["/bin/bash", "-c"]
CMD ["sleep 30 && exec ./virtualenv/bin/nosetests ${NOSETESTS_OPTIONS-} ${NOSETESTS_ATTR:+-a $NOSETESTS_ATTR} ${NOSETESTS_EXCLUDE:+-e $NOSETESTS_EXCLUDE}"]

FROM tarantool/tarantool:3.3.1 AS builder

# install dependencies
RUN apt update && \
    apt install -y git unzip cmake tt=2.7.0

# init tt dir structure, create dir for app, create symlink
RUN tt init && \
    mkdir app && \
    ln -sfn ${PWD}/app/ ${PWD}/instances.enabled/app

# copy cluster configs
COPY tarantool /opt/tarantool/app

# build app
RUN tt build app

docker/Makefile

all: gen

gen: dev

cgo ?= 0
binary:
	export SWCOMMIT=$(shell git rev-parse --short HEAD)
	export SWLDFLAGS="-X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=$(SWCOMMIT)"
	cd ../weed && CGO_ENABLED=$(cgo) GOOS=linux go build $(options) -tags "$(tags)" -ldflags "-s -w -extldflags -static $(SWLDFLAGS)" -o weed_binary && mv weed_binary ../docker/weed
	cd ../other/mq_client_example/agent_pub_record && CGO_ENABLED=$(cgo) GOOS=linux go build && mv agent_pub_record ../../../docker/
	cd ../other/mq_client_example/agent_sub_record && CGO_ENABLED=$(cgo) GOOS=linux go build && mv agent_sub_record ../../../docker/

binary_race: options = -race
binary_race: cgo = 1
binary_race: binary

build: binary
	docker build --no-cache -t chrislusf/seaweedfs:local -f Dockerfile.local .
	rm ./weed

build_e2e: binary_race
	docker build --no-cache -t chrislusf/seaweedfs:e2e -f Dockerfile.e2e .

go_build: # make go_build tags=elastic,ydb,gocdk,hdfs,5BytesOffset,tarantool
	docker build --build-arg TAGS=$(tags) --no-cache -t chrislusf/seaweedfs:go_build -f Dockerfile.go_build .

go_build_large_disk:
	docker build --build-arg TAGS=large_disk --no-cache -t chrislusf/seaweedfs:large_disk -f Dockerfile.go_build .

build_rocksdb_dev_env:
	docker build --no-cache -t chrislusf/rocksdb_dev_env -f Dockerfile.rocksdb_dev_env .

build_rocksdb_local: build_rocksdb_dev_env
	cd .. ; docker build --no-cache -t chrislusf/seaweedfs:rocksdb_local -f docker/Dockerfile.rocksdb_large_local .

build_rocksdb:
	docker build --no-cache -t chrislusf/seaweedfs:rocksdb -f Dockerfile.rocksdb_large .

build_tarantool_dev_env:
	docker build --no-cache -t chrislusf/tarantool_dev_env -f Dockerfile.tarantool.dev_env .

s3tests_build:
	docker build --no-cache -t chrislusf/ceph-s3-tests:local -f Dockerfile.s3tests .

dev: build
	docker compose -f compose/local-dev-compose.yml -p seaweedfs up

dev_race: binary_race
	docker compose -f compose/local-dev-compose.yml -p seaweedfs up

dev_tls: build certstrap
	ENV_FILE="tls.env" docker compose -f compose/local-dev-compose.yml -p seaweedfs up

dev_mount: build
	docker compose -f compose/local-mount-compose.yml -p seaweedfs up

run_image: build
	docker run --rm -ti --device /dev/fuse --cap-add SYS_ADMIN --entrypoint /bin/sh chrislusf/seaweedfs:local

profile_mount: build
	docker compose -f compose/local-mount-profile-compose.yml -p seaweedfs up

k8s: build
	docker compose -f compose/local-k8s-compose.yml -p seaweedfs up

dev_registry: build
	docker compose -f compose/local-registry-compose.yml -p seaweedfs up

dev_replicate:
	docker build --build-arg TAGS=gocdk --no-cache -t chrislusf/seaweedfs:local -f Dockerfile.go_build .
	docker compose -f compose/local-replicate-compose.yml -p seaweedfs up

dev_auditlog: build
	docker compose -f compose/local-auditlog-compose.yml -p seaweedfs up

dev_nextcloud: build
	docker compose -f compose/local-nextcloud-compose.yml -p seaweedfs up

cluster: build
	docker compose -f compose/local-cluster-compose.yml -p seaweedfs up

2clusters: build
	docker compose -f compose/local-clusters-compose.yml -p seaweedfs up

2mount: build
	docker compose -f compose/local-sync-mount-compose.yml -p seaweedfs up

filer_backup: build
	docker compose -f compose/local-filer-backup-compose.yml -p seaweedfs up

hashicorp_raft: build
	docker compose -f compose/local-hashicorp-raft-compose.yml -p seaweedfs up

s3tests: build s3tests_build
	docker compose -f compose/local-s3tests-compose.yml -p seaweedfs up

brokers: build
	docker compose -f compose/local-brokers-compose.yml -p seaweedfs up

agent: build
	docker compose -f compose/local-mq-test.yml -p seaweedfs up

filer_etcd: build
	docker stack deploy -c compose/swarm-etcd.yml fs

test_etcd: build
	docker compose -f compose/test-etcd-filer.yml -p seaweedfs up

test_ydb: tags = ydb
test_ydb: build
	docker compose -f compose/test-ydb-filer.yml -p seaweedfs up

test_tarantool: tags = tarantool
test_tarantool: build_tarantool_dev_env build
	docker compose -f compose/test-tarantool-filer.yml -p seaweedfs up

clean:
	rm ./weed

certstrap:
	go install -v github.com/square/certstrap@latest
	certstrap --depot-path compose/tls init --curve P-256 --passphrase "" --common-name "SeaweedFS CA" || true
	certstrap --depot-path compose/tls request-cert --ou "SeaweedFS" --curve P-256 --passphrase "" --domain localhost --common-name volume01.dev || true
	certstrap --depot-path compose/tls request-cert --ou "SeaweedFS" --curve P-256 --passphrase "" --common-name master01.dev || true
	certstrap --depot-path compose/tls request-cert --ou "SeaweedFS" --curve P-256 --passphrase "" --common-name filer01.dev || true
	certstrap --depot-path compose/tls request-cert --ou "SeaweedFS" --curve P-256 --passphrase "" --common-name client01.dev || true
	certstrap --depot-path compose/tls sign --CA "SeaweedFS CA" volume01.dev || true
	certstrap --depot-path compose/tls sign --CA "SeaweedFS CA" master01.dev || true
	certstrap --depot-path compose/tls sign --CA "SeaweedFS CA" filer01.dev || true
	certstrap --depot-path compose/tls sign --CA "SeaweedFS CA" client01.dev || true
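As an illustration of how these targets compose, the following builds the local image and starts the single-node dev cluster, or builds an image with extra build tags as hinted by the `go_build` comment above:

```bash
cd docker
make dev
# or:
make go_build tags=elastic,ydb
```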
# Docker

## Compose V2

SeaweedFS now uses the `v2` syntax `docker compose`.

If you rely on using Docker Compose as docker-compose (with a hyphen), you can set up Compose V2 to act as a drop-in replacement of the previous docker-compose. Refer to the [Installing Compose](https://docs.docker.com/compose/install/) section for detailed instructions on upgrading.

Confirm your system has docker compose v2 with a version check:
```bash
$ docker compose version
Docker Compose version v2.10.2
```

## Try it out

```bash
wget https://raw.githubusercontent.com/seaweedfs/seaweedfs/master/docker/seaweedfs-compose.yml

docker compose -f seaweedfs-compose.yml -p seaweedfs up
```

## Try latest tip

```bash
wget https://raw.githubusercontent.com/seaweedfs/seaweedfs/master/docker/seaweedfs-dev-compose.yml

docker compose -f seaweedfs-dev-compose.yml -p seaweedfs up
```

## Local Development

```bash
cd $GOPATH/src/github.com/seaweedfs/seaweedfs/docker
make
```

### S3 cmd

list:
```
s3cmd --no-ssl --host=127.0.0.1:8333 ls s3://
```
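Uploading works the same way; a minimal sketch, where the bucket name `newbucket` is only an example and `--host-bucket` is set because s3cmd defaults to virtual-host-style addressing:

```
s3cmd --no-ssl --host=127.0.0.1:8333 --host-bucket=127.0.0.1:8333 mb s3://newbucket
s3cmd --no-ssl --host=127.0.0.1:8333 --host-bucket=127.0.0.1:8333 put some-file.txt s3://newbucket/
```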
## Build and push a multiarch build

Make sure that `docker buildx` is supported (might be an experimental docker feature):
```bash
BUILDER=$(docker buildx create --driver docker-container --use)
docker buildx build --pull --push --platform linux/386,linux/amd64,linux/arm64,linux/arm/v7,linux/arm/v6 . -t chrislusf/seaweedfs
docker buildx stop $BUILDER
```

## Minio debugging

```
mc config host add local http://127.0.0.1:9000 some_access_key1 some_secret_key1
mc admin trace --all --verbose local
```

FROM alpine:latest

# Install required packages
RUN apk add --no-cache \
    ca-certificates \
    fuse \
    curl \
    jq

# Copy our locally built binary
COPY weed-local /usr/bin/weed
RUN chmod +x /usr/bin/weed

# Create working directory
WORKDIR /data

# Default command
ENTRYPOINT ["/usr/bin/weed"]

# SeaweedFS EC Worker Testing Environment

This Docker Compose setup provides a comprehensive testing environment for SeaweedFS Erasure Coding (EC) workers using **official SeaweedFS commands**.

## 📂 Directory Structure

The testing environment is located in `docker/admin_integration/` and includes:

```
docker/admin_integration/
├── Makefile                      # Main management interface
├── docker-compose-ec-test.yml    # Docker compose configuration
├── EC-TESTING-README.md          # This documentation
└── run-ec-test.sh                # Quick start script
```

## 🏗️ Architecture

The testing environment uses **official SeaweedFS commands** and includes:

- **1 Master Server** (port 9333) - Coordinates the cluster with a 50MB volume size limit
- **6 Volume Servers** (ports 8080-8085) - Distributed across 2 data centers and 3 racks for diversity
- **1 Filer** (port 8888) - Provides the file system interface
- **1 Admin Server** (port 23646) - Detects volumes needing EC and manages workers using the official `admin` command
- **3 EC Workers** - Execute erasure coding tasks using the official `worker` command with task-specific working directories
- **1 Load Generator** - Continuously writes and deletes files using SeaweedFS shell commands
- **1 Monitor** - Tracks cluster health and EC progress using shell scripts

## ✨ New Features

### **Task-Specific Working Directories**

Each worker now creates dedicated subdirectories for different task types:
- `/work/erasure_coding/` - For EC encoding tasks
- `/work/vacuum/` - For vacuum cleanup tasks
- `/work/balance/` - For volume balancing tasks

This provides:
- **Organization**: Each task type gets isolated working space
- **Debugging**: Easy to find files/logs related to specific task types
- **Cleanup**: Can clean up task-specific artifacts easily
- **Concurrent Safety**: Different task types won't interfere with each other's files
## 🚀 Quick Start

### Prerequisites

- Docker and Docker Compose installed
- GNU Make installed
- At least 4GB RAM available for containers
- Ports 8080-8085, 8888, 9333, 23646 available

### Start the Environment

```bash
# Navigate to the admin integration directory
cd docker/admin_integration/

# Show available commands
make help

# Start the complete testing environment
make start
```

The `make start` command will:
1. Start all services using official SeaweedFS images
2. Configure workers with task-specific working directories
3. Wait for services to be ready
4. Display monitoring URLs and run health checks

### Alternative Commands

```bash
# Quick start aliases
make up        # Same as 'make start'

# Development mode (higher load for faster testing)
make dev-start

# Build images without starting
make build
```
## 📋 Available Make Targets

Run `make help` to see all available targets:

### **🚀 Main Operations**
- `make start` - Start the complete EC testing environment
- `make stop` - Stop all services
- `make restart` - Restart all services
- `make clean` - Complete cleanup (containers, volumes, images)

### **📊 Monitoring & Status**
- `make health` - Check health of all services
- `make status` - Show status of all containers
- `make urls` - Display all monitoring URLs
- `make monitor` - Open monitor dashboard in browser
- `make monitor-status` - Show monitor status via API
- `make volume-status` - Show volume status from master
- `make admin-status` - Show admin server status
- `make cluster-status` - Show complete cluster status

### **📋 Logs Management**
- `make logs` - Show logs from all services
- `make logs-admin` - Show admin server logs
- `make logs-workers` - Show all worker logs
- `make logs-worker1/2/3` - Show specific worker logs
- `make logs-load` - Show load generator logs
- `make logs-monitor` - Show monitor logs
- `make backup-logs` - Backup all logs to files

### **⚖️ Scaling & Testing**
- `make scale-workers WORKERS=5` - Scale workers to 5 instances
- `make scale-load RATE=25` - Increase load generation rate
- `make test-ec` - Run focused EC test scenario

### **🔧 Development & Debug**
- `make shell-admin` - Open shell in admin container
- `make shell-worker1` - Open shell in worker container
- `make debug` - Show debug information
- `make troubleshoot` - Run troubleshooting checks

## 📊 Monitoring URLs

| Service | URL | Description |
|---------|-----|-------------|
| Master UI | http://localhost:9333 | Cluster status and topology |
| Filer | http://localhost:8888 | File operations |
| Admin Server | http://localhost:23646/ | Task management |
| Monitor | http://localhost:9999/status | Complete cluster monitoring |
| Volume Servers | http://localhost:8080-8085/status | Individual volume server stats |

Quick access: `make urls` or `make monitor`
## 🔄 How EC Testing Works
|
||||
|
||||
### 1. Continuous Load Generation
|
||||
- **Write Rate**: 10 files/second (1-5MB each)
|
||||
- **Delete Rate**: 2 files/second
|
||||
- **Target**: Fill volumes to 50MB limit quickly
|
||||
|
||||
### 2. Volume Detection
|
||||
- Admin server scans master every 30 seconds
|
||||
- Identifies volumes >40MB (80% of 50MB limit)
|
||||
- Queues EC tasks for eligible volumes

### 3. EC Worker Assignment
- **Worker 1**: EC specialist (max 2 concurrent tasks)
- **Worker 2**: EC + Vacuum hybrid (max 2 concurrent tasks)
- **Worker 3**: EC + Vacuum hybrid (max 1 concurrent task)

### 4. Comprehensive EC Process
Each EC task follows 6 phases:
1. **Copy Volume Data** (5-15%) - Stream .dat/.idx files locally
2. **Mark Read-Only** (20-25%) - Ensure data consistency
3. **Local Encoding** (30-60%) - Create 14 shards (10+4 Reed-Solomon)
4. **Calculate Placement** (65-70%) - Smart rack-aware distribution
5. **Distribute Shards** (75-90%) - Upload to optimal servers
6. **Verify & Cleanup** (95-100%) - Validate and clean temporary files
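
Phase 3 is plain Reed-Solomon coding. Below is a minimal sketch using the `github.com/klauspost/reedsolomon` package that SeaweedFS builds on; the shard sizing and the file I/O around it are simplified, so treat it as an illustration of the 10+4 split rather than the actual worker code.

```go
package main

import (
	"fmt"

	"github.com/klauspost/reedsolomon"
)

func encodeVolume(volumeData []byte) ([][]byte, error) {
	enc, err := reedsolomon.New(10, 4) // 10 data + 4 parity shards
	if err != nil {
		return nil, err
	}
	shards, err := enc.Split(volumeData) // slice into 14 equal shards (padded)
	if err != nil {
		return nil, err
	}
	if err := enc.Encode(shards); err != nil { // fill the 4 parity shards
		return nil, err
	}
	if ok, err := enc.Verify(shards); err != nil || !ok { // cheap consistency check
		return nil, fmt.Errorf("shard verification failed: %v", err)
	}
	return shards, nil // any 10 of the 14 shards can rebuild the volume
}

func main() {
	shards, err := encodeVolume(make([]byte, 1<<20)) // 1MB of zeroes as stand-in data
	if err != nil {
		panic(err)
	}
	fmt.Printf("produced %d shards of %d bytes each\n", len(shards), len(shards[0]))
}
```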

### 5. Real-Time Monitoring
- Volume analysis and EC candidate detection
- Worker health and task progress
- No data loss verification
- Performance metrics

## 📋 Key Features Tested

### ✅ EC Implementation Features
- [x] Local volume data copying with progress tracking
- [x] Local Reed-Solomon encoding (10+4 shards)
- [x] Intelligent shard placement with rack awareness
- [x] Load balancing across available servers
- [x] Backup server selection for redundancy
- [x] Detailed step-by-step progress tracking
- [x] Comprehensive error handling and recovery

### ✅ Infrastructure Features
- [x] Multi-datacenter topology (dc1, dc2)
- [x] Rack diversity (rack1, rack2, rack3)
- [x] Volume size limits (50MB)
- [x] Worker capability matching
- [x] Health monitoring and alerting
- [x] Continuous workload simulation

## 🛠️ Common Usage Patterns

### Basic Testing Workflow
```bash
# Start environment
make start

# Watch progress
make monitor-status

# Check for EC candidates
make volume-status

# View worker activity
make logs-workers

# Stop when done
make stop
```

### High-Load Testing
```bash
# Start with higher load
make dev-start

# Scale up workers and load
make scale-workers WORKERS=5
make scale-load RATE=50

# Monitor intensive EC activity
make logs-admin
```

### Debugging Issues
```bash
# Check port conflicts and system state
make troubleshoot

# View specific service logs
make logs-admin
make logs-worker1

# Get shell access for debugging
make shell-admin
make shell-worker1

# Check detailed status
make debug
```

### Development Iteration
```bash
# Quick restart after code changes
make restart

# Rebuild and restart
make clean
make start

# Monitor specific components
make logs-monitor
```

## 📈 Expected Results

### Successful EC Testing Shows:
1. **Volume Growth**: Steady increase in volume sizes toward 50MB limit
2. **EC Detection**: Admin server identifies volumes >40MB for EC
3. **Task Assignment**: Workers receive and execute EC tasks
4. **Shard Distribution**: 14 shards distributed across 6 volume servers
5. **No Data Loss**: All files remain accessible during and after EC
6. **Performance**: EC tasks complete within estimated timeframes

### Sample Monitor Output:
```bash
# Check current status
make monitor-status

# Output example:
{
  "monitor": {
    "uptime": "15m30s",
    "master_addr": "master:9333",
    "admin_addr": "admin:9900"
  },
  "stats": {
    "VolumeCount": 12,
    "ECTasksDetected": 3,
    "WorkersActive": 3
  }
}
```
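
To consume that payload from a test or script, something like the following works; the struct fields mirror the sample output above and are not a stable API contract.

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// MonitorStatus models the /status payload shown in the sample output.
type MonitorStatus struct {
	Monitor struct {
		Uptime     string `json:"uptime"`
		MasterAddr string `json:"master_addr"`
		AdminAddr  string `json:"admin_addr"`
	} `json:"monitor"`
	Stats struct {
		VolumeCount     int `json:"VolumeCount"`
		ECTasksDetected int `json:"ECTasksDetected"`
		WorkersActive   int `json:"WorkersActive"`
	} `json:"stats"`
}

func main() {
	resp, err := http.Get("http://localhost:9999/status")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var s MonitorStatus
	if err := json.NewDecoder(resp.Body).Decode(&s); err != nil {
		panic(err)
	}
	fmt.Printf("uptime=%s volumes=%d ec_tasks=%d workers=%d\n",
		s.Monitor.Uptime, s.Stats.VolumeCount, s.Stats.ECTasksDetected, s.Stats.WorkersActive)
}
```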

## 🔧 Configuration

### Environment Variables

You can customize the environment by setting variables:

```bash
# High load testing
WRITE_RATE=25 DELETE_RATE=5 make start

# Extended test duration
TEST_DURATION=7200 make start  # 2 hours
```
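
For reference, this is roughly how a generator could pick those variables up. The names `WRITE_RATE`, `DELETE_RATE`, and `TEST_DURATION` come from this README; the parsing helper and defaults are illustrative, not the actual implementation.

```go
package main

import (
	"fmt"
	"os"
	"strconv"
	"time"
)

// envInt returns an environment variable parsed as an integer, or a default.
func envInt(key string, def int) int {
	if v := os.Getenv(key); v != "" {
		if n, err := strconv.Atoi(v); err == nil {
			return n
		}
	}
	return def
}

func main() {
	writeRate := envInt("WRITE_RATE", 10)  // files/second
	deleteRate := envInt("DELETE_RATE", 2) // files/second
	duration := time.Duration(envInt("TEST_DURATION", 3600)) * time.Second

	fmt.Printf("writing %d/s, deleting %d/s for %s\n", writeRate, deleteRate, duration)
}
```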

### Scaling Examples

```bash
# Scale workers
make scale-workers WORKERS=6

# Increase load generation
make scale-load RATE=30

# Combined scaling
make scale-workers WORKERS=4
make scale-load RATE=40
```

## 🧹 Cleanup Options

```bash
# Stop services only
make stop

# Remove containers but keep volumes
make down

# Remove data volumes only
make clean-volumes

# Remove built images only
make clean-images

# Complete cleanup (everything)
make clean
```

## 🐛 Troubleshooting

### Quick Diagnostics
```bash
# Run complete troubleshooting
make troubleshoot

# Check specific components
make health
make debug
make status
```

### Common Issues

**Services not starting:**
```bash
# Check port availability
make troubleshoot

# View startup logs
make logs-master
make logs-admin
```

**No EC tasks being created:**
```bash
# Check volume status
make volume-status

# Increase load to fill volumes faster
make scale-load RATE=30

# Check admin detection
make logs-admin
```

**Workers not responding:**
```bash
# Check worker registration
make admin-status

# View worker logs
make logs-workers

# Restart workers
make restart
```

### Performance Tuning

**For faster testing:**
```bash
make dev-start           # Higher default load
make scale-load RATE=50  # Very high load
```

**For stress testing:**
```bash
make scale-workers WORKERS=8
make scale-load RATE=100
```

## 📚 Technical Details

### Network Architecture
- Custom bridge network (172.20.0.0/16)
- Service discovery via container names
- Health checks for all services

### Storage Layout
- Each volume server: max 100 volumes
- Data centers: dc1, dc2
- Racks: rack1, rack2, rack3
- Volume limit: 50MB per volume

### EC Algorithm
- Reed-Solomon RS(10,4)
- 10 data shards + 4 parity shards (any 4 of the 14 shards can be lost; storage overhead is 14/10 = 1.4x)
- Rack-aware distribution
- Backup server redundancy

### Make Integration
- Color-coded output for better readability
- Comprehensive help system (`make help`)
- Parallel execution support
- Error handling and cleanup
- Cross-platform compatibility

## 🎯 Quick Reference

```bash
# Essential commands
make help            # Show all available targets
make start           # Start complete environment
make health          # Check all services
make monitor         # Open dashboard
make logs-admin      # View admin activity
make clean           # Complete cleanup

# Monitoring
make volume-status   # Check for EC candidates
make admin-status    # Check task queue
make monitor-status  # Full cluster status

# Scaling & Testing
make test-ec              # Run focused EC test
make scale-load RATE=X    # Increase load
make troubleshoot         # Diagnose issues
```

This environment provides a realistic testing scenario for SeaweedFS EC workers with actual data operations, comprehensive monitoring, and easy management through Make targets.

@ -1,346 +0,0 @@
# SeaweedFS Admin Integration Test Makefile
# Tests the admin server and worker functionality using official weed commands

.PHONY: help build build-and-restart restart-workers start stop restart logs clean status test admin-ui worker-logs master-logs admin-logs vacuum-test vacuum-demo vacuum-status vacuum-data vacuum-data-high vacuum-data-low vacuum-continuous vacuum-clean vacuum-help
.DEFAULT_GOAL := help

COMPOSE_FILE := docker-compose-ec-test.yml
PROJECT_NAME := admin_integration

build: ## Build SeaweedFS with latest changes and create Docker image
	@echo "🔨 Building SeaweedFS with latest changes..."
	@echo "1️⃣ Generating admin templates..."
	@cd ../../ && make admin-generate
	@echo "2️⃣ Building Docker image with latest changes..."
	@cd ../ && make build
	@echo "3️⃣ Copying binary for local docker-compose..."
	@cp ../weed ./weed-local
	@echo "✅ Build complete! Updated image: chrislusf/seaweedfs:local"
	@echo "💡 Run 'make restart' to apply changes to running services"

build-and-restart: build ## Build with latest changes and restart services
	@echo "🔄 Recreating services with new image..."
	@echo "1️⃣ Recreating admin server with new image..."
	@docker-compose -f $(COMPOSE_FILE) up -d admin
	@sleep 5
	@echo "2️⃣ Recreating workers to reconnect..."
	@docker-compose -f $(COMPOSE_FILE) up -d worker1 worker2 worker3
	@echo "✅ All services recreated with latest changes!"
	@echo "🌐 Admin UI: http://localhost:23646/"
	@echo "💡 Workers will reconnect to the new admin server"

restart-workers: ## Restart all workers to reconnect to admin server
	@echo "🔄 Restarting workers to reconnect to admin server..."
	@docker-compose -f $(COMPOSE_FILE) restart worker1 worker2 worker3
	@echo "✅ Workers restarted and will reconnect to admin server"

help: ## Show this help message
	@echo "SeaweedFS Admin Integration Test"
	@echo "================================"
	@echo "Tests admin server task distribution to workers using official weed commands"
	@echo ""
	@echo "🏗️ Cluster Management:"
	@grep -E '^(start|stop|restart|clean|status|build):.*?## .*$$' $(MAKEFILE_LIST) | awk 'BEGIN {FS = ":.*?## "}; {printf "  %-18s %s\n", $$1, $$2}'
	@echo ""
	@echo "🧪 Testing:"
	@grep -E '^(test|demo|validate|quick-test):.*?## .*$$' $(MAKEFILE_LIST) | awk 'BEGIN {FS = ":.*?## "}; {printf "  %-18s %s\n", $$1, $$2}'
	@echo ""
	@echo "🗑️ Vacuum Testing:"
	@grep -E '^vacuum-.*:.*?## .*$$' $(MAKEFILE_LIST) | awk 'BEGIN {FS = ":.*?## "}; {printf "  %-18s %s\n", $$1, $$2}'
	@echo ""
	@echo "📜 Monitoring:"
	@grep -E '^(logs|admin-logs|worker-logs|master-logs|admin-ui):.*?## .*$$' $(MAKEFILE_LIST) | awk 'BEGIN {FS = ":.*?## "}; {printf "  %-18s %s\n", $$1, $$2}'
	@echo ""
	@echo "🚀 Quick Start:"
	@echo "  make start          # Start cluster"
	@echo "  make vacuum-test    # Test vacuum tasks"
	@echo "  make vacuum-help    # Vacuum testing guide"
	@echo ""
	@echo "💡 For detailed vacuum testing: make vacuum-help"

start: ## Start the complete SeaweedFS cluster with admin and workers
	@echo "🚀 Starting SeaweedFS cluster with admin and workers..."
	@docker-compose -f $(COMPOSE_FILE) up -d
	@echo "✅ Cluster started!"
	@echo ""
	@echo "📊 Access points:"
	@echo "  • Admin UI:  http://localhost:23646/"
	@echo "  • Master UI: http://localhost:9333/"
	@echo "  • Filer:     http://localhost:8888/"
	@echo ""
	@echo "📈 Services starting up..."
	@echo "  • Master server: ✓"
	@echo "  • Volume servers: Starting (6 servers)..."
	@echo "  • Filer: Starting..."
	@echo "  • Admin server: Starting..."
	@echo "  • Workers: Starting (3 workers)..."
	@echo ""
	@echo "⏳ Use 'make status' to check startup progress"
	@echo "💡 Use 'make logs' to watch the startup process"

start-staged: ## Start services in proper order with delays
	@echo "🚀 Starting SeaweedFS cluster in stages..."
	@echo ""
	@echo "Stage 1: Starting Master server..."
	@docker-compose -f $(COMPOSE_FILE) up -d master
	@sleep 10
	@echo ""
	@echo "Stage 2: Starting Volume servers..."
	@docker-compose -f $(COMPOSE_FILE) up -d volume1 volume2 volume3 volume4 volume5 volume6
	@sleep 15
	@echo ""
	@echo "Stage 3: Starting Filer..."
	@docker-compose -f $(COMPOSE_FILE) up -d filer
	@sleep 10
	@echo ""
	@echo "Stage 4: Starting Admin server..."
	@docker-compose -f $(COMPOSE_FILE) up -d admin
	@sleep 15
	@echo ""
	@echo "Stage 5: Starting Workers..."
	@docker-compose -f $(COMPOSE_FILE) up -d worker1 worker2 worker3
	@sleep 10
	@echo ""
	@echo "Stage 6: Starting Load generator and Monitor..."
	@docker-compose -f $(COMPOSE_FILE) up -d load_generator monitor
	@echo ""
	@echo "✅ All services started!"
	@echo ""
	@echo "📊 Access points:"
	@echo "  • Admin UI:  http://localhost:23646/"
	@echo "  • Master UI: http://localhost:9333/"
	@echo "  • Filer:     http://localhost:8888/"
	@echo ""
	@echo "⏳ Services are initializing... Use 'make status' to check progress"

stop: ## Stop all services
	@echo "🛑 Stopping SeaweedFS cluster..."
	@docker-compose -f $(COMPOSE_FILE) down
	@echo "✅ Cluster stopped"

restart: stop start ## Restart the entire cluster

clean: ## Stop and remove all containers, networks, and volumes
	@echo "🧹 Cleaning up SeaweedFS test environment..."
	@docker-compose -f $(COMPOSE_FILE) down -v --remove-orphans
	@docker system prune -f
	@rm -rf data/
	@echo "✅ Environment cleaned"

status: ## Check the status of all services
	@echo "📊 SeaweedFS Cluster Status"
	@echo "=========================="
	@docker-compose -f $(COMPOSE_FILE) ps
	@echo ""
	@echo "📋 Service Health:"
	@echo "Master:"
	@curl -s http://localhost:9333/cluster/status | jq '.IsLeader' 2>/dev/null || echo "  ❌ Master not ready"
	@echo "Admin:"
	@curl -s http://localhost:23646/ | grep -q "Admin" && echo "  ✅ Admin ready" || echo "  ❌ Admin not ready"

logs: ## Show logs from all services
	@echo "📜 Following logs from all services..."
	@echo "💡 Press Ctrl+C to stop following logs"
	@docker-compose -f $(COMPOSE_FILE) logs -f

admin-logs: ## Show logs from admin server only
	@echo "📜 Admin server logs:"
	@docker-compose -f $(COMPOSE_FILE) logs -f admin

worker-logs: ## Show logs from all workers
	@echo "📜 Worker logs:"
	@docker-compose -f $(COMPOSE_FILE) logs -f worker1 worker2 worker3

master-logs: ## Show logs from master server
	@echo "📜 Master server logs:"
	@docker-compose -f $(COMPOSE_FILE) logs -f master

admin-ui: ## Open admin UI in browser (macOS)
	@echo "🌐 Opening admin UI in browser..."
	@open http://localhost:23646/ || echo "💡 Manually open: http://localhost:23646/"

test: ## Run integration test to verify task assignment and completion
	@echo "🧪 Running Admin-Worker Integration Test"
	@echo "========================================"
	@echo ""
	@echo "1️⃣ Checking cluster health..."
	@sleep 5
	@curl -s http://localhost:9333/cluster/status | jq '.IsLeader' > /dev/null && echo "✅ Master healthy" || echo "❌ Master not ready"
	@curl -s http://localhost:23646/ | grep -q "Admin" && echo "✅ Admin healthy" || echo "❌ Admin not ready"
	@echo ""
	@echo "2️⃣ Checking worker registration..."
	@sleep 10
	@echo "💡 Check admin UI for connected workers: http://localhost:23646/"
	@echo ""
	@echo "3️⃣ Generating load to trigger EC tasks..."
	@echo "📝 Creating test files to fill volumes..."
	@echo "Creating large files with random data to trigger EC (targeting ~60MB total to exceed 50MB limit)..."
	@for i in $$(seq 1 12); do \
		echo "Creating 5MB random file $$i..."; \
		docker run --rm --network admin_integration_seaweed_net -v /tmp:/tmp --entrypoint sh chrislusf/seaweedfs:local -c "dd if=/dev/urandom of=/tmp/largefile$$i.dat bs=1M count=5 2>/dev/null && weed upload -master=master:9333 /tmp/largefile$$i.dat && rm /tmp/largefile$$i.dat"; \
		sleep 3; \
	done
	@echo ""
	@echo "4️⃣ Waiting for volumes to process large files and reach 50MB limit..."
	@echo "This may take a few minutes as we're uploading 60MB of data..."
	@sleep 60
	@echo ""
	@echo "5️⃣ Checking for EC task creation and assignment..."
	@echo "💡 Monitor the admin UI to see:"
	@echo "  • Tasks being created for volumes needing EC"
	@echo "  • Workers picking up tasks"
	@echo "  • Task progress (pending → running → completed)"
	@echo "  • EC shards being distributed"
	@echo ""
	@echo "✅ Integration test setup complete!"
	@echo "📊 Monitor progress at: http://localhost:23646/"

quick-test: ## Quick verification that core services are running
	@echo "⚡ Quick Health Check"
	@echo "===================="
	@echo "Master: $$(curl -s http://localhost:9333/cluster/status | jq -r '.IsLeader // "not ready"')"
	@echo "Admin: $$(curl -s http://localhost:23646/ | grep -q "Admin" && echo "ready" || echo "not ready")"
	@echo "Workers: $$(docker-compose -f $(COMPOSE_FILE) ps worker1 worker2 worker3 | grep -c Up) running"

validate: ## Validate integration test configuration
	@echo "🔍 Validating Integration Test Configuration"
	@echo "==========================================="
	@chmod +x test-integration.sh
	@./test-integration.sh

demo: start ## Start cluster and run demonstration
	@echo "🎭 SeaweedFS Admin-Worker Demo"
	@echo "============================="
	@echo ""
	@echo "⏳ Waiting for services to start..."
	@sleep 45
	@echo ""
	@echo "🎯 Demo Overview:"
	@echo "  • 1 Master server (coordinates cluster)"
	@echo "  • 6 Volume servers (50MB volume limit)"
	@echo "  • 1 Admin server (task management)"
	@echo "  • 3 Workers (execute EC tasks)"
	@echo "  • Load generator (creates files continuously)"
	@echo ""
	@echo "📊 Watch the process:"
	@echo "  1. Visit: http://localhost:23646/"
	@echo "  2. Observe workers connecting"
	@echo "  3. Watch tasks being created and assigned"
	@echo "  4. See tasks progress from pending → completed"
	@echo ""
	@echo "🔄 The demo will:"
	@echo "  • Fill volumes to 50MB limit"
	@echo "  • Admin detects volumes needing EC"
	@echo "  • Workers receive and execute EC tasks"
	@echo "  • Tasks complete with shard distribution"
	@echo ""
	@echo "💡 Use 'make worker-logs' to see worker activity"
	@echo "💡 Use 'make admin-logs' to see admin task management"

# Vacuum Testing Targets
vacuum-test: ## Create test data with garbage and verify vacuum detection
	@echo "🧪 SeaweedFS Vacuum Task Testing"
	@echo "================================"
	@echo ""
	@echo "1️⃣ Checking cluster health..."
	@curl -s http://localhost:9333/cluster/status | jq '.IsLeader' > /dev/null && echo "✅ Master ready" || (echo "❌ Master not ready. Run 'make start' first." && exit 1)
	@curl -s http://localhost:23646/ | grep -q "Admin" && echo "✅ Admin ready" || (echo "❌ Admin not ready. Run 'make start' first." && exit 1)
	@echo ""
	@echo "2️⃣ Creating test data with garbage..."
	@docker-compose -f $(COMPOSE_FILE) exec vacuum-tester go run create_vacuum_test_data.go -files=25 -delete=0.5 -size=200
	@echo ""
	@echo "3️⃣ Configuration Instructions:"
	@echo "  Visit: http://localhost:23646/maintenance/config/vacuum"
	@echo "  Set for testing:"
	@echo "  • Enable Vacuum Tasks: ✅ Checked"
	@echo "  • Garbage Threshold: 0.20 (20%)"
	@echo "  • Scan Interval: [30] [Seconds]"
	@echo "  • Min Volume Age: [0] [Minutes]"
	@echo "  • Max Concurrent: 2"
	@echo ""
	@echo "4️⃣ Monitor vacuum tasks at: http://localhost:23646/maintenance"
	@echo ""
	@echo "💡 Use 'make vacuum-status' to check volume garbage ratios"

vacuum-demo: ## Run automated vacuum testing demonstration
	@echo "🎭 Vacuum Task Demo"
	@echo "=================="
	@echo ""
	@echo "⚠️ This demo requires user interaction for configuration"
	@echo "💡 Make sure cluster is running with 'make start'"
	@echo ""
	@docker-compose -f $(COMPOSE_FILE) exec vacuum-tester sh -c "chmod +x demo_vacuum_testing.sh && ./demo_vacuum_testing.sh"

vacuum-status: ## Check current volume status and garbage ratios
	@echo "📊 Current Volume Status"
	@echo "======================="
	@docker-compose -f $(COMPOSE_FILE) exec vacuum-tester sh -c "chmod +x check_volumes.sh && ./check_volumes.sh"

vacuum-data: ## Create test data with configurable parameters
	@echo "📁 Creating vacuum test data..."
	@echo "Usage: make vacuum-data [FILES=20] [DELETE=0.4] [SIZE=100]"
	@echo ""
	@docker-compose -f $(COMPOSE_FILE) exec vacuum-tester go run create_vacuum_test_data.go \
		-files=$${FILES:-20} \
		-delete=$${DELETE:-0.4} \
		-size=$${SIZE:-100}

vacuum-data-high: ## Create high garbage ratio test data (should trigger vacuum)
	@echo "📁 Creating high garbage test data (70% garbage)..."
	@docker-compose -f $(COMPOSE_FILE) exec vacuum-tester go run create_vacuum_test_data.go -files=30 -delete=0.7 -size=150

vacuum-data-low: ## Create low garbage ratio test data (should NOT trigger vacuum)
	@echo "📁 Creating low garbage test data (15% garbage)..."
	@docker-compose -f $(COMPOSE_FILE) exec vacuum-tester go run create_vacuum_test_data.go -files=30 -delete=0.15 -size=150

vacuum-continuous: ## Generate garbage continuously for testing
	@echo "🔄 Generating continuous garbage for vacuum testing..."
	@echo "Creating 5 rounds of test data with 30-second intervals..."
	@for i in $$(seq 1 5); do \
		echo "Round $$i: Creating garbage..."; \
		docker-compose -f $(COMPOSE_FILE) exec vacuum-tester go run create_vacuum_test_data.go -files=10 -delete=0.6 -size=100; \
		echo "Waiting 30 seconds..."; \
		sleep 30; \
	done
	@echo "✅ Continuous test complete. Check vacuum task activity!"

vacuum-clean: ## Clean up vacuum test data (removes all volumes!)
	@echo "🧹 Cleaning up vacuum test data..."
	@echo "⚠️ WARNING: This will delete ALL volumes!"
	@read -p "Are you sure? (y/N): " confirm && [ "$$confirm" = "y" ] || exit 1
	@echo "Stopping cluster..."
	@docker-compose -f $(COMPOSE_FILE) down
	@echo "Removing volume data..."
	@rm -rf data/volume*/
	@echo "Restarting cluster..."
	@docker-compose -f $(COMPOSE_FILE) up -d
	@echo "✅ Clean up complete. Fresh volumes ready for testing."

vacuum-help: ## Show vacuum testing help and examples
	@echo "🧪 Vacuum Testing Commands (Docker-based)"
	@echo "=========================================="
	@echo ""
	@echo "Quick Start:"
	@echo "  make start          # Start SeaweedFS cluster with vacuum-tester"
	@echo "  make vacuum-test    # Create test data and instructions"
	@echo "  make vacuum-status  # Check volume status"
	@echo ""
	@echo "Data Generation:"
	@echo "  make vacuum-data-high   # High garbage (should trigger)"
	@echo "  make vacuum-data-low    # Low garbage (should NOT trigger)"
	@echo "  make vacuum-continuous  # Continuous garbage generation"
	@echo ""
	@echo "Monitoring:"
	@echo "  make vacuum-status  # Quick volume status check"
	@echo "  make vacuum-demo    # Full guided demonstration"
	@echo ""
	@echo "Configuration:"
	@echo "  Visit:   http://localhost:23646/maintenance/config/vacuum"
	@echo "  Monitor: http://localhost:23646/maintenance"
	@echo ""
	@echo "Custom Parameters:"
	@echo "  make vacuum-data FILES=50 DELETE=0.8 SIZE=200"
	@echo ""
	@echo "💡 All commands now run inside Docker containers"
	@echo "Documentation:"
	@echo "  See: VACUUM_TEST_README.md for complete guide"

@ -1,32 +0,0 @@
#!/bin/sh

echo "📊 Quick Volume Status Check"
echo "============================"
echo ""

# Check if master is running
MASTER_URL="${MASTER_HOST:-master:9333}"
if ! curl -s http://$MASTER_URL/cluster/status > /dev/null; then
    echo "❌ Master server not available at $MASTER_URL"
    exit 1
fi

echo "🔍 Fetching volume status from master..."
curl -s "http://$MASTER_URL/vol/status" | jq -r '
if .Volumes and .Volumes.DataCenters then
  .Volumes.DataCenters | to_entries[] | .value | to_entries[] | .value | to_entries[] | .value | if . then .[] else empty end |
  "Volume \(.Id):
  Size: \(.Size | if . < 1024 then "\(.) B" elif . < 1048576 then "\(. / 1024 | floor) KB" elif . < 1073741824 then "\(. / 1048576 * 100 | floor / 100) MB" else "\(. / 1073741824 * 100 | floor / 100) GB" end)
  Files: \(.FileCount) active, \(.DeleteCount) deleted
  Garbage: \(.DeletedByteCount | if . < 1024 then "\(.) B" elif . < 1048576 then "\(. / 1024 | floor) KB" elif . < 1073741824 then "\(. / 1048576 * 100 | floor / 100) MB" else "\(. / 1073741824 * 100 | floor / 100) GB" end) (\(if .Size > 0 then (.DeletedByteCount / .Size * 100 | floor) else 0 end)%)
  Status: \(if .Size > 0 and (.DeletedByteCount / .Size * 100) > 30 then "🎯 NEEDS VACUUM" else "✅ OK" end)
"
else
  "No volumes found"
end'

echo ""
echo "💡 Legend:"
echo "  🎯 NEEDS VACUUM: >30% garbage ratio"
echo "  ✅ OK: <30% garbage ratio"
echo ""
@ -1,280 +0,0 @@
package main

import (
	"bytes"
	"crypto/rand"
	"encoding/json"
	"flag"
	"fmt"
	"io"
	"log"
	"net/http"
	"time"
)

var (
	master      = flag.String("master", "master:9333", "SeaweedFS master server address")
	fileCount   = flag.Int("files", 20, "Number of files to create")
	deleteRatio = flag.Float64("delete", 0.4, "Ratio of files to delete (0.0-1.0)")
	fileSizeKB  = flag.Int("size", 100, "Size of each file in KB")
)

type AssignResult struct {
	Fid       string `json:"fid"`
	Url       string `json:"url"`
	PublicUrl string `json:"publicUrl"`
	Count     int    `json:"count"`
	Error     string `json:"error"`
}

func main() {
	flag.Parse()

	fmt.Println("🧪 Creating fake data for vacuum task testing...")
	fmt.Printf("Master: %s\n", *master)
	fmt.Printf("Files to create: %d\n", *fileCount)
	fmt.Printf("Delete ratio: %.1f%%\n", *deleteRatio*100)
	fmt.Printf("File size: %d KB\n", *fileSizeKB)
	fmt.Println()

	if *fileCount == 0 {
		// Just check volume status
		fmt.Println("📊 Checking volume status...")
		checkVolumeStatus()
		return
	}

	// Step 1: Create test files
	fmt.Println("📁 Step 1: Creating test files...")
	fids := createTestFiles()

	// Step 2: Delete some files to create garbage
	fmt.Println("🗑️ Step 2: Deleting files to create garbage...")
	deleteFiles(fids)

	// Step 3: Check volume status
	fmt.Println("📊 Step 3: Checking volume status...")
	checkVolumeStatus()

	// Step 4: Configure vacuum for testing
	fmt.Println("⚙️ Step 4: Instructions for testing...")
	printTestingInstructions()
}

func createTestFiles() []string {
	var fids []string

	for i := 0; i < *fileCount; i++ {
		// Generate random file content
		fileData := make([]byte, *fileSizeKB*1024)
		if _, err := rand.Read(fileData); err != nil {
			log.Printf("Failed to generate random data for file %d: %v", i, err)
			continue
		}

		// Get file ID assignment
		assign, err := assignFileId()
		if err != nil {
			log.Printf("Failed to assign file ID for file %d: %v", i, err)
			continue
		}

		// Upload file
		err = uploadFile(assign, fileData, fmt.Sprintf("test_file_%d.dat", i))
		if err != nil {
			log.Printf("Failed to upload file %d: %v", i, err)
			continue
		}

		fids = append(fids, assign.Fid)

		if (i+1)%5 == 0 {
			fmt.Printf("  Created %d/%d files...\n", i+1, *fileCount)
		}
	}

	fmt.Printf("✅ Created %d files successfully\n\n", len(fids))
	return fids
}

func deleteFiles(fids []string) {
	deleteCount := int(float64(len(fids)) * *deleteRatio)

	for i := 0; i < deleteCount; i++ {
		err := deleteFile(fids[i])
		if err != nil {
			log.Printf("Failed to delete file %s: %v", fids[i], err)
			continue
		}

		if (i+1)%5 == 0 {
			fmt.Printf("  Deleted %d/%d files...\n", i+1, deleteCount)
		}
	}

	fmt.Printf("✅ Deleted %d files (%.1f%% of total)\n\n", deleteCount, *deleteRatio*100)
}

func assignFileId() (*AssignResult, error) {
	resp, err := http.Get(fmt.Sprintf("http://%s/dir/assign", *master))
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	var result AssignResult
	err = json.NewDecoder(resp.Body).Decode(&result)
	if err != nil {
		return nil, err
	}

	if result.Error != "" {
		return nil, fmt.Errorf("assignment error: %s", result.Error)
	}

	return &result, nil
}

func uploadFile(assign *AssignResult, data []byte, filename string) error {
	url := fmt.Sprintf("http://%s/%s", assign.Url, assign.Fid)

	body := &bytes.Buffer{}
	body.Write(data)

	req, err := http.NewRequest("POST", url, body)
	if err != nil {
		return err
	}

	req.Header.Set("Content-Type", "application/octet-stream")
	if filename != "" {
		req.Header.Set("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s\"", filename))
	}

	client := &http.Client{Timeout: 30 * time.Second}
	resp, err := client.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusCreated && resp.StatusCode != http.StatusOK {
		body, _ := io.ReadAll(resp.Body)
		return fmt.Errorf("upload failed with status %d: %s", resp.StatusCode, string(body))
	}

	return nil
}

func deleteFile(fid string) error {
	url := fmt.Sprintf("http://%s/%s", *master, fid)

	req, err := http.NewRequest("DELETE", url, nil)
	if err != nil {
		return err
	}

	client := &http.Client{Timeout: 10 * time.Second}
	resp, err := client.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	return nil
}

func checkVolumeStatus() {
	// Get volume list from master
	resp, err := http.Get(fmt.Sprintf("http://%s/vol/status", *master))
	if err != nil {
		log.Printf("Failed to get volume status: %v", err)
		return
	}
	defer resp.Body.Close()

	var volumes map[string]interface{}
	err = json.NewDecoder(resp.Body).Decode(&volumes)
	if err != nil {
		log.Printf("Failed to decode volume status: %v", err)
		return
	}

	fmt.Println("📊 Volume Status Summary:")

	if vols, ok := volumes["Volumes"].([]interface{}); ok {
		for _, vol := range vols {
			if v, ok := vol.(map[string]interface{}); ok {
				id := int(v["Id"].(float64))
				size := uint64(v["Size"].(float64))
				fileCount := int(v["FileCount"].(float64))
				deleteCount := int(v["DeleteCount"].(float64))
				deletedBytes := uint64(v["DeletedByteCount"].(float64))

				garbageRatio := 0.0
				if size > 0 {
					garbageRatio = float64(deletedBytes) / float64(size) * 100
				}

				fmt.Printf("  Volume %d:\n", id)
				fmt.Printf("    Size: %s\n", formatBytes(size))
				fmt.Printf("    Files: %d (active), %d (deleted)\n", fileCount, deleteCount)
				fmt.Printf("    Garbage: %s (%.1f%%)\n", formatBytes(deletedBytes), garbageRatio)

				if garbageRatio > 30 {
					fmt.Printf("    🎯 This volume should trigger vacuum (>30%% garbage)\n")
				}
				fmt.Println()
			}
		}
	}
}

func formatBytes(bytes uint64) string {
	if bytes < 1024 {
		return fmt.Sprintf("%d B", bytes)
	} else if bytes < 1024*1024 {
		return fmt.Sprintf("%.1f KB", float64(bytes)/1024)
	} else if bytes < 1024*1024*1024 {
		return fmt.Sprintf("%.1f MB", float64(bytes)/(1024*1024))
	} else {
		return fmt.Sprintf("%.1f GB", float64(bytes)/(1024*1024*1024))
	}
}

func printTestingInstructions() {
	fmt.Println("🧪 Testing Instructions:")
	fmt.Println()
	fmt.Println("1. Configure Vacuum for Testing:")
	fmt.Println("   Visit: http://localhost:23646/maintenance/config/vacuum")
	fmt.Println("   Set:")
	fmt.Printf("   - Garbage Percentage Threshold: 20 (20%% - lower than default 30)\n")
	fmt.Printf("   - Scan Interval: [30] [Seconds] (faster than default)\n")
	fmt.Printf("   - Min Volume Age: [0] [Minutes] (no age requirement)\n")
	fmt.Printf("   - Max Concurrent: 2\n")
	fmt.Printf("   - Min Interval: 1m (faster repeat)\n")
	fmt.Println()

	fmt.Println("2. Monitor Vacuum Tasks:")
	fmt.Println("   Visit: http://localhost:23646/maintenance")
	fmt.Println("   Watch for vacuum tasks to appear in the queue")
	fmt.Println()

	fmt.Println("3. Manual Vacuum (Optional):")
	fmt.Println("   curl -X POST 'http://localhost:9333/vol/vacuum?garbageThreshold=0.20'")
	fmt.Println("   (Note: Master API still uses 0.0-1.0 decimal format)")
	fmt.Println()

	fmt.Println("4. Check Logs:")
	fmt.Println("   Look for messages like:")
	fmt.Println("   - 'Vacuum detector found X volumes needing vacuum'")
	fmt.Println("   - 'Applied vacuum configuration'")
	fmt.Println("   - 'Worker executing task: vacuum'")
	fmt.Println()

	fmt.Println("5. Verify Results:")
	fmt.Println("   Re-run this script with -files=0 to check volume status")
	fmt.Println("   Garbage ratios should decrease after vacuum operations")
	fmt.Println()

	fmt.Printf("🚀 Quick test command:\n")
	fmt.Printf("   go run create_vacuum_test_data.go -files=0\n")
	fmt.Println()
}
@ -1,105 +0,0 @@
#!/bin/sh

echo "🧪 SeaweedFS Vacuum Task Testing Demo"
echo "======================================"
echo ""

# Check if SeaweedFS is running
echo "📋 Checking SeaweedFS status..."
MASTER_URL="${MASTER_HOST:-master:9333}"
ADMIN_URL="${ADMIN_HOST:-admin:23646}"

if ! curl -s http://$MASTER_URL/cluster/status > /dev/null; then
    echo "❌ SeaweedFS master not running at $MASTER_URL"
    echo "   Please ensure Docker cluster is running: make start"
    exit 1
fi

if ! curl -s http://volume1:8080/status > /dev/null; then
    echo "❌ SeaweedFS volume servers not running"
    echo "   Please ensure Docker cluster is running: make start"
    exit 1
fi

if ! curl -s http://$ADMIN_URL/ > /dev/null; then
    echo "❌ SeaweedFS admin server not running at $ADMIN_URL"
    echo "   Please ensure Docker cluster is running: make start"
    exit 1
fi

echo "✅ All SeaweedFS components are running"
echo ""

# Phase 1: Create test data
echo "📁 Phase 1: Creating test data with garbage..."
go run create_vacuum_test_data.go -master=$MASTER_URL -files=15 -delete=0.5 -size=150
echo ""

# Phase 2: Check initial status
echo "📊 Phase 2: Checking initial volume status..."
go run create_vacuum_test_data.go -master=$MASTER_URL -files=0
echo ""

# Phase 3: Configure vacuum
echo "⚙️ Phase 3: Vacuum configuration instructions..."
echo "   1. Visit: http://localhost:23646/maintenance/config/vacuum"
echo "   2. Set these values for testing:"
echo "      - Enable Vacuum Tasks: ✅ Checked"
echo "      - Garbage Threshold: 0.30"
echo "      - Scan Interval: [30] [Seconds]"
echo "      - Min Volume Age: [0] [Minutes]"
echo "      - Max Concurrent: 2"
echo "   3. Click 'Save Configuration'"
echo ""

read -p "   Press ENTER after configuring vacuum settings..."
echo ""

# Phase 4: Monitor tasks
echo "🎯 Phase 4: Monitoring vacuum tasks..."
echo "   Visit: http://localhost:23646/maintenance"
echo "   You should see vacuum tasks appear within 30 seconds"
echo ""

echo "   Waiting 60 seconds for vacuum detection and execution..."
for i in $(seq 60 -1 1); do
    printf "\r   Countdown: %02d seconds" $i
    sleep 1
done
echo ""
echo ""

# Phase 5: Check results
echo "📈 Phase 5: Checking results after vacuum..."
go run create_vacuum_test_data.go -master=$MASTER_URL -files=0
echo ""

# Phase 6: Create more garbage for continuous testing
echo "🔄 Phase 6: Creating additional garbage for continuous testing..."
echo "   Running 3 rounds of garbage creation..."

for round in $(seq 1 3); do
    echo "   Round $round: Creating garbage..."
    go run create_vacuum_test_data.go -master=$MASTER_URL -files=8 -delete=0.6 -size=100
    echo "   Waiting 30 seconds before next round..."
    sleep 30
done

echo ""
echo "📊 Final volume status:"
go run create_vacuum_test_data.go -master=$MASTER_URL -files=0
echo ""

echo "🎉 Demo Complete!"
echo ""
echo "🔍 Things to check:"
echo "   1. Maintenance Queue: http://localhost:23646/maintenance"
echo "   2. Volume Status: http://localhost:9333/vol/status"
echo "   3. Admin Dashboard: http://localhost:23646"
echo ""
echo "💡 Next Steps:"
echo "   - Try different garbage thresholds (0.10, 0.50, 0.80)"
echo "   - Adjust scan intervals (10s, 1m, 5m)"
echo "   - Monitor logs for vacuum operations"
echo "   - Test with multiple volumes"
echo ""
@ -1,240 +0,0 @@
name: admin_integration

networks:
  seaweed_net:
    driver: bridge

services:
  master:
    image: chrislusf/seaweedfs:local
    ports:
      - "9333:9333"
      - "19333:19333"
    command: "master -ip=master -mdir=/data -volumeSizeLimitMB=50"
    environment:
      - WEED_MASTER_VOLUME_GROWTH_COPY_1=1
      - WEED_MASTER_VOLUME_GROWTH_COPY_2=2
      - WEED_MASTER_VOLUME_GROWTH_COPY_OTHER=1
    volumes:
      - ./data/master:/data
    networks:
      - seaweed_net

  volume1:
    image: chrislusf/seaweedfs:local
    ports:
      - "8080:8080"
      - "18080:18080"
    command: "volume -mserver=master:9333 -ip=volume1 -dir=/data -max=10"
    depends_on:
      - master
    volumes:
      - ./data/volume1:/data
    networks:
      - seaweed_net

  volume2:
    image: chrislusf/seaweedfs:local
    ports:
      - "8081:8080"
      - "18081:18080"
    command: "volume -mserver=master:9333 -ip=volume2 -dir=/data -max=10"
    depends_on:
      - master
    volumes:
      - ./data/volume2:/data
    networks:
      - seaweed_net

  volume3:
    image: chrislusf/seaweedfs:local
    ports:
      - "8082:8080"
      - "18082:18080"
    command: "volume -mserver=master:9333 -ip=volume3 -dir=/data -max=10"
    depends_on:
      - master
    volumes:
      - ./data/volume3:/data
    networks:
      - seaweed_net

  volume4:
    image: chrislusf/seaweedfs:local
    ports:
      - "8083:8080"
      - "18083:18080"
    command: "volume -mserver=master:9333 -ip=volume4 -dir=/data -max=10"
    depends_on:
      - master
    volumes:
      - ./data/volume4:/data
    networks:
      - seaweed_net

  volume5:
    image: chrislusf/seaweedfs:local
    ports:
      - "8084:8080"
      - "18084:18080"
    command: "volume -mserver=master:9333 -ip=volume5 -dir=/data -max=10"
    depends_on:
      - master
    volumes:
      - ./data/volume5:/data
    networks:
      - seaweed_net

  volume6:
    image: chrislusf/seaweedfs:local
    ports:
      - "8085:8080"
      - "18085:18080"
    command: "volume -mserver=master:9333 -ip=volume6 -dir=/data -max=10"
    depends_on:
      - master
    volumes:
      - ./data/volume6:/data
    networks:
      - seaweed_net

  filer:
    image: chrislusf/seaweedfs:local
    ports:
      - "8888:8888"
      - "18888:18888"
    command: "filer -master=master:9333 -ip=filer"
    depends_on:
      - master
    volumes:
      - ./data/filer:/data
    networks:
      - seaweed_net

  admin:
    image: chrislusf/seaweedfs:local
    ports:
      - "23646:23646" # HTTP admin interface (default port)
      - "33646:33646" # gRPC worker communication (23646 + 10000)
    command: "admin -port=23646 -masters=master:9333 -dataDir=/data"
    depends_on:
      - master
      - filer
    volumes:
      - ./data/admin:/data
    networks:
      - seaweed_net

  worker1:
    image: chrislusf/seaweedfs:local
    command: "-v=2 worker -admin=admin:23646 -capabilities=erasure_coding,vacuum -maxConcurrent=2"
    depends_on:
      - admin
    volumes:
      - ./data/worker1:/data
    networks:
      - seaweed_net
    environment:
      - WORKER_ID=worker-1

  worker2:
    image: chrislusf/seaweedfs:local
    command: "-v=2 worker -admin=admin:23646 -capabilities=erasure_coding,vacuum -maxConcurrent=2"
    depends_on:
      - admin
    volumes:
      - ./data/worker2:/data
    networks:
      - seaweed_net
    environment:
      - WORKER_ID=worker-2

  worker3:
    image: chrislusf/seaweedfs:local
    command: "-v=2 worker -admin=admin:23646 -capabilities=erasure_coding,vacuum -maxConcurrent=2"
    depends_on:
      - admin
    volumes:
      - ./data/worker3:/data
    networks:
      - seaweed_net
    environment:
      - WORKER_ID=worker-3

  load_generator:
    image: chrislusf/seaweedfs:local
    entrypoint: ["/bin/sh"]
    command: >
      -c "
      echo 'Starting load generator...';
      sleep 30;
      echo 'Generating continuous load with 50MB volume limit...';
      while true; do
        echo 'Writing test files...';
        echo 'Test file content at $(date)' | /usr/bin/weed upload -server=master:9333;
        sleep 5;
        echo 'Deleting some files...';
        echo 'fs.rm /test_file_*' | /usr/bin/weed shell -master=master:9333 || true;
        sleep 10;
      done
      "
    depends_on:
      - master
      - filer
      - admin
    networks:
      - seaweed_net

  monitor:
    image: alpine:latest
    entrypoint: ["/bin/sh"]
    command: >
      -c "
      apk add --no-cache curl jq;
      echo 'Starting cluster monitor...';
      sleep 30;
      while true; do
        echo '=== Cluster Status ==='; date;
        echo 'Master status:';
        curl -s http://master:9333/cluster/status | jq '.IsLeader, .Peers' || echo 'Master not ready';
        echo;
        echo 'Admin status:';
        curl -s http://admin:23646/ | grep -o 'Admin.*Interface' || echo 'Admin not ready';
        echo;
        echo 'Volume count by server:';
        curl -s http://master:9333/vol/status | jq '.Volumes | length' || echo 'Volumes not ready';
        echo;
        sleep 60;
      done
      "
    depends_on:
      - master
      - admin
      - filer
    networks:
      - seaweed_net

  vacuum-tester:
    image: chrislusf/seaweedfs:local
    entrypoint: ["/bin/sh"]
    command: >
      -c "
      echo 'Installing dependencies for vacuum testing...';
      apk add --no-cache jq curl go bash;
      echo 'Vacuum tester ready...';
      echo 'Use: docker-compose exec vacuum-tester sh';
      echo 'Available commands: go, weed, curl, jq, bash, sh';
      sleep infinity
      "
    depends_on:
      - master
      - admin
      - filer
    volumes:
      - .:/testing
    working_dir: /testing
    networks:
      - seaweed_net
    environment:
      - MASTER_HOST=master:9333
      - ADMIN_HOST=admin:23646

@ -1,73 +0,0 @@
#!/bin/bash

set -e

echo "🧪 Testing SeaweedFS Admin-Worker Integration"
echo "============================================="

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color

cd "$(dirname "$0")"

echo -e "${BLUE}1. Validating docker-compose configuration...${NC}"
if docker-compose -f docker-compose-ec-test.yml config > /dev/null; then
    echo -e "${GREEN}✅ Docker compose configuration is valid${NC}"
else
    echo -e "${RED}❌ Docker compose configuration is invalid${NC}"
    exit 1
fi

echo -e "${BLUE}2. Checking if required ports are available...${NC}"
for port in 9333 8080 8081 8082 8083 8084 8085 8888 23646; do
    if lsof -i :$port > /dev/null 2>&1; then
        echo -e "${YELLOW}⚠️ Port $port is in use${NC}"
    else
        echo -e "${GREEN}✅ Port $port is available${NC}"
    fi
done

echo -e "${BLUE}3. Testing worker command syntax...${NC}"
# Test that the worker command in docker-compose has correct syntax
if docker-compose -f docker-compose-ec-test.yml config | grep -q "workingDir=/work"; then
    echo -e "${GREEN}✅ Worker working directory option is properly configured${NC}"
else
    echo -e "${RED}❌ Worker working directory option is missing${NC}"
    exit 1
fi

echo -e "${BLUE}4. Verifying admin server configuration...${NC}"
if docker-compose -f docker-compose-ec-test.yml config | grep -q "admin:23646"; then
    echo -e "${GREEN}✅ Admin server port configuration is correct${NC}"
else
    echo -e "${RED}❌ Admin server port configuration is incorrect${NC}"
    exit 1
fi

echo -e "${BLUE}5. Checking service dependencies...${NC}"
if docker-compose -f docker-compose-ec-test.yml config | grep -q "depends_on"; then
    echo -e "${GREEN}✅ Service dependencies are configured${NC}"
else
    echo -e "${YELLOW}⚠️ Service dependencies may not be configured${NC}"
fi

echo ""
echo -e "${GREEN}🎉 Integration test configuration is ready!${NC}"
echo ""
echo -e "${BLUE}To start the integration test:${NC}"
echo "  make start    # Start all services"
echo "  make health   # Check service health"
echo "  make logs     # View logs"
echo "  make stop     # Stop all services"
echo ""
echo -e "${BLUE}Key features verified:${NC}"
echo "  ✅ Official SeaweedFS images are used"
echo "  ✅ Worker working directories are configured"
echo "  ✅ Admin-worker communication on correct ports"
echo "  ✅ Task-specific directories will be created"
echo "  ✅ Load generator will trigger EC tasks"
echo "  ✅ Monitor will track progress"

@ -1,53 +0,0 @@
version: '3.9'

services:
  master:
    image: chrislusf/seaweedfs:e2e
    command: "-v=4 master -ip=master -ip.bind=0.0.0.0 -raftBootstrap"
    healthcheck:
      test: [ "CMD", "curl", "--fail", "-I", "http://localhost:9333/cluster/healthz" ]
      interval: 1s
      timeout: 60s

  volume:
    image: chrislusf/seaweedfs:e2e
    command: "-v=4 volume -mserver=master:9333 -ip=volume -ip.bind=0.0.0.0 -preStopSeconds=1"
    healthcheck:
      test: [ "CMD", "curl", "--fail", "-I", "http://localhost:8080/healthz" ]
      interval: 1s
      timeout: 30s
    depends_on:
      master:
        condition: service_healthy

  filer:
    image: chrislusf/seaweedfs:e2e
    command: "-v=4 filer -master=master:9333 -ip=filer -ip.bind=0.0.0.0"
    healthcheck:
      test: [ "CMD", "curl", "--fail", "-I", "http://localhost:8888" ]
      interval: 1s
      timeout: 30s
    depends_on:
      volume:
        condition: service_healthy

  mount:
    image: chrislusf/seaweedfs:e2e
    command: "-v=4 mount -filer=filer:8888 -filer.path=/ -dirAutoCreate -dir=/mnt/seaweedfs"
    cap_add:
      - SYS_ADMIN
    devices:
      - /dev/fuse
    security_opt:
      - apparmor:unconfined
    deploy:
      resources:
        limits:
          memory: 4096m
    healthcheck:
      test: [ "CMD", "mountpoint", "-q", "--", "/mnt/seaweedfs" ]
      interval: 1s
      timeout: 30s
    depends_on:
      filer:
        condition: service_healthy

@ -1,8 +0,0 @@
<source>
  @type forward
  port 24224
</source>

<match **>
  @type stdout # Output logs to container's stdout (visible via `docker logs`)
</match>

@ -1,4 +0,0 @@
{
  "fluent_port": 24224,
  "fluent_host": "fluent"
}

@ -1,38 +0,0 @@
version: '3.9'

services:
  s3:
    image: chrislusf/seaweedfs:local
    ports:
      - 8333:8333
      - 9333:9333
      - 19333:19333
      - 8084:8080
      - 18084:18080
      - 8888:8888
      - 18888:18888
      - 8000:8000
    command: "server -ip=s3 -filer -s3 -s3.config=/etc/seaweedfs/s3.json -s3.port=8000 -s3.auditLogConfig=/etc/seaweedfs/fluent.json -volume.max=0 -master.volumeSizeLimitMB=8 -volume.preStopSeconds=1"
    volumes:
      - ./fluent.json:/etc/seaweedfs/fluent.json
      - ./s3.json:/etc/seaweedfs/s3.json
    depends_on:
      - fluent
  fluent:
    image: fluent/fluentd:v1.17
    volumes:
      - ./fluent.conf:/fluentd/etc/fluent.conf
    ports:
      - 24224:24224
  #s3tests:
  #  image: chrislusf/ceph-s3-tests:local
  #  volumes:
  #    - ./s3tests.conf:/opt/s3-tests/s3tests.conf
  #  environment:
  #    S3TEST_CONF: "s3tests.conf"
  #    NOSETESTS_OPTIONS: "--verbose --logging-level=ERROR --with-xunit --failure-detail s3tests_boto3.functional.test_s3"
  #    NOSETESTS_ATTR: "!tagging,!fails_on_aws,!encryption,!bucket-policy,!versioning,!fails_on_rgw,!bucket-policy,!fails_with_subdomain,!policy_status,!object-lock,!lifecycle,!cors,!user-policy"
  #    NOSETESTS_EXCLUDE: "(get_bucket_encryption|put_bucket_encryption|bucket_list_delimiter_basic|bucket_listv2_delimiter_basic|bucket_listv2_encoding_basic|bucket_list_encoding_basic|bucket_list_delimiter_prefix|bucket_listv2_delimiter_prefix_ends_with_delimiter|bucket_list_delimiter_prefix_ends_with_delimiter|bucket_list_delimiter_alt|bucket_listv2_delimiter_alt|bucket_list_delimiter_prefix_underscore|bucket_list_delimiter_percentage|bucket_listv2_delimiter_percentage|bucket_list_delimiter_whitespace|bucket_listv2_delimiter_whitespace|bucket_list_delimiter_dot|bucket_listv2_delimiter_dot|bucket_list_delimiter_unreadable|bucket_listv2_delimiter_unreadable|bucket_listv2_fetchowner_defaultempty|bucket_listv2_fetchowner_empty|bucket_list_prefix_delimiter_alt|bucket_listv2_prefix_delimiter_alt|bucket_list_prefix_delimiter_prefix_not_exist|bucket_listv2_prefix_delimiter_prefix_not_exist|bucket_list_prefix_delimiter_delimiter_not_exist|bucket_listv2_prefix_delimiter_delimiter_not_exist|bucket_list_prefix_delimiter_prefix_delimiter_not_exist|bucket_listv2_prefix_delimiter_prefix_delimiter_not_exist|bucket_list_maxkeys_none|bucket_listv2_maxkeys_none|bucket_list_maxkeys_invalid|bucket_listv2_continuationtoken_empty|bucket_list_return_data|bucket_list_objects_anonymous|bucket_listv2_objects_anonymous|bucket_notexist|bucketv2_notexist|bucket_delete_nonempty|bucket_concurrent_set_canned_acl|object_write_to_nonexist_bucket|object_requestid_matches_header_on_error|object_set_get_metadata_none_to_good|object_set_get_metadata_none_to_empty|object_set_get_metadata_overwrite_to_empty|post_object_anonymous_request|post_object_authenticated_request|post_object_authenticated_no_content_type|post_object_authenticated_request_bad_access_key|post_object_set_success_code|post_object_set_invalid_success_code|post_object_upload_larger_than_chunk|post_object_set_key_from_filename|post_object_ignored_header|post_object_case_insensitive_condition_fields|post_object_escaped_field_values|post_object_success_redirect_action|post_object_invalid_signature|post_object_invalid_access_key|post_object_missing_policy_condition|post_object_user_specified_header|post_object_request_missing_policy_specified_field|post_object_expired_policy|post_object_invalid_request_field_value|get_object_ifunmodifiedsince_good|put_object_ifmatch_failed|object_raw_get_bucket_gone|object_delete_key_bucket_gone|object_raw_get_bucket_acl|object_raw_get_object_acl|object_raw_response_headers|object_raw_authenticated_bucket_gone|object_raw_get_x_amz_expires_out_max_range|object_raw_get_x_amz_expires_out_positive_range|object_anon_put_write_access|object_raw_put_authenticated_expired|bucket_create_exists|bucket_create_naming_bad_short_one|bucket_create_naming_bad_short_two|bucket_get_location|bucket_acl_default|bucket_acl_canned|bucket_acl_canned_publicreadwrite|bucket_acl_canned_authenticatedread|object_acl_default|object_acl_canned_during_create|object_acl_canned|object_acl_canned_publicreadwrite|object_acl_canned_authenticatedread|object_acl_canned_bucketownerread|object_acl_canned_bucketownerfullcontrol|object_acl_full_control_verify_attributes|bucket_acl_canned_private_to_private|bucket_acl_grant_nonexist_user|bucket_acl_no_grants|bucket_acl_grant_email_not_exist|bucket_acl_revoke_all|bucket_recreate_not_overriding|object_copy_verify_contenttype|object_copy_to_itself_with_metadata|object_copy_not_owned_bucket|object_copy_not_owned_object_bucket|object_copy_retaining_metadata|object_copy_replacing_metadata|multipart_upload_empty|multipart_copy_invalid_range|multipart_copy_special_names|multipart_upload_resend_part|multipart_upload_size_too_small|abort_multipart_upload_not_found|multipart_upload_missing_part|multipart_upload_incorrect_etag|100_continue|ranged_request_invalid_range|ranged_request_empty_object|access_bucket)"
  #  depends_on:
  #    - s3
  #    - fluent

@ -1,127 +0,0 @@
version: '3.9'

services:
  master0:
    image: chrislusf/seaweedfs:local
    ports:
      - 9333:9333
      - 19333:19333
    command: "-v=0 master -volumeSizeLimitMB 100 -resumeState=false -ip=master0 -port=9333 -peers=master0:9333,master1:9334,master2:9335 -mdir=/tmp"
    environment:
      WEED_MASTER_VOLUME_GROWTH_COPY_1: 1
      WEED_MASTER_VOLUME_GROWTH_COPY_2: 2
      WEED_MASTER_VOLUME_GROWTH_COPY_OTHER: 1
  master1:
    image: chrislusf/seaweedfs:local
    ports:
      - 9334:9334
      - 19334:19334
    command: "-v=0 master -volumeSizeLimitMB 100 -resumeState=false -ip=master1 -port=9334 -peers=master0:9333,master1:9334,master2:9335 -mdir=/tmp"
    environment:
      WEED_MASTER_VOLUME_GROWTH_COPY_1: 1
      WEED_MASTER_VOLUME_GROWTH_COPY_2: 2
      WEED_MASTER_VOLUME_GROWTH_COPY_OTHER: 1
  master2:
    image: chrislusf/seaweedfs:local
    ports:
      - 9335:9335
      - 19335:19335
    command: "-v=0 master -volumeSizeLimitMB 100 -resumeState=false -ip=master2 -port=9335 -peers=master0:9333,master1:9334,master2:9335 -mdir=/tmp"
    environment:
      WEED_MASTER_VOLUME_GROWTH_COPY_1: 1
      WEED_MASTER_VOLUME_GROWTH_COPY_2: 2
      WEED_MASTER_VOLUME_GROWTH_COPY_OTHER: 1
  volume1:
    image: chrislusf/seaweedfs:local
    ports:
      - 8080:8080
      - 18080:18080
    command: 'volume -dataCenter=dc1 -rack=v1 -mserver="master0:9333,master1:9334,master2:9335" -port=8080 -ip=volume1 -publicUrl=localhost:8080 -preStopSeconds=1'
    depends_on:
      - master0
      - master1
      - master2
  volume2:
    image: chrislusf/seaweedfs:local
    ports:
      - 8082:8082
      - 18082:18082
    command: 'volume -dataCenter=dc2 -rack=v2 -mserver="master0:9333,master1:9334,master2:9335" -port=8082 -ip=volume2 -publicUrl=localhost:8082 -preStopSeconds=1'
    depends_on:
      - master0
      - master1
      - master2
  volume3:
    image: chrislusf/seaweedfs:local
    ports:
      - 8083:8083
      - 18083:18083
    command: 'volume -dataCenter=dc3 -rack=v3 -mserver="master0:9333,master1:9334,master2:9335" -port=8083 -ip=volume3 -publicUrl=localhost:8083 -preStopSeconds=1'
    depends_on:
      - master0
      - master1
      - master2
  filer1:
    image: chrislusf/seaweedfs:local
    ports:
      - 8888:8888
      - 18888:18888
    command: 'filer -defaultReplicaPlacement=100 -iam -master="master0:9333,master1:9334,master2:9335" -port=8888 -ip=filer1'
    depends_on:
      - master0
      - master1
      - master2
      - volume1
      - volume2
  filer2:
    image: chrislusf/seaweedfs:local
    ports:
      - 8889:8889
      - 18889:18889
    command: 'filer -defaultReplicaPlacement=100 -iam -master="master0:9333,master1:9334,master2:9335" -port=8889 -ip=filer2'
    depends_on:
      - master0
      - master1
      - master2
      - volume1
      - volume2
      - filer1
  broker1:
    image: chrislusf/seaweedfs:local
    ports:
      - 17777:17777
    command: 'mq.broker -master="master0:9333,master1:9334,master2:9335" -port=17777 -ip=broker1'
    depends_on:
      - master0
      - master1
      - master2
      - volume1
      - volume2
      - filer1
      - filer2
  broker2:
    image: chrislusf/seaweedfs:local
    ports:
      - 17778:17778
    command: 'mq.broker -master="master0:9333,master1:9334,master2:9335" -port=17778 -ip=broker2'
    depends_on:
      - master0
      - master1
      - master2
      - volume1
      - volume2
      - filer1
      - filer2
  broker3:
    image: chrislusf/seaweedfs:local
    ports:
      - 17779:17779
    command: 'mq.broker -master="master0:9333,master1:9334,master2:9335" -port=17779 -ip=broker3'
    depends_on:
      - master0
      - master1
      - master2
      - volume1
      - volume2
      - filer1
      - filer2
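A cluster like this can be exercised from the host once it is up; a minimal sketch, assuming the file above is saved as cluster-compose.yml (the file name is illustrative):

    docker compose -f cluster-compose.yml up -d
    # any master reports the current leader and its peer list
    curl "http://localhost:9333/cluster/status?pretty=y"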
@ -1,88 +0,0 @@
version: '3.9'

services:
  master0:
    image: chrislusf/seaweedfs:local
    ports:
      - 9333:9333
      - 19333:19333
    command: "-v=1 master -volumeSizeLimitMB 100 -resumeState=false -ip=master0 -port=9333 -peers=master0:9333,master1:9334,master2:9335 -mdir=/tmp"
    environment:
      WEED_MASTER_VOLUME_GROWTH_COPY_1: 1
      WEED_MASTER_VOLUME_GROWTH_COPY_2: 2
      WEED_MASTER_VOLUME_GROWTH_COPY_OTHER: 1
  master1:
    image: chrislusf/seaweedfs:local
    ports:
      - 9334:9334
      - 19334:19334
    command: "-v=1 master -volumeSizeLimitMB 100 -resumeState=false -ip=master1 -port=9334 -peers=master0:9333,master1:9334,master2:9335 -mdir=/tmp"
    environment:
      WEED_MASTER_VOLUME_GROWTH_COPY_1: 1
      WEED_MASTER_VOLUME_GROWTH_COPY_2: 2
      WEED_MASTER_VOLUME_GROWTH_COPY_OTHER: 1
  master2:
    image: chrislusf/seaweedfs:local
    ports:
      - 9335:9335
      - 19335:19335
    command: "-v=1 master -volumeSizeLimitMB 100 -resumeState=false -ip=master2 -port=9335 -peers=master0:9333,master1:9334,master2:9335 -mdir=/tmp"
    environment:
      WEED_MASTER_VOLUME_GROWTH_COPY_1: 1
      WEED_MASTER_VOLUME_GROWTH_COPY_2: 2
      WEED_MASTER_VOLUME_GROWTH_COPY_OTHER: 1
  volume1:
    image: chrislusf/seaweedfs:local
    ports:
      - 8080:8080
      - 18080:18080
    command: 'volume -dataCenter=dc1 -rack=v1 -mserver="master0:9333,master1:9334,master2:9335" -port=8080 -ip=volume1 -publicUrl=localhost:8080 -preStopSeconds=1'
    depends_on:
      - master0
      - master1
      - master2
  volume2:
    image: chrislusf/seaweedfs:local
    ports:
      - 8082:8082
      - 18082:18082
    command: 'volume -dataCenter=dc2 -rack=v2 -mserver="master0:9333,master1:9334,master2:9335" -port=8082 -ip=volume2 -publicUrl=localhost:8082 -preStopSeconds=1'
    depends_on:
      - master0
      - master1
      - master2
  volume3:
    image: chrislusf/seaweedfs:local
    ports:
      - 8083:8083
      - 18083:18083
    command: 'volume -dataCenter=dc3 -rack=v3 -mserver="master0:9333,master1:9334,master2:9335" -port=8083 -ip=volume3 -publicUrl=localhost:8083 -preStopSeconds=1'
    depends_on:
      - master0
      - master1
      - master2
  filer:
    image: chrislusf/seaweedfs:local
    ports:
      - 8888:8888
      - 18888:18888
      - 8111:8111
    command: 'filer -defaultReplicaPlacement=100 -iam -master="master0:9333,master1:9334,master2:9335"'
    depends_on:
      - master0
      - master1
      - master2
      - volume1
      - volume2
  s3:
    image: chrislusf/seaweedfs:local
    ports:
      - 8333:8333
    command: '-v=9 s3 -filer="filer:8888"'
    depends_on:
      - master0
      - master1
      - master2
      - volume1
      - volume2
      - filer
@ -1,28 +0,0 @@
version: '3.9'

services:
  server1:
    image: chrislusf/seaweedfs:local
    ports:
      - 9333:9333
      - 19333:19333
      - 8084:8080
      - 18084:18080
      - 8888:8888
      - 18888:18888
    command: "server -ip=server1 -filer -volume.max=0 -master.volumeSizeLimitMB=100 -volume.preStopSeconds=1"
    volumes:
      - ./master-cloud.toml:/etc/seaweedfs/master.toml
    depends_on:
      - server2
  server2:
    image: chrislusf/seaweedfs:local
    ports:
      - 9334:9333
      - 19334:19333
      - 8085:8080
      - 18085:18080
      - 8889:8888
      - 18889:18888
      - 8334:8333
    command: "server -ip=server2 -filer -s3 -volume.max=0 -master.volumeSizeLimitMB=100 -volume.preStopSeconds=1"
@ -1,80 +0,0 @@
version: '3.9'

services:
  master:
    image: chrislusf/seaweedfs:local
    ports:
      - 9333:9333
      - 19333:19333
    command: "-v=1 master -ip=master -volumeSizeLimitMB=10"
    volumes:
      - ./tls:/etc/seaweedfs/tls
    env_file:
      - ${ENV_FILE:-dev.env}
  volume:
    image: chrislusf/seaweedfs:local
    ports:
      - 8080:8080
      - 18080:18080
    command: "-v=1 volume -mserver=master:9333 -port=8080 -ip=volume -preStopSeconds=1 -max=10000"
    depends_on:
      - master
    volumes:
      - ./tls:/etc/seaweedfs/tls
    env_file:
      - ${ENV_FILE:-dev.env}
  filer:
    image: chrislusf/seaweedfs:local
    ports:
      - 8888:8888
      - 18888:18888
    command: '-v=1 filer -ip.bind=0.0.0.0 -master="master:9333"'
    depends_on:
      - master
      - volume
    volumes:
      - ./tls:/etc/seaweedfs/tls
    env_file:
      - ${ENV_FILE:-dev.env}

  iam:
    image: chrislusf/seaweedfs:local
    ports:
      - 8111:8111
    command: '-v=1 iam -filer="filer:8888" -master="master:9333"'
    depends_on:
      - master
      - volume
      - filer
    volumes:
      - ./tls:/etc/seaweedfs/tls

  s3:
    image: chrislusf/seaweedfs:local
    ports:
      - 8333:8333
    command: '-v=1 s3 -filer="filer:8888" -ip.bind=s3'
    depends_on:
      - master
      - volume
      - filer
    volumes:
      - ./tls:/etc/seaweedfs/tls
    env_file:
      - ${ENV_FILE:-dev.env}

  mount:
    image: chrislusf/seaweedfs:local
    privileged: true
    cap_add:
      - SYS_ADMIN
    mem_limit: 4096m
    command: '-v=4 mount -filer="filer:8888" -dirAutoCreate -dir=/mnt/seaweedfs -cacheCapacityMB=100 -concurrentWriters=128'
    volumes:
      - ./tls:/etc/seaweedfs/tls
    env_file:
      - ${ENV_FILE:-dev.env}
    depends_on:
      - master
      - volume
      - filer
@ -1,54 +0,0 @@
version: '3.9'
services:
  server-left:
    image: chrislusf/seaweedfs:local
    command: "-v=0 server -ip=server-left -filer -filer.maxMB 5 -s3 -s3.config=/etc/seaweedfs/s3.json -volume.max=0 -master.volumeSizeLimitMB=100 -volume.preStopSeconds=1"
    volumes:
      - ./s3.json:/etc/seaweedfs/s3.json
    healthcheck:
      test: [ "CMD", "curl", "--fail", "-I", "http://localhost:9333/cluster/healthz" ]
      interval: 3s
      start_period: 15s
      timeout: 30s
  server-right:
    image: chrislusf/seaweedfs:local
    command: "-v=0 server -ip=server-right -filer -filer.maxMB 64 -s3 -s3.config=/etc/seaweedfs/s3.json -volume.max=0 -master.volumeSizeLimitMB=100 -volume.preStopSeconds=1"
    volumes:
      - ./s3.json:/etc/seaweedfs/s3.json
    healthcheck:
      test: [ "CMD", "curl", "--fail", "-I", "http://localhost:9333/cluster/healthz" ]
      interval: 3s
      start_period: 15s
      timeout: 30s
  filer-backup:
    image: chrislusf/seaweedfs:local
    command: "-v=0 filer.backup -debug -doDeleteFiles=False -filer server-left:8888"
    volumes:
      - ./replication.toml:/etc/seaweedfs/replication.toml
    environment:
      WEED_SINK_LOCAL_INCREMENTAL_ENABLED: "false"
      WEED_SINK_S3_ENABLED: "true"
      WEED_SINK_S3_BUCKET: "backup"
      WEED_SINK_S3_ENDPOINT: "http://server-right:8333"
      WEED_SINK_S3_DIRECTORY: "/"
      WEED_SINK_S3_AWS_ACCESS_KEY_ID: "some_access_key1"
      WEED_SINK_S3_AWS_SECRET_ACCESS_KEY: "some_secret_key1"
      WEED_SINK_S3_S3_DISABLE_CONTENT_MD5_VALIDATION: "false"
      WEED_SINK_S3_UPLOADER_PART_SIZE_MB: "5"
      WEED_SINK_S3_KEEP_PART_SIZE: "false"
    depends_on:
      server-left:
        condition: service_healthy
      server-right:
        condition: service_healthy
  minio-warp:
    image: minio/warp
    command: 'mixed --duration 5s --obj.size=6mb --md5 --objects 10 --concurrent 2'
    restart: on-failure
    environment:
      WARP_HOST: "server-left:8333"
      WARP_ACCESS_KEY: "some_access_key1"
      WARP_SECRET_KEY: "some_secret_key1"
    depends_on:
      - filer-backup
@ -1,89 +0,0 @@
version: '3.9'

services:
  master0:
    image: chrislusf/seaweedfs:local
    ports:
      - 9333:9333
      - 19333:19333
    command: "-v=4 master -volumeSizeLimitMB 100 -raftHashicorp -electionTimeout 1s -ip=master0 -port=9333 -peers=master1:9334,master2:9335 -mdir=/data"
    volumes:
      - ./master/0:/data
    environment:
      WEED_MASTER_VOLUME_GROWTH_COPY_1: 1
      WEED_MASTER_VOLUME_GROWTH_COPY_2: 2
      WEED_MASTER_VOLUME_GROWTH_COPY_OTHER: 1
  master1:
    image: chrislusf/seaweedfs:local
    ports:
      - 9334:9334
      - 19334:19334
    command: "-v=4 master -volumeSizeLimitMB 100 -raftHashicorp -electionTimeout 1s -ip=master1 -port=9334 -peers=master0:9333,master2:9335 -mdir=/data"
    volumes:
      - ./master/1:/data
    environment:
      WEED_MASTER_VOLUME_GROWTH_COPY_1: 1
      WEED_MASTER_VOLUME_GROWTH_COPY_2: 2
      WEED_MASTER_VOLUME_GROWTH_COPY_OTHER: 1
  master2:
    image: chrislusf/seaweedfs:local
    ports:
      - 9335:9335
      - 19335:19335
    command: "-v=4 master -volumeSizeLimitMB 100 -raftHashicorp -electionTimeout 1s -ip=master2 -port=9335 -peers=master0:9333,master1:9334 -mdir=/data"
    volumes:
      - ./master/2:/data
    environment:
      WEED_MASTER_VOLUME_GROWTH_COPY_1: 1
      WEED_MASTER_VOLUME_GROWTH_COPY_2: 2
      WEED_MASTER_VOLUME_GROWTH_COPY_OTHER: 1
  volume1:
    image: chrislusf/seaweedfs:local
    ports:
      - 8080:8080
      - 18080:18080
    command: 'volume -dataCenter=dc1 -rack=v1 -mserver="master0:9333,master1:9334,master2:9335" -port=8080 -ip=volume1 -publicUrl=localhost:8080 -preStopSeconds=1'
    depends_on:
      - master0
      - master1
  volume2:
    image: chrislusf/seaweedfs:local
    ports:
      - 8082:8082
      - 18082:18082
    command: 'volume -dataCenter=dc2 -rack=v2 -mserver="master0:9333,master1:9334,master2:9335" -port=8082 -ip=volume2 -publicUrl=localhost:8082 -preStopSeconds=1'
    depends_on:
      - master0
      - master1
  volume3:
    image: chrislusf/seaweedfs:local
    ports:
      - 8083:8083
      - 18083:18083
    command: 'volume -dataCenter=dc3 -rack=v3 -mserver="master0:9333,master1:9334,master2:9335" -port=8083 -ip=volume3 -publicUrl=localhost:8083 -preStopSeconds=1'
    depends_on:
      - master0
      - master1
  filer:
    image: chrislusf/seaweedfs:local
    ports:
      - 8888:8888
      - 18888:18888
      - 8111:8111
    command: 'filer -defaultReplicaPlacement=100 -iam -master="master0:9333,master1:9334,master2:9335"'
    depends_on:
      - master0
      - master1
      - volume1
      - volume2
  s3:
    image: chrislusf/seaweedfs:local
    ports:
      - 8333:8333
    command: '-v=9 s3 -ip.bind="s3" -filer="filer:8888"'
    depends_on:
      - master0
      - master1
      - volume1
      - volume2
      - filer
@ -1,94 +0,0 @@
version: '3.9'

services:
  master:
    image: chrislusf/seaweedfs:local
    ports:
      - 9333:9333
      - 19333:19333
    command: "master -ip=master"
  volume:
    image: chrislusf/seaweedfs:local
    ports:
      - 8080:8080
      - 18080:18080
    command: "volume -mserver=master:9333 -port=8080 -ip=volume"
    depends_on:
      - master
  mysql:
    image: percona/percona-server:5.7
    ports:
      - 3306:3306
    volumes:
      - ./seaweedfs.sql:/docker-entrypoint-initdb.d/seaweedfs.sql
    environment:
      - MYSQL_ROOT_PASSWORD=secret
      - MYSQL_DATABASE=seaweedfs
      - MYSQL_PASSWORD=secret
      - MYSQL_USER=seaweedfs
  filer:
    image: chrislusf/seaweedfs:local
    ports:
      - 8888:8888
      - 18888:18888
    environment:
      - WEED_MYSQL_HOSTNAME=mysql
      - WEED_MYSQL_PORT=3306
      - WEED_MYSQL_DATABASE=seaweedfs
      - WEED_MYSQL_USERNAME=seaweedfs
      - WEED_MYSQL_PASSWORD=secret
      - WEED_MYSQL_ENABLED=true
      - WEED_MYSQL_CONNECTION_MAX_IDLE=5
      - WEED_MYSQL_CONNECTION_MAX_OPEN=75
      # "refresh" connection every 10 minutes, eliminating mysql closing "old" connections
      - WEED_MYSQL_CONNECTION_MAX_LIFETIME_SECONDS=600
      # enable usage of memsql as filer backend
      - WEED_MYSQL_INTERPOLATEPARAMS=true
      - WEED_LEVELDB2_ENABLED=false
    command: '-v 9 filer -master="master:9333"'
    depends_on:
      - master
      - volume
      - mysql
  ingress:
    image: jwilder/nginx-proxy:alpine
    ports:
      - "80:80"
    volumes:
      - /var/run/docker.sock:/tmp/docker.sock:ro
      - ./nginx/proxy.conf:/etc/nginx/proxy.conf
  s3:
    image: chrislusf/seaweedfs:local
    ports:
      - 8333:8333
    command: '-v 9 s3 -filer="filer:8888"'
    depends_on:
      - master
      - volume
      - filer
    environment:
      - VIRTUAL_HOST=ingress
      - VIRTUAL_PORT=8333
  registry:
    image: registry:2
    environment:
      REGISTRY_HTTP_ADDR: "0.0.0.0:5001" # seaweedfs s3
      REGISTRY_LOG_LEVEL: "debug"
      REGISTRY_STORAGE: "s3"
      REGISTRY_STORAGE_S3_REGION: "us-east-1"
      REGISTRY_STORAGE_S3_REGIONENDPOINT: "http://ingress"
      REGISTRY_STORAGE_S3_BUCKET: "registry"
      REGISTRY_STORAGE_S3_ACCESSKEY: "some_access_key1"
      REGISTRY_STORAGE_S3_SECRETKEY: "some_secret_key1"
      REGISTRY_STORAGE_S3_V4AUTH: "true"
      REGISTRY_STORAGE_S3_SECURE: "false"
      REGISTRY_STORAGE_S3_SKIPVERIFY: "true"
      REGISTRY_STORAGE_S3_ROOTDIRECTORY: "/"
      REGISTRY_STORAGE_DELETE_ENABLED: "true"
      REGISTRY_STORAGE_REDIRECT_DISABLE: "true"
      REGISTRY_VALIDATION_DISABLED: "true"
    ports:
      - 5001:5001
    depends_on:
      - s3
      - ingress
@ -1,50 +0,0 @@
version: '3.9'

services:
  master:
    image: chrislusf/seaweedfs:local
    ports:
      - 9333:9333
      - 19333:19333
    command: "master -ip=master -volumeSizeLimitMB=100"
  volume:
    image: chrislusf/seaweedfs:local
    ports:
      - 8080:8080
      - 18080:18080
    command: "volume -mserver=master:9333 -port=8080 -ip=volume -max=0 -preStopSeconds=1"
    depends_on:
      - master
  s3:
    image: chrislusf/seaweedfs:local
    ports:
      - 8888:8888
      - 18888:18888
      - 8333:8333
    command: '-v 1 filer -master="master:9333" -s3 -s3.config=/etc/seaweedfs/s3.json -s3.port=8333'
    volumes:
      - ./s3.json:/etc/seaweedfs/s3.json
    depends_on:
      - master
      - volume
  minio-gateway-s3:
    image: minio/minio
    ports:
      - 9000:9000
    command: 'minio gateway s3 http://s3:8333'
    restart: on-failure
    environment:
      MINIO_ACCESS_KEY: "some_access_key1"
      MINIO_SECRET_KEY: "some_secret_key1"
    depends_on:
      - s3
  minio-warp:
    image: minio/warp
    command: 'mixed --duration=5m --obj.size=3mb --autoterm'
    restart: on-failure
    environment:
      WARP_HOST: "minio-gateway-s3:9000"
      WARP_ACCESS_KEY: "some_access_key1"
      WARP_SECRET_KEY: "some_secret_key1"
    depends_on:
      - minio-gateway-s3
@ -1,46 +0,0 @@
version: '3.9'

services:
  master:
    image: chrislusf/seaweedfs:local
    ports:
      - 9333:9333
      - 19333:19333
    command: "master -ip=master"
  volume:
    image: chrislusf/seaweedfs:local
    ports:
      - 7455:8080
      - 9325:9325
    command: 'volume -mserver="master:9333" -port=8080 -metricsPort=9325 -preStopSeconds=1 -publicUrl=localhost:7455'
    depends_on:
      - master
  filer:
    image: chrislusf/seaweedfs:local
    ports:
      - 8888:8888
      - 18888:18888
      - 9326:9326
    command: 'filer -master="master:9333" -metricsPort=9326'
    tty: true
    stdin_open: true
    depends_on:
      - master
      - volume
  mount_1:
    image: chrislusf/seaweedfs:local
    privileged: true
    entrypoint: '/bin/sh -c "mkdir -p t1 && mkdir -p cache/t1 && weed -v=4 mount -filer=filer:8888 -cacheDir=./cache/t1 -dir=./t1 -filer.path=/c1 -volumeServerAccess=filerProxy"'
    depends_on:
      - master
      - volume
      - filer
  mount_2:
    image: chrislusf/seaweedfs:local
    privileged: true
    entrypoint: '/bin/sh -c "mkdir -p t2 && mkdir -p cache/t2 && weed -v=4 mount -filer=filer:8888 -cacheDir=./cache/t2 -dir=./t2 -filer.path=/c1"'
    depends_on:
      - master
      - volume
      - filer
      - mount_1
@ -1,47 +0,0 @@
version: '3.9'

services:
  master:
    image: chrislusf/seaweedfs:local
    ports:
      - 9333:9333
      - 19333:19333
    command: "master -ip=master"
  volume:
    image: chrislusf/seaweedfs:local
    ports:
      - 7455:8080
      - 9325:9325
    volumes:
      - /Volumes/mobile_disk/99:/data
    command: 'volume -mserver="master:9333" -port=8080 -metricsPort=9325 -preStopSeconds=1 -publicUrl=localhost:7455'
    depends_on:
      - master
  filer:
    image: chrislusf/seaweedfs:local
    ports:
      - 8888:8888
      - 18888:18888
      - 9326:9326
    volumes:
      - /Volumes/mobile_disk/99:/data
    command: 'filer -master="master:9333" -metricsPort=9326'
    tty: true
    stdin_open: true
    depends_on:
      - master
      - volume
  mount:
    image: chrislusf/seaweedfs:local
    privileged: true
    cap_add:
      - SYS_ADMIN
    devices:
      - fuse
    volumes:
      - /Volumes/mobile_disk/99:/data
    entrypoint: '/bin/sh -c "mkdir -p t1 && weed -v=4 mount -filer=filer:8888 -dir=./t1 -cacheCapacityMB=0 -memprofile=/data/mount.mem.pprof"'
    depends_on:
      - master
      - volume
      - filer
@ -1,32 +0,0 @@
services:
  server:
    image: chrislusf/seaweedfs:local
    ports:
      - 9333:9333
      - 19333:19333
      - 8888:8888
      - 18888:18888
    command: "server -ip=server -filer -volume.max=0 -master.volumeSizeLimitMB=8 -volume.preStopSeconds=1"
    healthcheck:
      test: curl -f http://localhost:8888/healthz
  mq_broker:
    image: chrislusf/seaweedfs:local
    ports:
      - 17777:17777
    command: "mq.broker -master=server:9333 -ip=mq_broker"
    depends_on:
      server:
        condition: service_healthy
  mq_agent:
    image: chrislusf/seaweedfs:local
    ports:
      - 16777:16777
    command: "mq.agent -broker=mq_broker:17777 -port=16777"
    depends_on:
      - mq_broker
  mq_client:
    image: chrislusf/seaweedfs:local
    # run a custom command instead of entrypoint
    command: "ls -al"
    depends_on:
      - mq_agent
@ -1,44 +0,0 @@
version: '3.9'

services:
  master:
    image: chrislusf/seaweedfs:local
    ports:
      - 9333:9333
      - 19333:19333
    command: "master -ip=master"
  volume:
    image: chrislusf/seaweedfs:local
    ports:
      - 8080:8080
      - 18080:18080
    command: "volume -mserver=master:9333 -port=8080 -ip=volume"
    depends_on:
      - master
  s3:
    image: chrislusf/seaweedfs:local
    ports:
      - 8888:8888
      - 18888:18888
      - 8333:8333
    command: '-v 9 filer -master="master:9333" -s3'
    depends_on:
      - master
      - volume
  nextcloud:
    image: nextcloud:23.0.5-apache
    environment:
      - OBJECTSTORE_S3_HOST=s3
      - OBJECTSTORE_S3_BUCKET=nextcloud
      - OBJECTSTORE_S3_KEY=some_access_key1
      - OBJECTSTORE_S3_SECRET=some_secret_key1
      - OBJECTSTORE_S3_PORT=8333
      - OBJECTSTORE_S3_SSL=false
      - OBJECTSTORE_S3_USEPATH_STYLE=true
      - SQLITE_DATABASE=nextcloud
      - NEXTCLOUD_ADMIN_USER=admin
      - NEXTCLOUD_ADMIN_PASSWORD=admin
    ports:
      - 80:80
    depends_on:
      - s3
@ -1,85 +0,0 @@
version: '3.9'

services:
  master:
    image: chrislusf/seaweedfs:local
    ports:
      - 9333:9333
      - 19333:19333
    command: "master -ip=master -volumeSizeLimitMB=100"
  volume:
    image: chrislusf/seaweedfs:local
    ports:
      - 8080:8080
      - 18080:18080
    command: "volume -mserver=master:9333 -port=8080 -ip=volume -max=0 -preStopSeconds=1"
    depends_on:
      - master
  s3:
    image: chrislusf/seaweedfs:local
    ports:
      - 8888:8888
      - 18888:18888
      - 8333:8333
    command: '-v 9 filer -master="master:9333" -s3 -s3.config=/etc/seaweedfs/s3.json -s3.port=8333'
    volumes:
      - ./s3.json:/etc/seaweedfs/s3.json
    depends_on:
      - master
      - volume
  minio:
    image: minio/minio
    ports:
      - 9000:9000
    command: 'minio server /data'
    environment:
      MINIO_ACCESS_KEY: "some_access_key1"
      MINIO_SECRET_KEY: "some_secret_key1"
    depends_on:
      - master
  registry1:
    image: registry:2
    environment:
      REGISTRY_HTTP_ADDR: "0.0.0.0:5001" # seaweedfs s3
      REGISTRY_LOG_LEVEL: "debug"
      REGISTRY_STORAGE: "s3"
      REGISTRY_STORAGE_S3_REGION: "us-east-1"
      REGISTRY_STORAGE_S3_REGIONENDPOINT: "http://s3:8333"
      REGISTRY_STORAGE_S3_BUCKET: "registry"
      REGISTRY_STORAGE_S3_ACCESSKEY: "some_access_key1"
      REGISTRY_STORAGE_S3_SECRETKEY: "some_secret_key1"
      REGISTRY_STORAGE_S3_V4AUTH: "true"
      REGISTRY_STORAGE_S3_SECURE: "false"
      REGISTRY_STORAGE_S3_SKIPVERIFY: "true"
      REGISTRY_STORAGE_S3_ROOTDIRECTORY: "/"
      REGISTRY_STORAGE_DELETE_ENABLED: "true"
      REGISTRY_STORAGE_REDIRECT_DISABLE: "true"
      REGISTRY_VALIDATION_DISABLED: "true"
    ports:
      - 5001:5001
    depends_on:
      - s3
      - minio
  registry2:
    image: registry:2
    environment:
      REGISTRY_HTTP_ADDR: "0.0.0.0:5002" # minio
      REGISTRY_LOG_LEVEL: "debug"
      REGISTRY_STORAGE: "s3"
      REGISTRY_STORAGE_S3_REGION: "us-east-1"
      REGISTRY_STORAGE_S3_REGIONENDPOINT: "http://minio:9000"
      REGISTRY_STORAGE_S3_BUCKET: "registry"
      REGISTRY_STORAGE_S3_ACCESSKEY: "some_access_key1"
      REGISTRY_STORAGE_S3_SECRETKEY: "some_secret_key1"
      REGISTRY_STORAGE_S3_V4AUTH: "true"
      REGISTRY_STORAGE_S3_SECURE: "false"
      REGISTRY_STORAGE_S3_SKIPVERIFY: "true"
      REGISTRY_STORAGE_S3_ROOTDIRECTORY: "/"
      REGISTRY_STORAGE_DELETE_ENABLED: "true"
      REGISTRY_STORAGE_REDIRECT_DISABLE: "true"
      REGISTRY_VALIDATION_DISABLED: "true"
    ports:
      - 5002:5002
    depends_on:
      - s3
      - minio
@ -1,61 +0,0 @@
version: '3.9'

services:
  master:
    image: chrislusf/seaweedfs:local
    ports:
      - 9333:9333
      - 19333:19333
    command: "master -ip=master"
  volume:
    image: chrislusf/seaweedfs:local
    ports:
      - 8080:8080
      - 18080:18080
    command: "volume -mserver=master:9333 -port=8080 -ip=volume -preStopSeconds=1"
    depends_on:
      - master
  filer:
    image: chrislusf/seaweedfs:local
    ports:
      - 8888:8888
      - 18888:18888
    command: '-v=9 filer -master="master:9333"'
    restart: on-failure
    volumes:
      - ./notification.toml:/etc/seaweedfs/notification.toml
    depends_on:
      - master
      - volume
      - rabbitmq
      - replicate
    environment:
      RABBIT_SERVER_URL: "amqp://guest:guest@rabbitmq:5672/"
  replicate:
    image: chrislusf/seaweedfs:local
    command: '-v=9 filer.replicate'
    restart: on-failure
    volumes:
      - ./notification.toml:/etc/seaweedfs/notification.toml
      - ./replication.toml:/etc/seaweedfs/replication.toml
    depends_on:
      - rabbitmq
    environment:
      RABBIT_SERVER_URL: "amqp://guest:guest@rabbitmq:5672/"
  s3:
    image: chrislusf/seaweedfs:local
    ports:
      - 8333:8333
    command: 's3 -filer="filer:8888"'
    depends_on:
      - master
      - volume
      - filer
  rabbitmq:
    image: rabbitmq:3.8.10-management-alpine
    ports:
      - 5672:5672
      - 15671:15671
      - 15672:15672
    environment:
      RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS: "-rabbit log_levels [{connection,error},{queue,debug}]"
@ -1,45 +0,0 @@
version: '3.9'

services:
  master:
    image: chrislusf/seaweedfs:local
    ports:
      - 9333:9333
      - 19333:19333
    command: "master -ip=master -volumeSizeLimitMB=16"
    environment:
      WEED_MASTER_VOLUME_GROWTH_COPY_1: 1
      WEED_MASTER_VOLUME_GROWTH_COPY_OTHER: 1
  volume:
    image: chrislusf/seaweedfs:local
    ports:
      - 8080:8080
      - 18080:18080
    command: "volume -mserver=master:9333 -port=8080 -ip=volume -preStopSeconds=1"
    depends_on:
      - master
  s3:
    image: chrislusf/seaweedfs:local
    ports:
      - 8888:8888
      - 18888:18888
      - 8000:8000
    command: 'filer -master="master:9333" -s3 -s3.config=/etc/seaweedfs/s3.json -s3.port=8000 -s3.allowEmptyFolder=false -s3.allowDeleteBucketNotEmpty=false'
    volumes:
      - ./s3.json:/etc/seaweedfs/s3.json
    depends_on:
      - master
      - volume
  s3tests:
    image: chrislusf/ceph-s3-tests:local
    volumes:
      - ./s3tests.conf:/opt/s3-tests/s3tests.conf
    environment:
      S3TEST_CONF: "s3tests.conf"
      NOSETESTS_OPTIONS: "--verbose --logging-level=ERROR --with-xunit --failure-detail s3tests_boto3.functional.test_s3"
      NOSETESTS_ATTR: "!fails_on_aws,!encryption,!bucket-policy,!versioning,!fails_on_rgw,!bucket-policy,!fails_with_subdomain,!policy_status,!object-lock,!lifecycle,!cors,!user-policy"
      NOSETESTS_EXCLUDE: "(post_object_tags_anonymous_request|get_obj_tagging|set_bucket_tagging|post_object_tags_authenticated_request|put_max_tags|put_modify_tags|test_put_obj_with_tags|get_bucket_encryption|delete_bucket_encryption|put_bucket_encryption|bucket_list_delimiter_basic|bucket_listv2_delimiter_basic|bucket_listv2_encoding_basic|bucket_list_encoding_basic|bucket_list_delimiter_prefix|bucket_listv2_delimiter_prefix_ends_with_delimiter|bucket_list_delimiter_prefix_ends_with_delimiter|bucket_list_delimiter_alt|bucket_listv2_delimiter_alt|bucket_list_delimiter_prefix_underscore|bucket_list_delimiter_percentage|bucket_listv2_delimiter_percentage|bucket_list_delimiter_whitespace|bucket_listv2_delimiter_whitespace|bucket_list_delimiter_dot|bucket_listv2_delimiter_dot|bucket_list_delimiter_unreadable|bucket_listv2_delimiter_unreadable|bucket_listv2_fetchowner_defaultempty|bucket_listv2_fetchowner_empty|bucket_list_prefix_delimiter_alt|bucket_listv2_prefix_delimiter_alt|bucket_list_prefix_delimiter_prefix_not_exist|bucket_listv2_prefix_delimiter_prefix_not_exist|bucket_list_prefix_delimiter_delimiter_not_exist|bucket_listv2_prefix_delimiter_delimiter_not_exist|bucket_list_prefix_delimiter_prefix_delimiter_not_exist|bucket_listv2_prefix_delimiter_prefix_delimiter_not_exist|bucket_list_maxkeys_none|bucket_listv2_maxkeys_none|bucket_list_maxkeys_invalid|bucket_listv2_continuationtoken_empty|bucket_list_return_data|bucket_list_objects_anonymous|bucket_listv2_objects_anonymous|bucket_concurrent_set_canned_acl|object_write_to_nonexist_bucket|object_requestid_matches_header_on_error|object_set_get_metadata_none_to_good|object_set_get_metadata_none_to_empty|object_set_get_metadata_overwrite_to_empty|post_object_anonymous_request|post_object_authenticated_request|post_object_authenticated_no_content_type|post_object_authenticated_request_bad_access_key|post_object_set_success_code|post_object_set_invalid_success_code|post_object_upload_larger_than_chunk|post_object_set_key_from_filename|post_object_ignored_header|post_object_case_insensitive_condition_fields|post_object_escaped_field_values|post_object_success_redirect_action|post_object_invalid_signature|post_object_invalid_access_key|post_object_missing_policy_condition|post_object_user_specified_header|post_object_request_missing_policy_specified_field|post_object_expired_policy|post_object_invalid_request_field_value|get_object_ifunmodifiedsince_good|put_object_ifmatch_failed|object_raw_get_bucket_gone|object_delete_key_bucket_gone|object_raw_get_bucket_acl|object_raw_get_object_acl|object_raw_response_headers|object_raw_authenticated_bucket_gone|object_raw_get_x_amz_expires_out_max_range|object_raw_get_x_amz_expires_out_positive_range|object_anon_put_write_access|object_raw_put_authenticated_expired|bucket_create_exists|bucket_create_naming_bad_short_one|bucket_create_naming_bad_short_two|bucket_get_location|bucket_acl_default|bucket_acl_canned|bucket_acl_canned_publicreadwrite|bucket_acl_canned_authenticatedread|object_acl_default|object_acl_canned_during_create|object_acl_canned|object_acl_canned_publicreadwrite|object_acl_canned_authenticatedread|object_acl_canned_bucketownerread|object_acl_canned_bucketownerfullcontrol|object_acl_full_control_verify_attributes|bucket_acl_canned_private_to_private|bucket_acl_grant_nonexist_user|bucket_acl_no_grants|bucket_acl_grant_email_not_exist|bucket_acl_revoke_all|bucket_recreate_not_overriding|object_copy_verify_contenttype|object_copy_to_itself_with_metadata|object_copy_not_owned_bucket|object_copy_not_owned_object_bucket|object_copy_retaining_metadata|object_copy_replacing_metadata|multipart_upload_empty|multipart_copy_invalid_range|multipart_copy_special_names|multipart_upload_resend_part|multipart_upload_size_too_small|abort_multipart_upload_not_found|multipart_upload_missing_part|100_continue|ranged_request_invalid_range|ranged_request_empty_object|access_bucket|list_multipart_upload_owner|multipart_upload_small)"
    depends_on:
      - master
      - volume
      - s3
@ -1,56 +0,0 @@
version: '3.9'
services:
  node1:
    image: chrislusf/seaweedfs:local
    command: "server -master -volume -filer"
    ports:
      - 8888:8888
      - 18888:18888
    healthcheck:
      test: [ "CMD", "curl", "--fail", "-I", "http://localhost:9333/cluster/healthz" ]
      interval: 1s
      start_period: 10s
      timeout: 30s
  mount1:
    image: chrislusf/seaweedfs:local
    privileged: true
    command: "mount -filer=node1:8888 -dir=/mnt -dirAutoCreate"
    healthcheck:
      test: [ "CMD", "curl", "--fail", "-I", "http://node1:8888/" ]
      interval: 1s
      start_period: 10s
      timeout: 30s
    depends_on:
      node1:
        condition: service_healthy
  node2:
    image: chrislusf/seaweedfs:local
    ports:
      - 7888:8888
      - 17888:18888
    command: "server -master -volume -filer"
    healthcheck:
      test: [ "CMD", "curl", "--fail", "-I", "http://localhost:9333/cluster/healthz" ]
      interval: 1s
      start_period: 10s
      timeout: 30s
  mount2:
    image: chrislusf/seaweedfs:local
    privileged: true
    command: "mount -filer=node2:8888 -dir=/mnt -dirAutoCreate"
    healthcheck:
      test: [ "CMD", "curl", "--fail", "-I", "http://node2:8888/" ]
      interval: 1s
      start_period: 10s
      timeout: 30s
    depends_on:
      node2:
        condition: service_healthy
  sync:
    image: chrislusf/seaweedfs:local
    command: "-v=4 filer.sync -a=node1:8888 -b=node2:8888 -a.debug -b.debug"
    depends_on:
      mount1:
        condition: service_healthy
      mount2:
        condition: service_healthy
@ -1,31 +0,0 @@

# Put this file to one of the location, with descending priority
#    ./master.toml
#    $HOME/.seaweedfs/master.toml
#    /etc/seaweedfs/master.toml
# this file is read by master

[master.maintenance]
# periodically run these scripts are the same as running them from 'weed shell'
scripts = """
  lock
  ec.encode -fullPercent=95 -quietFor=1h
  ec.rebuild -force
  ec.balance -force
  volume.balance -force
  volume.fix.replication
  unlock
"""
sleep_minutes = 17 # sleep minutes between each script execution

# configurations for tiered cloud storage
# old volumes are transparently moved to cloud for cost efficiency
[storage.backend]
  [storage.backend.s3.default]
  enabled = true
  aws_access_key_id = "any"     # if empty, loads from the shared credentials file (~/.aws/credentials).
  aws_secret_access_key = "any" # if empty, loads from the shared credentials file (~/.aws/credentials).
  region = "us-east-2"
  bucket = "volume_bucket"      # an existing bucket
  endpoint = "http://server2:8333"
  storage_class = "STANDARD_IA"
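The maintenance block above can be dry-run interactively before trusting it to the scheduler; a hedged sketch, assuming a master at localhost:9333 (weed shell reads one command per line):

    printf 'lock\nvolume.fix.replication\nvolume.balance -force\nunlock\n' | weed shell -master=localhost:9333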
@ -1,17 +0,0 @@
[notification.log]
# this is only for debugging purpose and does not work with "weed filer.replicate"
enabled = false


[notification.gocdk_pub_sub]
# The Go Cloud Development Kit (https://gocloud.dev).
# PubSub API (https://godoc.org/gocloud.dev/pubsub).
# Supports AWS SNS/SQS, Azure Service Bus, Google PubSub, NATS and RabbitMQ.
enabled = true
# This URL will Dial the RabbitMQ server at the URL in the environment
# variable RABBIT_SERVER_URL and open the exchange "myexchange".
# The exchange must have already been created by some other means, like
# the RabbitMQ management plugin. Create myexchange of type fanout and myqueue then
# create binding myexchange => myqueue
topic_url = "rabbit://swexchange"
sub_url = "rabbit://swqueue"
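The exchange and queue referenced by topic_url and sub_url must already exist. One way to create them, assuming the RabbitMQ management plugin's rabbitmqadmin tool is available (names taken from the URLs above):

    rabbitmqadmin declare exchange name=swexchange type=fanout
    rabbitmqadmin declare queue name=swqueue durable=true
    rabbitmqadmin declare binding source=swexchange destination=swqueue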
@ -1,11 +0,0 @@
[source.filer]
enabled = true
grpcAddress = "filer:18888"
# all files under this directory tree are replicated.
# this is not a directory on your hard drive, but on your filer.
# i.e., all files with this "prefix" are sent to notification message queue.
directory = "/buckets"

[sink.local_incremental]
enabled = true
directory = "/data"
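With a source/sink pair like this in place, the backup loop from the earlier compose file can also be run by hand; a sketch, assuming a filer reachable at localhost:8888:

    weed filer.backup -filer=localhost:8888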
@ -1,115 +0,0 @@
{
  "identities": [
    {
      "name": "anonymous",
      "actions": [
        "Read"
      ]
    },
    {
      "name": "some_admin_user",
      "credentials": [
        {
          "accessKey": "some_access_key1",
          "secretKey": "some_secret_key1"
        }
      ],
      "actions": [
        "Admin",
        "Read",
        "List",
        "Tagging",
        "Write"
      ]
    },
    {
      "name": "s3_tests",
      "credentials": [
        {
          "accessKey": "ABCDEFGHIJKLMNOPQRST",
          "secretKey": "abcdefghijklmnopqrstuvwxyzabcdefghijklmn"
        },
        {
          "accessKey": "0555b35654ad1656d804",
          "secretKey": "h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q=="
        }
      ],
      "actions": [
        "Admin",
        "Read",
        "List",
        "Tagging",
        "Write"
      ],
      "account": {
        "id": "testid"
      }
    },
    {
      "name": "s3_tests_alt",
      "credentials": [
        {
          "accessKey": "NOPQRSTUVWXYZABCDEFG",
          "secretKey": "nopqrstuvwxyzabcdefghijklmnabcdefghijklm"
        }
      ],
      "actions": [
        "Admin",
        "Read",
        "List",
        "Tagging",
        "Write"
      ]
    },
    {
      "name": "s3_tests_tenant",
      "credentials": [
        {
          "accessKey": "HIJKLMNOPQRSTUVWXYZA",
          "secretKey": "opqrstuvwxyzabcdefghijklmnopqrstuvwxyzab"
        }
      ],
      "actions": [
        "Admin",
        "Read",
        "List",
        "Tagging",
        "Write"
      ]
    },
    {
      "name": "some_read_only_user",
      "credentials": [
        {
          "accessKey": "some_access_key2",
          "secretKey": "some_secret_key2"
        }
      ],
      "actions": [
        "Read"
      ]
    },
    {
      "name": "some_normal_user",
      "credentials": [
        {
          "accessKey": "some_access_key3",
          "secretKey": "some_secret_key3"
        }
      ],
      "actions": [
        "Read",
        "List",
        "Tagging",
        "Write"
      ]
    }
  ],
  "accounts": [
    {
      "id" : "testid",
      "displayName": "M. Tester",
      "emailAddress": "tester@ceph.com"
    }
  ]
}
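The identities above can be exercised with any S3 client; a hedged example using the aws CLI against the S3 port used in the compose files (the endpoint and bucket name are illustrative):

    export AWS_ACCESS_KEY_ID=some_access_key1
    export AWS_SECRET_ACCESS_KEY=some_secret_key1
    aws --endpoint-url http://localhost:8333 s3 mb s3://test-bucket
    aws --endpoint-url http://localhost:8333 s3 ls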
@ -1,103 +0,0 @@
[DEFAULT]
## this section is just used for host, port and bucket_prefix

# host set for rgw in vstart.sh
host = 127.0.0.1

# port set for rgw in vstart.sh
port = 8000

## say "False" to disable TLS
is_secure = False

[fixtures]
## all the buckets created will start with this prefix;
## {random} will be filled with random characters to pad
## the prefix to 30 characters long, and avoid collisions
bucket prefix = yournamehere-{random}-

[s3 main]
# main display_name set in vstart.sh
display_name = M. Tester

# main user_id name set in vstart.sh
user_id = testid

# main email set in vstart.sh
email = tester@ceph.com

# zonegroup api_name for bucket location
api_name = default

## main AWS access key
access_key = 0555b35654ad1656d804

## main AWS secret key
secret_key = h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q==

## replace with key id obtained when secret is created, or delete if KMS not tested
#kms_keyid = 01234567-89ab-cdef-0123-456789abcdef

[s3 alt]
# alt display_name set in vstart.sh
display_name = john.doe
## alt email set in vstart.sh
email = john.doe@example.com

# alt user_id set in vstart.sh
user_id = 56789abcdef0123456789abcdef0123456789abcdef0123456789abcdef01234

# alt AWS access key set in vstart.sh
access_key = NOPQRSTUVWXYZABCDEFG

# alt AWS secret key set in vstart.sh
secret_key = nopqrstuvwxyzabcdefghijklmnabcdefghijklm

[s3 tenant]
# tenant display_name set in vstart.sh
display_name = testx$tenanteduser

# tenant user_id set in vstart.sh
user_id = 9876543210abcdef0123456789abcdef0123456789abcdef0123456789abcdef

# tenant AWS access key set in vstart.sh
access_key = HIJKLMNOPQRSTUVWXYZA

# tenant AWS secret key set in vstart.sh
secret_key = opqrstuvwxyzabcdefghijklmnopqrstuvwxyzab

# tenant email set in vstart.sh
email = tenanteduser@example.com

# tenant name
tenant = testx

[iam]
#used for iam operations in sts-tests
#email from vstart.sh
email = s3@example.com

#user_id from vstart.sh
user_id = 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef

#access_key from vstart.sh
access_key = ABCDEFGHIJKLMNOPQRST

#secret_key from vstart.sh
secret_key = abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz

#display_name from vstart.sh
display_name = youruseridhere

[iam root]
access_key = AAAAAAAAAAAAAAAAAAaa
secret_key = aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
user_id = RGW11111111111111111
email = account1@ceph.com

# iam account root user in a different account than [iam root]
[iam alt root]
access_key = BBBBBBBBBBBBBBBBBBbb
secret_key = bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb
user_id = RGW22222222222222222
email = account2@ceph.com
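Inside the ceph-s3-tests image this config is consumed through the S3TEST_CONF environment variable; a single test can be run roughly like so (the working directory and test name here are assumptions):

    cd /opt/s3-tests
    S3TEST_CONF=s3tests.conf nosetests -v s3tests_boto3.functional.test_s3:test_bucket_list_empty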
@ -1,84 +0,0 @@
# 2021-01-30 16:25:30
version: '3.8'

services:

  etcd:
    image: gasparekatapy/etcd
    networks:
      - net
    deploy:
      mode: replicated
      replicas: 3

  master:
    image: chrislusf/seaweedfs:local
    environment:
      WEED_MASTER_FILER_DEFAULT: "filer:8888"
      WEED_MASTER_SEQUENCER_TYPE: "raft"
    ports:
      - "9333:9333"
      - "19333:19333"
    networks:
      - net
    command:
      - 'master'
      - '-resumeState=true'
      - '-ip=master'
      - '-port=9333'
    deploy:
      mode: replicated
      replicas: 1

  filer:
    image: chrislusf/seaweedfs:local
    environment:
      WEED_LEVELDB2_ENABLED: "false"
      WEED_ETCD_ENABLED: "true"
      WEED_ETCD_SERVERS: "etcd:2379"
    ports:
      - target: 8888
        published: 8888
        protocol: tcp
        mode: host
      - target: 18888
        published: 18888
        protocol: tcp
        mode: host
    networks:
      - net
    command:
      - 'filer'
      - '-ip=filer'
      - '-port=8888'
      - '-port.readonly=28888'
      - '-master=master:9333'
      - '-disableDirListing=true'
    deploy:
      mode: replicated
      replicas: 1

  volume:
    image: chrislusf/seaweedfs:local
    ports:
      - target: 8080
        published: 8080
        protocol: tcp
        mode: host
      - target: 18080
        published: 18080
        protocol: tcp
        mode: host
    networks:
      - net
    command:
      - 'volume'
      - '-mserver=master:9333'
      - '-port=8080'
    deploy:
      mode: global

###########################################################################

networks:
  net:
@ -1,62 +0,0 @@
version: '3.9'

services:
  etcd:
    image: quay.io/coreos/etcd:v3.5.4
    command: "etcd --advertise-client-urls http://etcd:2379 --listen-client-urls http://0.0.0.0:2379"
    ports:
      - 2379:2379
  master:
    image: chrislusf/seaweedfs:local
    ports:
      - 9333:9333
      - 19333:19333
    command: "master -ip=master -volumeSizeLimitMB=100"
  volume:
    image: chrislusf/seaweedfs:local
    ports:
      - 8080:8080
      - 18080:18080
    command: "volume -mserver=master:9333 -port=8080 -ip=volume -max=0 -preStopSeconds=1"
    depends_on:
      - master
  s3:
    image: chrislusf/seaweedfs:local
    ports:
      - 8888:8888
      - 18888:18888
      - 8333:8333
    command: '-v 9 filer -master="master:9333" -s3 -s3.config=/etc/seaweedfs/s3.json -s3.port=8333'
    environment:
      WEED_LEVELDB2_ENABLED: 'false'
      WEED_ETCD_ENABLED: 'true'
      WEED_ETCD_KEY_PREFIX: 'seaweedfs.'
      WEED_ETCD_SERVERS: "http://etcd:2379"
    volumes:
      - ./s3.json:/etc/seaweedfs/s3.json
    depends_on:
      - etcd
      - master
      - volume
  registry:
    image: registry:2
    environment:
      REGISTRY_HTTP_ADDR: "0.0.0.0:5001" # seaweedfs s3
      REGISTRY_LOG_LEVEL: "debug"
      REGISTRY_STORAGE: "s3"
      REGISTRY_STORAGE_S3_REGION: "us-east-1"
      REGISTRY_STORAGE_S3_REGIONENDPOINT: "http://s3:8333"
      REGISTRY_STORAGE_S3_BUCKET: "registry"
      REGISTRY_STORAGE_S3_ACCESSKEY: "some_access_key1"
      REGISTRY_STORAGE_S3_SECRETKEY: "some_secret_key1"
      REGISTRY_STORAGE_S3_V4AUTH: "true"
      REGISTRY_STORAGE_S3_SECURE: "false"
      REGISTRY_STORAGE_S3_SKIPVERIFY: "true"
      REGISTRY_STORAGE_S3_ROOTDIRECTORY: "/"
      REGISTRY_STORAGE_DELETE_ENABLED: "true"
      REGISTRY_STORAGE_REDIRECT_DISABLE: "true"
      REGISTRY_VALIDATION_DISABLED: "true"
    ports:
      - 5001:5001
    depends_on:
      - s3
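Once the filer has written a few entries, the etcd-backed metadata can be inspected directly; a sketch using etcdctl inside the etcd container (the key prefix matches WEED_ETCD_KEY_PREFIX above):

    docker compose exec etcd etcdctl get --prefix 'seaweedfs.' --keys-only | head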
@ -1,30 +0,0 @@
version: '3.9'

services:
  tarantool:
    image: chrislusf/tarantool_dev_env
    entrypoint: "tt start app -i"
    environment:
      APP_USER_PASSWORD: "app"
      CLIENT_USER_PASSWORD: "client"
      REPLICATOR_USER_PASSWORD: "replicator"
      STORAGE_USER_PASSWORD: "storage"
    network_mode: "host"
    ports:
      - "3303:3303"

  s3:
    image: chrislusf/seaweedfs:local
    command: "server -ip=127.0.0.1 -filer -master.volumeSizeLimitMB=16 -volume.max=0 -volume -volume.preStopSeconds=1 -s3 -s3.config=/etc/seaweedfs/s3.json -s3.port=8000 -s3.allowEmptyFolder=false -s3.allowDeleteBucketNotEmpty=false"
    volumes:
      - ./s3.json:/etc/seaweedfs/s3.json
    environment:
      WEED_LEVELDB2_ENABLED: "false"
      WEED_TARANTOOL_ENABLED: "true"
      WEED_TARANTOOL_ADDRESS: "127.0.0.1:3303"
      WEED_TARANTOOL_USER: "client"
      WEED_TARANTOOL_PASSWORD: "client"
      WEED_MASTER_VOLUME_GROWTH_COPY_OTHER: 1
    network_mode: "host"
    depends_on:
      - tarantool
@ -1,35 +0,0 @@
version: '3.9'

services:
  ydb:
    image: cr.yandex/yc/yandex-docker-local-ydb
    ports:
      - 2135:2135
      - 8765:8765
      - 2136:2136
    environment:
      - YDB_DEFAULT_LOG_LEVEL=DEBUG
      - GRPC_TLS_PORT=2135
      - GRPC_PORT=2136
      - MON_PORT=8765
  s3:
    image: chrislusf/seaweedfs:local
    ports:
      - 9333:9333
      - 19333:19333
      - 8888:8888
      - 8000:8000
      - 18888:18888
    command: "server -ip=s3 -filer -master.volumeSizeLimitMB=16 -volume.max=0 -volume -volume.preStopSeconds=1 -s3 -s3.config=/etc/seaweedfs/s3.json -s3.port=8000 -s3.allowEmptyFolder=false -s3.allowDeleteBucketNotEmpty=false"
    volumes:
      - ./s3.json:/etc/seaweedfs/s3.json
    environment:
      WEED_LEVELDB2_ENABLED: "false"
      WEED_YDB_ENABLED: "true"
      WEED_YDB_DSN: "grpc://ydb:2136/?database=local"
      WEED_YDB_PREFIX: "seaweedfs"
      YDB_ANONYMOUS_CREDENTIALS: 1
      WEED_MASTER_VOLUME_GROWTH_COPY_1: 1
      WEED_MASTER_VOLUME_GROWTH_COPY_OTHER: 1
    depends_on:
      - ydb
@ -1,20 +0,0 @@
WEED_GRPC_CA=/etc/seaweedfs/tls/SeaweedFS_CA.crt
WEED_GRPC_ALLOWED_WILDCARD_DOMAIN=".dev"
WEED_GRPC_MASTER_CERT=/etc/seaweedfs/tls/master01.dev.crt
WEED_GRPC_MASTER_KEY=/etc/seaweedfs/tls/master01.dev.key
WEED_GRPC_VOLUME_CERT=/etc/seaweedfs/tls/volume01.dev.crt
WEED_GRPC_VOLUME_KEY=/etc/seaweedfs/tls/volume01.dev.key
WEED_GRPC_FILER_CERT=/etc/seaweedfs/tls/filer01.dev.crt
WEED_GRPC_FILER_KEY=/etc/seaweedfs/tls/filer01.dev.key
WEED_GRPC_CLIENT_CERT=/etc/seaweedfs/tls/client01.dev.crt
WEED_GRPC_CLIENT_KEY=/etc/seaweedfs/tls/client01.dev.key
WEED_GRPC_MASTER_ALLOWED_COMMONNAMES="volume01.dev,master01.dev,filer01.dev,client01.dev"
WEED_GRPC_VOLUME_ALLOWED_COMMONNAMES="volume01.dev,master01.dev,filer01.dev,client01.dev"
WEED_GRPC_FILER_ALLOWED_COMMONNAMES="volume01.dev,master01.dev,filer01.dev,client01.dev"
WEED_GRPC_CLIENT_ALLOWED_COMMONNAMES="volume01.dev,master01.dev,filer01.dev,client01.dev"
WEED_HTTPS_CLIENT_ENABLE=true
WEED_HTTPS_VOLUME_CERT=/etc/seaweedfs/tls/volume01.dev.crt
WEED_HTTPS_VOLUME_KEY=/etc/seaweedfs/tls/volume01.dev.key
WEED_HTTPS_VOLUME_CA=/etc/seaweedfs/tls/SeaweedFS_CA.crt
#GRPC_GO_LOG_SEVERITY_LEVEL=info
#GRPC_GO_LOG_VERBOSITY_LEVEL=2
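The compose files above load this file through env_file: - ${ENV_FILE:-dev.env}, so an alternative TLS setup can be swapped in without editing the YAML; my-tls.env here is a hypothetical file name:

    ENV_FILE=my-tls.env docker compose up -d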
@ -1,37 +0,0 @@
[
  {
    "Username": "admin",
    "Password": "myadminpassword",
    "PublicKeys": [
    ],
    "HomeDir": "/",
    "Permissions": {
      "/": ["*"]
    },
    "Uid": 0,
    "Gid": 0
  },
  {
    "Username": "user1",
    "Password": "myuser1password",
    "PublicKeys": [""],
    "HomeDir": "/user1",
    "Permissions": {
      "/user1": ["*"],
      "/public": ["read", "list","write"]
    },
    "Uid": 1111,
    "Gid": 1111
  },
  {
    "Username": "readonly",
    "Password": "myreadonlypassword",
    "PublicKeys": [],
    "HomeDir": "/public",
    "Permissions": {
      "/public": ["read", "list"]
    },
    "Uid": 1112,
    "Gid": 1112
  }
]
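A user from this file can then log in with any SFTP client; a hedged example for user1 (the port depends on how weed sftp was started, so 2022 here is illustrative):

    sftp -P 2022 user1@localhost    # password: myuser1password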
@ -1,68 +1,48 @@
#!/bin/sh

-isArgPassed() {
-  arg="$1"
-  argWithEqualSign="$1="
-  shift
-  while [ $# -gt 0 ]; do
-    passedArg="$1"
-    shift
-    case $passedArg in
-      $arg)
-        return 0
-        ;;
-      $argWithEqualSign*)
-        return 0
-        ;;
-    esac
-  done
-  return 1
-}

case "$1" in

  'master')
    ARGS="-mdir=/data -volumePreallocate -volumeSizeLimitMB=1024"
    shift
-   exec /usr/bin/weed -logtostderr=true master $ARGS $@
+   exec /usr/bin/weed $@ $ARGS
    ;;

  'volume')
    ARGS="-dir=/data -max=0"
-   if isArgPassed "-max" "$@"; then
+   if [[ $@ == *"-max="* ]]; then
      ARGS="-dir=/data"
    fi
    shift
-   exec /usr/bin/weed -logtostderr=true volume $ARGS $@
+   exec /usr/bin/weed $@ $ARGS
    ;;

  'server')
    ARGS="-dir=/data -volume.max=0 -master.volumePreallocate -master.volumeSizeLimitMB=1024"
-   if isArgPassed "-volume.max" "$@"; then
+   if [[ $@ == *"-volume.max="* ]]; then
      ARGS="-dir=/data -master.volumePreallocate -master.volumeSizeLimitMB=1024"
    fi
    shift
-   exec /usr/bin/weed -logtostderr=true server $ARGS $@
+   exec /usr/bin/weed $@ $ARGS
    ;;

  'filer')
    ARGS=""
    shift
-   exec /usr/bin/weed -logtostderr=true filer $ARGS $@
+   exec /usr/bin/weed $@ $ARGS
    ;;

  's3')
    ARGS="-domainName=$S3_DOMAIN_NAME -key.file=$S3_KEY_FILE -cert.file=$S3_CERT_FILE"
    shift
-   exec /usr/bin/weed -logtostderr=true s3 $ARGS $@
+   exec /usr/bin/weed $@ $ARGS
    ;;

-  'shell')
-    ARGS="-cluster=$SHELL_CLUSTER -filer=$SHELL_FILER -filerGroup=$SHELL_FILER_GROUP -master=$SHELL_MASTER -options=$SHELL_OPTIONS"
-    shift
-    exec echo "$@" | /usr/bin/weed -logtostderr=true shell $ARGS
-    ;;

  'cronjob')
    MASTER=${WEED_MASTER-localhost:9333}
    FIX_REPLICATION_CRON_SCHEDULE=${CRON_SCHEDULE-*/7 * * * * *}
    echo "$FIX_REPLICATION_CRON_SCHEDULE" 'echo "volume.fix.replication" | weed shell -master='$MASTER > /crontab
    BALANCING_CRON_SCHEDULE=${CRON_SCHEDULE-25 * * * * *}
    echo "$BALANCING_CRON_SCHEDULE" 'echo "volume.balance -c ALL -force" | weed shell -master='$MASTER >> /crontab
    echo "Running Crontab:"
    cat /crontab
    exec supercronic /crontab
    ;;
  *)
    exec /usr/bin/weed $@
    ;;
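The isArgPassed helper on the removed side exists so that a user-supplied -max, either as a separate token or in -max=value form, suppresses the entrypoint's default -max=0; both invocation shapes below take the "-dir=/data" branch, whereas the older string match only catches the -max= form:

    # both of these suppress the default "-max=0" added by the entrypoint
    docker run --rm chrislusf/seaweedfs volume -max=16
    docker run --rm chrislusf/seaweedfs volume -max 16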
@ -1,3 +0,0 @@
[rocksdb]
enabled = true
dir = "/data/filer_rocksdb"
docker/local-cluster-compose.yml (new file, 53 lines)
@ -0,0 +1,53 @@
version: '2'

services:
  master0:
    image: chrislusf/seaweedfs:local
    ports:
      - 9333:9333
      - 19333:19333
    command: "master -ip=master0 -port=9333 -peers=master0:9333,master1:9334,master2:9335"
  master1:
    image: chrislusf/seaweedfs:local
    ports:
      - 9334:9334
      - 19334:19334
    command: "master -ip=master1 -port=9334 -peers=master0:9333,master1:9334,master2:9335"
  master2:
    image: chrislusf/seaweedfs:local
    ports:
      - 9335:9335
      - 19335:19335
    command: "master -ip=master2 -port=9335 -peers=master0:9333,master1:9334,master2:9335"
  volume:
    image: chrislusf/seaweedfs:local
    ports:
      - 8080:8080
      - 18080:18080
    command: 'volume -mserver="master0:9333,master1:9334,master2:9335" -port=8080 -ip=volume'
    depends_on:
      - master0
      - master1
      - master2
  filer:
    image: chrislusf/seaweedfs:local
    ports:
      - 8888:8888
      - 18888:18888
    command: 'filer -master="master0:9333,master1:9334,master2:9335"'
    depends_on:
      - master0
      - master1
      - master2
      - volume
  s3:
    image: chrislusf/seaweedfs:local
    ports:
      - 8333:8333
    command: 's3 -filer="filer:8888"'
    depends_on:
      - master0
      - master1
      - master2
      - volume
      - filer
docker/local-dev-compose.yml (new file, 35 lines)
@ -0,0 +1,35 @@
version: '2'

services:
  master:
    image: chrislusf/seaweedfs:local
    ports:
      - 9333:9333
      - 19333:19333
    command: "master -ip=master"
  volume:
    image: chrislusf/seaweedfs:local
    ports:
      - 8080:8080
      - 18080:18080
    command: "volume -mserver=master:9333 -port=8080 -ip=volume"
    depends_on:
      - master
  filer:
    image: chrislusf/seaweedfs:local
    ports:
      - 8888:8888
      - 18888:18888
    command: 'filer -master="master:9333"'
    depends_on:
      - master
      - volume
  s3:
    image: chrislusf/seaweedfs:local
    ports:
      - 8333:8333
    command: 's3 -filer="filer:8888"'
    depends_on:
      - master
      - volume
      - filer
@ -1,30 +0,0 @@
# HTTP 1.1 support
proxy_http_version 1.1;
#proxy_buffering off;
proxy_set_header Host $http_host;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $proxy_connection;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $proxy_x_forwarded_proto;
proxy_set_header X-Forwarded-Ssl $proxy_x_forwarded_ssl;
proxy_set_header X-Forwarded-Port $proxy_x_forwarded_port;

# Mitigate httpoxy attack (see README for details)
proxy_set_header Proxy "";

# aws default max_concurrent_requests 10
# aws default multipart_threshold 8MB
proxy_buffering on;          # GET buffering or “X-Accel-Buffering” enables or disables buffering of a response;
proxy_buffers 64 1m;         # buffers used for reading a response from the proxied server, for a single connection
proxy_buffer_size 8k;        # maximum size of the data that nginx can receive from the server at a time is set
proxy_busy_buffers_size 2m;

proxy_request_buffering on;  # PUT buffering
client_body_buffer_size 64m; # buffer size for reading client request body
client_max_body_size 64m;

proxy_next_upstream error timeout non_idempotent http_500; # PUT request should be passed to the next server:
proxy_connect_timeout 200ms;
proxy_read_timeout 3s;       # timeout is set only between two successive read operations
proxy_send_timeout 3s;       # timeout is set only between two successive write operations
@ -1,14 +0,0 @@
global:
  scrape_interval: 30s
  scrape_timeout: 10s

scrape_configs:
  - job_name: services
    metrics_path: /metrics
    static_configs:
      - targets:
          - 'prometheus:9090'
          - 'master:9324'
          - 'volume:9325'
          - 'filer:9326'
          - 's3:9327'
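A small Go check that the scrape targets above are actually exporting metrics. The hostnames are the ones used inside the compose network; running from the host instead requires substituting localhost and the published metrics ports — an assumption about how the stack is reachable, not part of the original config.

package main

import (
	"fmt"
	"net/http"
	"time"
)

func main() {
	client := &http.Client{Timeout: 3 * time.Second}
	// Addresses as seen from inside the compose network.
	targets := []string{
		"http://master:9324/metrics",
		"http://volume:9325/metrics",
		"http://filer:9326/metrics",
		"http://s3:9327/metrics",
	}
	for _, t := range targets {
		resp, err := client.Get(t)
		if err != nil {
			fmt.Println(t, "unreachable:", err)
			continue
		}
		resp.Body.Close()
		fmt.Println(t, "->", resp.Status)
	}
}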
@ -1,4 +1,4 @@
-version: '3.9'
+version: '2'
 
 services:
   master:
@ -6,15 +6,13 @@ services:
     ports:
       - 9333:9333
       - 19333:19333
-      - 9324:9324
-    command: "master -ip=master -ip.bind=0.0.0.0 -metricsPort=9324"
+    command: "master -ip=master"
   volume:
     image: chrislusf/seaweedfs # use a remote image
     ports:
       - 8080:8080
       - 18080:18080
-      - 9325:9325
-    command: 'volume -mserver="master:9333" -ip.bind=0.0.0.0 -port=8080 -metricsPort=9325'
+    command: 'volume -mserver="master:9333" -port=8080'
     depends_on:
       - master
   filer:
@ -22,38 +20,28 @@ services:
     ports:
       - 8888:8888
       - 18888:18888
-      - 9326:9326
-    command: 'filer -master="master:9333" -ip.bind=0.0.0.0 -metricsPort=9326'
+    command: 'filer -master="master:9333"'
     tty: true
     stdin_open: true
     depends_on:
       - master
       - volume
+  cronjob:
+    image: chrislusf/seaweedfs # use a remote image
+    command: 'cronjob'
+    environment:
+      # Run re-replication every 2 minutes
+      CRON_SCHEDULE: '*/2 * * * * *' # Default: '*/5 * * * * *'
+      WEED_MASTER: master:9333 # Default: localhost:9333
+    depends_on:
+      - master
+      - volume
   s3:
     image: chrislusf/seaweedfs # use a remote image
     ports:
       - 8333:8333
-      - 9327:9327
-    command: 's3 -filer="filer:8888" -ip.bind=0.0.0.0 -metricsPort=9327'
+    command: 's3 -filer="filer:8888"'
     depends_on:
       - master
       - volume
       - filer
-  webdav:
-    image: chrislusf/seaweedfs # use a remote image
-    ports:
-      - 7333:7333
-    command: 'webdav -filer="filer:8888"'
-    depends_on:
-      - master
-      - volume
-      - filer
-  prometheus:
-    image: prom/prometheus:v2.21.0
-    ports:
-      - 9000:9090
-    volumes:
-      - ./prometheus:/etc/prometheus
-    command: --web.enable-lifecycle --config.file=/etc/prometheus/prometheus.yml
-    depends_on:
-      - s3
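Since this compose file exposes the s3 gateway on port 8333, a minimal Go sketch of talking S3 to it: create a bucket, then put an object. This assumes the gateway runs without S3 authentication configured (with identities enabled, requests must be signed, e.g. via an AWS SDK); the bucket and key names are arbitrary.

package main

import (
	"fmt"
	"net/http"
	"strings"
)

func main() {
	const endpoint = "http://localhost:8333" // s3 service published above

	// S3 path-style requests: PUT /bucket creates the bucket,
	// PUT /bucket/key stores an object.
	steps := []struct{ method, url, body string }{
		{http.MethodPut, endpoint + "/demo-bucket", ""},
		{http.MethodPut, endpoint + "/demo-bucket/hello.txt", "hello via S3"},
	}
	for _, s := range steps {
		req, err := http.NewRequest(s.method, s.url, strings.NewReader(s.body))
		if err != nil {
			panic(err)
		}
		resp, err := http.DefaultClient.Do(req)
		if err != nil {
			panic(err)
		}
		resp.Body.Close()
		fmt.Println(s.method, s.url, "->", resp.Status)
	}
}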
@ -1,4 +1,4 @@
-version: '3.9'
+version: '2'
 
 services:
   master:
@ -20,7 +20,7 @@ services:
     ports:
       - 8888:8888
       - 18888:18888
-    command: 'filer -master="master:9333" -ip.bind=0.0.0.0'
+    command: 'filer -master="master:9333"'
     depends_on:
       - master
       - volume
@ -28,16 +28,7 @@ services:
     image: chrislusf/seaweedfs:dev # use a remote dev image
     ports:
       - 8333:8333
-    command: 's3 -filer="filer:8888" -ip.bind=0.0.0.0'
+    command: 's3 -filer="filer:8888"'
     depends_on:
       - master
       - volume
-      - filer
-  webdav:
-    image: chrislusf/seaweedfs:dev # use a remote dev image
-    ports:
-      - 7333:7333
-    command: 'webdav -filer="filer:8888"'
-    depends_on:
-      - master
-      - volume
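The master-branch dev compose additionally exposes a webdav gateway on 7333. Because a WebDAV file write is a plain HTTP PUT, the Go standard library is enough for a quick probe; this assumes the port is published on localhost, and the path is arbitrary.

package main

import (
	"fmt"
	"net/http"
	"strings"
)

func main() {
	// The webdav service above listens on 7333; a WebDAV file write is an HTTP PUT.
	req, err := http.NewRequest(http.MethodPut,
		"http://localhost:7333/demo.txt", strings.NewReader("hello over webdav"))
	if err != nil {
		panic(err)
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("PUT status:", resp.Status)
}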
@ -1,12 +0,0 @@
CREATE DATABASE IF NOT EXISTS seaweedfs;
CREATE USER IF NOT EXISTS 'seaweedfs'@'%' IDENTIFIED BY 'secret';
GRANT ALL PRIVILEGES ON seaweedfs.* TO 'seaweedfs'@'%';
FLUSH PRIVILEGES;
USE seaweedfs;
CREATE TABLE IF NOT EXISTS `filemeta` (
    `dirhash`   BIGINT       NOT NULL COMMENT 'first 64 bits of MD5 hash value of directory field',
    `name`      VARCHAR(766) NOT NULL COMMENT 'directory or file name',
    `directory` TEXT         NOT NULL COMMENT 'full path to parent directory',
    `meta`      LONGBLOB,
    PRIMARY KEY (`dirhash`, `name`)
) DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;
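The script above is the schema for the filer's MySQL store. A Go sketch of listing one directory's entries with database/sql and the github.com/go-sql-driver/mysql driver, assuming MySQL is reachable on localhost:3306 with the credentials created above; filtering on the human-readable directory column (rather than computing dirhash) is a simplification for illustration.

package main

import (
	"database/sql"
	"fmt"

	_ "github.com/go-sql-driver/mysql" // registers the "mysql" driver
)

func main() {
	db, err := sql.Open("mysql", "seaweedfs:secret@tcp(localhost:3306)/seaweedfs")
	if err != nil {
		panic(err)
	}
	defer db.Close()

	// The filer would normally look up by dirhash (first 64 bits of the
	// directory's MD5); querying the directory column is simpler here.
	rows, err := db.Query(
		"SELECT name, LENGTH(meta) FROM filemeta WHERE directory = ?", "/demo")
	if err != nil {
		panic(err)
	}
	defer rows.Close()
	for rows.Next() {
		var name string
		var metaLen int
		if err := rows.Scan(&name, &metaLen); err != nil {
			panic(err)
		}
		fmt.Printf("%s (%d bytes of metadata)\n", name, metaLen)
	}
}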
@ -1,14 +0,0 @@
package = 'app'
version = 'scm-1'
source = {
    url = '/dev/null',
}
dependencies = {
    'crud == 1.5.2-1',
    'expirationd == 1.6.0-1',
    'metrics-export-role == 0.3.0-1',
    'vshard == 0.1.32-1'
}
build = {
    type = 'none';
}
@ -1,145 +0,0 @@
config:
  context:
    app_user_password:
      from: env
      env: APP_USER_PASSWORD
    client_user_password:
      from: env
      env: CLIENT_USER_PASSWORD
    replicator_user_password:
      from: env
      env: REPLICATOR_USER_PASSWORD
    storage_user_password:
      from: env
      env: STORAGE_USER_PASSWORD

credentials:
  roles:
    crud-role:
      privileges:
        - permissions: [ "execute" ]
          lua_call: [ "crud.delete", "crud.get", "crud.upsert" ]
  users:
    app:
      password: '{{ context.app_user_password }}'
      roles: [ public, crud-role ]
    client:
      password: '{{ context.client_user_password }}'
      roles: [ super ]
    replicator:
      password: '{{ context.replicator_user_password }}'
      roles: [ replication ]
    storage:
      password: '{{ context.storage_user_password }}'
      roles: [ sharding ]

iproto:
  advertise:
    peer:
      login: replicator
    sharding:
      login: storage

sharding:
  bucket_count: 10000

metrics:
  include: [ all ]
  exclude: [ vinyl ]
  labels:
    alias: '{{ instance_name }}'

groups:
  storages:
    roles:
      - roles.crud-storage
      - roles.expirationd
      - roles.metrics-export
    roles_cfg:
      roles.expirationd:
        cfg:
          metrics: true
        filer_metadata_task:
          space: filer_metadata
          is_expired: filer_metadata.is_expired
          options:
            atomic_iteration: true
            force: true
            index: 'expire_at_idx'
            iterator_type: GT
            start_key:
              - 0
            tuples_per_iteration: 10000
    app:
      module: storage
    sharding:
      roles: [ storage ]
    replication:
      failover: election
    database:
      use_mvcc_engine: true
    replicasets:
      storage-001:
        instances:
          storage-001-a:
            roles_cfg:
              roles.metrics-export:
                http:
                  - listen: '0.0.0.0:8081'
                    endpoints:
                      - path: /metrics/prometheus/
                        format: prometheus
                      - path: /metrics/json
                        format: json
            iproto:
              listen:
                - uri: 127.0.0.1:3301
              advertise:
                client: 127.0.0.1:3301
          storage-001-b:
            roles_cfg:
              roles.metrics-export:
                http:
                  - listen: '0.0.0.0:8082'
                    endpoints:
                      - path: /metrics/prometheus/
                        format: prometheus
                      - path: /metrics/json
                        format: json
            iproto:
              listen:
                - uri: 127.0.0.1:3302
              advertise:
                client: 127.0.0.1:3302
  routers:
    roles:
      - roles.crud-router
      - roles.metrics-export
    roles_cfg:
      roles.crud-router:
        stats: true
        stats_driver: metrics
        stats_quantiles: true
    app:
      module: router
    sharding:
      roles: [ router ]
    replicasets:
      router-001:
        instances:
          router-001-a:
            roles_cfg:
              roles.metrics-export:
                http:
                  - listen: '0.0.0.0:8083'
                    endpoints:
                      - path: /metrics/prometheus/
                        format: prometheus
                      - path: /metrics/json
                        format: json
            iproto:
              listen:
                - uri: 127.0.0.1:3303
              advertise:
                client: 127.0.0.1:3303
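The cluster config above exposes a router on 127.0.0.1:3303 and grants the app user execute rights only on crud.delete, crud.get and crud.upsert. A sketch of calling crud.get through that router with the go-tarantool v1 client; the key layout for the filer_metadata space is hypothetical here, not taken from the source.

package main

import (
	"fmt"
	"os"

	"github.com/tarantool/go-tarantool"
)

func main() {
	// Connect to the router as the restricted `app` user defined above.
	conn, err := tarantool.Connect("127.0.0.1:3303", tarantool.Opts{
		User: "app",
		Pass: os.Getenv("APP_USER_PASSWORD"),
	})
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	// crud.get(space, key): the (directory, name) key shape is a guess for illustration.
	resp, err := conn.Call("crud.get", []interface{}{
		"filer_metadata", []interface{}{"/demo", "note.txt"},
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(resp.Data)
}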
@ -1,7 +0,0 @@
---
storage-001-a:

storage-001-b:

router-001-a:
Some files were not shown because too many files have changed in this diff.