mirror of https://github.com/chrislusf/seaweedfs (synced 2025-07-27 14:02:47 +02:00)
Compare commits
No commits in common. "master" and "1.02" have entirely different histories.
1607 changed files with 17182 additions and 378662 deletions
4  .github/FUNDING.yml  vendored
@@ -1,4 +0,0 @@
-# These are supported funding model platforms
-
-github: chrislusf
-patreon: seaweedfs
35  .github/ISSUE_TEMPLATE.md  vendored
@@ -1,35 +0,0 @@
----
-name: Bug report
-about: Create a report to help us improve
-title: ''
-labels: ''
-assignees: ''
-
----
-
-Sponsors SeaweedFS via Patreon https://www.patreon.com/seaweedfs
-Report issues here. Ask questions here https://stackoverflow.com/questions/tagged/seaweedfs
-Please ask questions in https://github.com/seaweedfs/seaweedfs/discussions
-
-example of a good issue report:
-https://github.com/seaweedfs/seaweedfs/issues/1005
-example of a bad issue report:
-https://github.com/seaweedfs/seaweedfs/issues/1008
-
-**Describe the bug**
-A clear and concise description of what the bug is.
-
-**System Setup**
-- List the command line to start "weed master", "weed volume", "weed filer", "weed s3", "weed mount".
-- OS version
-- output of `weed version`
-- if using filer, show the content of `filer.toml`
-
-**Expected behavior**
-A clear and concise description of what you expected to happen.
-
-**Screenshots**
-If applicable, add screenshots to help explain your problem.
-
-**Additional context**
-Add any other context about the problem here.
22  .github/ISSUE_TEMPLATE/bug_report.md  vendored  Normal file
@@ -0,0 +1,22 @@
+---
+name: Bug report
+about: Create a report to help us improve
+
+---
+
+Sponsors SeaweedFS via Patreon https://www.patreon.com/seaweedfs
+
+**Describe the bug**
+A clear and concise description of what the bug is.
+
+**System Setup**
+List the command line to start "weed master", "weed volume", "weed filer", "weed s3", "weed mount".
+
+**Expected behavior**
+A clear and concise description of what you expected to happen.
+
+**Screenshots**
+If applicable, add screenshots to help explain your problem.
+
+**Additional context**
+Add any other context about the problem here.
10  .github/dependabot.yml  vendored
@@ -1,10 +0,0 @@
-version: 2
-updates:
-  - package-ecosystem: "github-actions"
-    directory: "/"
-    schedule:
-      interval: "weekly"
-  - package-ecosystem: gomod
-    directory: "/"
-    schedule:
-      interval: weekly
15  .github/pull_request_template.md  vendored
@@ -1,15 +0,0 @@
-# What problem are we solving?
-
-
-
-# How are we solving the problem?
-
-
-
-# How is the PR tested?
-
-
-
-# Checks
-- [ ] I have added unit tests if possible.
-- [ ] I will add related wiki document changes and link to this PR after merging.
124  .github/workflows/binaries_dev.yml  vendored
@@ -1,124 +0,0 @@
-name: "go: build dev binaries"
-
-on:
-  push:
-    branches: [ master ]
-
-permissions:
-  contents: read
-
-jobs:
-
-  cleanup:
-    permissions:
-      contents: write  # for mknejp/delete-release-assets to delete release assets
-    runs-on: ubuntu-latest
-
-    steps:
-
-      - name: Delete old release assets
-        uses: mknejp/delete-release-assets@v1
-        with:
-          token: ${{ github.token }}
-          tag: dev
-          fail-if-no-assets: false
-          assets: |
-            weed-*
-
-  build_dev_linux_windows:
-    permissions:
-      contents: write  # for wangyoucao577/go-release-action to upload release assets
-    needs: cleanup
-    runs-on: ubuntu-latest
-    strategy:
-      matrix:
-        goos: [linux, windows]
-        goarch: [amd64]
-
-    steps:
-
-      - name: Check out code into the Go module directory
-        uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v2
-
-      - name: Set BUILD_TIME env
-        run: echo BUILD_TIME=$(date -u +%Y%m%d-%H%M) >> ${GITHUB_ENV}
-
-      - name: Go Release Binaries Large Disk
-        uses: wangyoucao577/go-release-action@481a2c1a0f1be199722e3e9b74d7199acafc30a8 # v1.22
-        with:
-          github_token: ${{ secrets.GITHUB_TOKEN }}
-          goos: ${{ matrix.goos }}
-          goarch: ${{ matrix.goarch }}
-          release_tag: dev
-          overwrite: true
-          pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
-          build_flags: -tags 5BytesOffset # optional, default is
-          ldflags: -s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=${{github.sha}}
-          # Where to run `go build .`
-          project_path: weed
-          binary_name: weed-large-disk
-          asset_name: "weed-large-disk-${{ env.BUILD_TIME }}-${{ matrix.goos }}-${{ matrix.goarch }}"
-
-      - name: Go Release Binaries Normal Volume Size
-        uses: wangyoucao577/go-release-action@481a2c1a0f1be199722e3e9b74d7199acafc30a8 # v1.22
-        with:
-          github_token: ${{ secrets.GITHUB_TOKEN }}
-          goos: ${{ matrix.goos }}
-          goarch: ${{ matrix.goarch }}
-          release_tag: dev
-          overwrite: true
-          pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
-          ldflags: -s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=${{github.sha}}
-          # Where to run `go build .`
-          project_path: weed
-          binary_name: weed-normal-disk
-          asset_name: "weed-${{ env.BUILD_TIME }}-${{ matrix.goos }}-${{ matrix.goarch }}"
-
-  build_dev_darwin:
-    permissions:
-      contents: write  # for wangyoucao577/go-release-action to upload release assets
-    needs: build_dev_linux_windows
-    runs-on: ubuntu-latest
-    strategy:
-      matrix:
-        goos: [darwin]
-        goarch: [amd64, arm64]
-
-    steps:
-
-      - name: Check out code into the Go module directory
-        uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v2
-
-      - name: Set BUILD_TIME env
-        run: echo BUILD_TIME=$(date -u +%Y%m%d-%H%M) >> ${GITHUB_ENV}
-
-      - name: Go Release Binaries Large Disk
-        uses: wangyoucao577/go-release-action@481a2c1a0f1be199722e3e9b74d7199acafc30a8 # v1.22
-        with:
-          github_token: ${{ secrets.GITHUB_TOKEN }}
-          goos: ${{ matrix.goos }}
-          goarch: ${{ matrix.goarch }}
-          release_tag: dev
-          overwrite: true
-          pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
-          build_flags: -tags 5BytesOffset # optional, default is
-          ldflags: -s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=${{github.sha}}
-          # Where to run `go build .`
-          project_path: weed
-          binary_name: weed-large-disk
-          asset_name: "weed-large-disk-${{ env.BUILD_TIME }}-${{ matrix.goos }}-${{ matrix.goarch }}"
-
-      - name: Go Release Binaries Normal Volume Size
-        uses: wangyoucao577/go-release-action@481a2c1a0f1be199722e3e9b74d7199acafc30a8 # v1.22
-        with:
-          github_token: ${{ secrets.GITHUB_TOKEN }}
-          goos: ${{ matrix.goos }}
-          goarch: ${{ matrix.goarch }}
-          release_tag: dev
-          overwrite: true
-          pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
-          ldflags: -s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=${{github.sha}}
-          # Where to run `go build .`
-          project_path: weed
-          binary_name: weed-normal-disk
-          asset_name: "weed-${{ env.BUILD_TIME }}-${{ matrix.goos }}-${{ matrix.goarch }}"
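
[Editor's note: outside CI, the two go-release-action steps above reduce to a plain `go build`. A minimal local sketch, with flag values taken from the workflow and `git rev-parse HEAD` standing in for `${{ github.sha }}`:

    export CGO_ENABLED=0 GODEBUG=http2client=0
    COMMIT=$(git rev-parse HEAD)   # CI injects ${{ github.sha }} here
    cd weed
    # large-disk variant; drop "-tags 5BytesOffset" for the normal build
    go build -tags 5BytesOffset \
      -ldflags "-s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=${COMMIT}" \
      -o weed-large-disk .
    ./weed-large-disk version   # should report the injected commit

]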
59  .github/workflows/binaries_release0.yml  vendored
@@ -1,59 +0,0 @@
-# This is a basic workflow to help you get started with Actions
-
-name: "go: build versioned binaries for windows"
-
-on:
-  push:
-    tags:
-      - '*'
-
-  # Allows you to run this workflow manually from the Actions tab
-  workflow_dispatch:
-
-# A workflow run is made up of one or more jobs that can run sequentially or in parallel
-permissions:
-  contents: read
-
-jobs:
-
-  build-release-binaries_windows:
-    permissions:
-      contents: write  # for wangyoucao577/go-release-action to upload release assets
-    runs-on: ubuntu-latest
-    strategy:
-      matrix:
-        goos: [windows]
-        goarch: [amd64]
-
-    # Steps represent a sequence of tasks that will be executed as part of the job
-    steps:
-      # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
-      - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v2
-      - name: Go Release Binaries Normal Volume Size
-        uses: wangyoucao577/go-release-action@481a2c1a0f1be199722e3e9b74d7199acafc30a8 # v1.22
-        with:
-          github_token: ${{ secrets.GITHUB_TOKEN }}
-          goos: ${{ matrix.goos }}
-          goarch: ${{ matrix.goarch }}
-          overwrite: true
-          pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
-          # build_flags: -tags 5BytesOffset # optional, default is
-          ldflags: -s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=${{github.sha}}
-          # Where to run `go build .`
-          project_path: weed
-          binary_name: weed
-          asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}"
-      - name: Go Release Large Disk Binaries
-        uses: wangyoucao577/go-release-action@481a2c1a0f1be199722e3e9b74d7199acafc30a8 # v1.22
-        with:
-          github_token: ${{ secrets.GITHUB_TOKEN }}
-          goos: ${{ matrix.goos }}
-          goarch: ${{ matrix.goarch }}
-          overwrite: true
-          pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
-          build_flags: -tags 5BytesOffset # optional, default is
-          ldflags: -s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=${{github.sha}}
-          # Where to run `go build .`
-          project_path: weed
-          binary_name: weed
-          asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}_large_disk"
59  .github/workflows/binaries_release1.yml  vendored
@@ -1,59 +0,0 @@
-# This is a basic workflow to help you get started with Actions
-
-name: "go: build versioned binaries for linux"
-
-on:
-  push:
-    tags:
-      - '*'
-
-  # Allows you to run this workflow manually from the Actions tab
-  workflow_dispatch:
-
-# A workflow run is made up of one or more jobs that can run sequentially or in parallel
-permissions:
-  contents: read
-
-jobs:
-
-  build-release-binaries_linux:
-    permissions:
-      contents: write  # for wangyoucao577/go-release-action to upload release assets
-    runs-on: ubuntu-latest
-    strategy:
-      matrix:
-        goos: [linux]
-        goarch: [amd64, arm, arm64]
-
-    # Steps represent a sequence of tasks that will be executed as part of the job
-    steps:
-      # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
-      - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v2
-      - name: Go Release Binaries Normal Volume Size
-        uses: wangyoucao577/go-release-action@481a2c1a0f1be199722e3e9b74d7199acafc30a8 # v1.22
-        with:
-          github_token: ${{ secrets.GITHUB_TOKEN }}
-          goos: ${{ matrix.goos }}
-          goarch: ${{ matrix.goarch }}
-          overwrite: true
-          pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
-          # build_flags: -tags 5BytesOffset # optional, default is
-          ldflags: -s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=${{github.sha}}
-          # Where to run `go build .`
-          project_path: weed
-          binary_name: weed
-          asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}"
-      - name: Go Release Large Disk Binaries
-        uses: wangyoucao577/go-release-action@481a2c1a0f1be199722e3e9b74d7199acafc30a8 # v1.22
-        with:
-          github_token: ${{ secrets.GITHUB_TOKEN }}
-          goos: ${{ matrix.goos }}
-          goarch: ${{ matrix.goarch }}
-          overwrite: true
-          pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
-          build_flags: -tags 5BytesOffset # optional, default is
-          ldflags: -s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=${{github.sha}}
-          # Where to run `go build .`
-          project_path: weed
-          binary_name: weed
-          asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}_large_disk"
59  .github/workflows/binaries_release2.yml  vendored
@@ -1,59 +0,0 @@
-# This is a basic workflow to help you get started with Actions
-
-name: "go: build versioned binaries for darwin"
-
-on:
-  push:
-    tags:
-      - '*'
-
-  # Allows you to run this workflow manually from the Actions tab
-  workflow_dispatch:
-
-# A workflow run is made up of one or more jobs that can run sequentially or in parallel
-permissions:
-  contents: read
-
-jobs:
-
-  build-release-binaries_darwin:
-    permissions:
-      contents: write  # for wangyoucao577/go-release-action to upload release assets
-    runs-on: ubuntu-latest
-    strategy:
-      matrix:
-        goos: [darwin]
-        goarch: [amd64, arm64]
-
-    # Steps represent a sequence of tasks that will be executed as part of the job
-    steps:
-      # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
-      - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v2
-      - name: Go Release Binaries Normal Volume Size
-        uses: wangyoucao577/go-release-action@481a2c1a0f1be199722e3e9b74d7199acafc30a8 # v1.22
-        with:
-          github_token: ${{ secrets.GITHUB_TOKEN }}
-          goos: ${{ matrix.goos }}
-          goarch: ${{ matrix.goarch }}
-          overwrite: true
-          pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
-          # build_flags: -tags 5BytesOffset # optional, default is
-          ldflags: -s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=${{github.sha}}
-          # Where to run `go build .`
-          project_path: weed
-          binary_name: weed
-          asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}"
-      - name: Go Release Large Disk Binaries
-        uses: wangyoucao577/go-release-action@481a2c1a0f1be199722e3e9b74d7199acafc30a8 # v1.22
-        with:
-          github_token: ${{ secrets.GITHUB_TOKEN }}
-          goos: ${{ matrix.goos }}
-          goarch: ${{ matrix.goarch }}
-          overwrite: true
-          pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
-          build_flags: -tags 5BytesOffset # optional, default is
-          ldflags: -s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=${{github.sha}}
-          # Where to run `go build .`
-          project_path: weed
-          binary_name: weed
-          asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}_large_disk"
59  .github/workflows/binaries_release3.yml  vendored
@@ -1,59 +0,0 @@
-# This is a basic workflow to help you get started with Actions
-
-name: "go: build versioned binaries for freebsd"
-
-on:
-  push:
-    tags:
-      - '*'
-
-  # Allows you to run this workflow manually from the Actions tab
-  workflow_dispatch:
-
-# A workflow run is made up of one or more jobs that can run sequentially or in parallel
-permissions:
-  contents: read
-
-jobs:
-
-  build-release-binaries_freebsd:
-    permissions:
-      contents: write  # for wangyoucao577/go-release-action to upload release assets
-    runs-on: ubuntu-latest
-    strategy:
-      matrix:
-        goos: [freebsd]
-        goarch: [amd64, arm, arm64]
-
-    # Steps represent a sequence of tasks that will be executed as part of the job
-    steps:
-      # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
-      - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v2
-      - name: Go Release Binaries Normal Volume Size
-        uses: wangyoucao577/go-release-action@481a2c1a0f1be199722e3e9b74d7199acafc30a8 # v1.22
-        with:
-          github_token: ${{ secrets.GITHUB_TOKEN }}
-          goos: ${{ matrix.goos }}
-          goarch: ${{ matrix.goarch }}
-          overwrite: true
-          pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
-          # build_flags: -tags 5BytesOffset # optional, default is
-          ldflags: -s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=${{github.sha}}
-          # Where to run `go build .`
-          project_path: weed
-          binary_name: weed
-          asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}"
-      - name: Go Release Large Disk Binaries
-        uses: wangyoucao577/go-release-action@481a2c1a0f1be199722e3e9b74d7199acafc30a8 # v1.22
-        with:
-          github_token: ${{ secrets.GITHUB_TOKEN }}
-          goos: ${{ matrix.goos }}
-          goarch: ${{ matrix.goarch }}
-          overwrite: true
-          pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
-          build_flags: -tags 5BytesOffset # optional, default is
-          ldflags: -s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=${{github.sha}}
-          # Where to run `go build .`
-          project_path: weed
-          binary_name: weed
-          asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}_large_disk"
60  .github/workflows/binaries_release4.yml  vendored
@@ -1,60 +0,0 @@
-# This is a basic workflow to help you get started with Actions
-
-name: "go: build versioned binaries for linux with all tags"
-
-on:
-  push:
-    tags:
-      - '*'
-
-  # Allows you to run this workflow manually from the Actions tab
-  workflow_dispatch:
-
-# A workflow run is made up of one or more jobs that can run sequentially or in parallel
-permissions:
-  contents: read
-
-jobs:
-
-  build-release-binaries_linux:
-    permissions:
-      contents: write  # for wangyoucao577/go-release-action to upload release assets
-    runs-on: ubuntu-latest
-    strategy:
-      matrix:
-        goos: [linux]
-        goarch: [amd64]
-
-    # Steps represent a sequence of tasks that will be executed as part of the job
-    steps:
-      # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
-      - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v2
-      - name: Go Release Binaries Normal Volume Size
-        uses: wangyoucao577/go-release-action@481a2c1a0f1be199722e3e9b74d7199acafc30a8 # v1.22
-        with:
-          github_token: ${{ secrets.GITHUB_TOKEN }}
-          goos: ${{ matrix.goos }}
-          goarch: ${{ matrix.goarch }}
-          overwrite: true
-          build_flags: -tags elastic,gocdk,rclone,sqlite,tarantool,tikv,ydb
-          pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
-          # build_flags: -tags 5BytesOffset # optional, default is
-          ldflags: -s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=${{github.sha}}
-          # Where to run `go build .`
-          project_path: weed
-          binary_name: weed
-          asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}_full"
-      - name: Go Release Large Disk Binaries
-        uses: wangyoucao577/go-release-action@481a2c1a0f1be199722e3e9b74d7199acafc30a8 # v1.22
-        with:
-          github_token: ${{ secrets.GITHUB_TOKEN }}
-          goos: ${{ matrix.goos }}
-          goarch: ${{ matrix.goarch }}
-          overwrite: true
-          pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
-          build_flags: -tags 5BytesOffset,elastic,gocdk,rclone,sqlite,tarantool,tikv,ydb
-          ldflags: -s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=${{github.sha}}
-          # Where to run `go build .`
-          project_path: weed
-          binary_name: weed
-          asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}_full_large_disk"
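
[Editor's note: the `_full` variants differ from the plain release builds only in the Go build tags, each of which compiles in an optional backend. A rough local sketch of the full large-disk build, with tag list and flags taken verbatim from the workflow:

    cd weed
    CGO_ENABLED=0 go build \
      -tags 5BytesOffset,elastic,gocdk,rclone,sqlite,tarantool,tikv,ydb \
      -ldflags "-s -w -extldflags -static" -o weed .

]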
59  .github/workflows/binaries_release5.yml  vendored
@@ -1,59 +0,0 @@
-# This is a basic workflow to help you get started with Actions
-
-name: "go: build versioned binaries for openbsd"
-
-on:
-  push:
-    tags:
-      - '*'
-
-  # Allows you to run this workflow manually from the Actions tab
-  workflow_dispatch:
-
-# A workflow run is made up of one or more jobs that can run sequentially or in parallel
-permissions:
-  contents: read
-
-jobs:
-
-  build-release-binaries_openbsd:
-    permissions:
-      contents: write  # for wangyoucao577/go-release-action to upload release assets
-    runs-on: ubuntu-latest
-    strategy:
-      matrix:
-        goos: [openbsd]
-        goarch: [amd64, arm, arm64]
-
-    # Steps represent a sequence of tasks that will be executed as part of the job
-    steps:
-      # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
-      - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v2
-      - name: Go Release Binaries Normal Volume Size
-        uses: wangyoucao577/go-release-action@481a2c1a0f1be199722e3e9b74d7199acafc30a8 # v1.22
-        with:
-          github_token: ${{ secrets.GITHUB_TOKEN }}
-          goos: ${{ matrix.goos }}
-          goarch: ${{ matrix.goarch }}
-          overwrite: true
-          pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
-          # build_flags: -tags 5BytesOffset # optional, default is
-          ldflags: -s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=${{github.sha}}
-          # Where to run `go build .`
-          project_path: weed
-          binary_name: weed
-          asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}"
-      - name: Go Release Large Disk Binaries
-        uses: wangyoucao577/go-release-action@481a2c1a0f1be199722e3e9b74d7199acafc30a8 # v1.22
-        with:
-          github_token: ${{ secrets.GITHUB_TOKEN }}
-          goos: ${{ matrix.goos }}
-          goarch: ${{ matrix.goarch }}
-          overwrite: true
-          pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
-          build_flags: -tags 5BytesOffset # optional, default is
-          ldflags: -s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=${{github.sha}}
-          # Where to run `go build .`
-          project_path: weed
-          binary_name: weed
-          asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}_large_disk"
47  .github/workflows/codeql.yml  vendored
@@ -1,47 +0,0 @@
-name: "Code Scanning - Action"
-
-on:
-  pull_request:
-
-concurrency:
-  group: ${{ github.head_ref }}/codeql
-  cancel-in-progress: true
-
-jobs:
-  CodeQL-Build:
-    # CodeQL runs on ubuntu-latest, windows-latest, and macos-latest
-    runs-on: ubuntu-latest
-
-    permissions:
-      # required for all workflows
-      security-events: write
-
-    steps:
-      - name: Checkout repository
-        uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
-
-      # Initializes the CodeQL tools for scanning.
-      - name: Initialize CodeQL
-        uses: github/codeql-action/init@v3
-        # Override language selection by uncommenting this and choosing your languages
-        with:
-          languages: go
-
-      # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
-      # If this step fails, then you should remove it and run the build manually (see below).
-      - name: Autobuild
-        uses: github/codeql-action/autobuild@v3
-
-      # ℹ️ Command-line programs to run using the OS shell.
-      # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun
-
-      # ✏️ If the Autobuild fails above, remove it and uncomment the following
-      # three lines and modify them (or add more) to build your code if your
-      # project uses a compiled language
-
-      #- run: |
-      #    make bootstrap
-      #    make release
-
-      - name: Perform CodeQL Analysis
-        uses: github/codeql-action/analyze@v3
66  .github/workflows/container_dev.yml  vendored
@@ -1,66 +0,0 @@
-name: "docker: build dev containers"
-
-on:
-  push:
-    branches: [ master ]
-  workflow_dispatch: {}
-
-permissions:
-  contents: read
-
-jobs:
-
-  build-dev-containers:
-    runs-on: [ubuntu-latest]
-
-    steps:
-      -
-        name: Checkout
-        uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v2
-      -
-        name: Docker meta
-        id: docker_meta
-        uses: docker/metadata-action@902fa8ec7d6ecbf8d84d538b9b233a880e428804 # v3
-        with:
-          images: |
-            chrislusf/seaweedfs
-            ghcr.io/chrislusf/seaweedfs
-          tags: |
-            type=raw,value=dev
-          labels: |
-            org.opencontainers.image.title=seaweedfs
-            org.opencontainers.image.description=SeaweedFS is a distributed storage system for blobs, objects, files, and data lake, to store and serve billions of files fast!
-            org.opencontainers.image.vendor=Chris Lu
-      -
-        name: Set up QEMU
-        uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v1
-      -
-        name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v1
-        with:
-          buildkitd-flags: "--debug"
-      -
-        name: Login to Docker Hub
-        if: github.event_name != 'pull_request'
-        uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v1
-        with:
-          username: ${{ secrets.DOCKER_USERNAME }}
-          password: ${{ secrets.DOCKER_PASSWORD }}
-      -
-        name: Login to GHCR
-        if: github.event_name != 'pull_request'
-        uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v1
-        with:
-          registry: ghcr.io
-          username: ${{ secrets.GHCR_USERNAME }}
-          password: ${{ secrets.GHCR_TOKEN }}
-      -
-        name: Build
-        uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v2
-        with:
-          context: ./docker
-          push: ${{ github.event_name != 'pull_request' }}
-          file: ./docker/Dockerfile.go_build
-          platforms: linux/amd64, linux/arm64
-          tags: ${{ steps.docker_meta.outputs.tags }}
-          labels: ${{ steps.docker_meta.outputs.labels }}
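
[Editor's note: the Build step above goes through buildx; a rough local equivalent, assuming Docker Buildx and QEMU are installed for the non-native platform, would be:

    docker buildx create --use
    docker buildx build ./docker \
      --file ./docker/Dockerfile.go_build \
      --platform linux/amd64,linux/arm64 \
      --tag chrislusf/seaweedfs:dev \
      --push

The image names and the dev tag come from the docker_meta step; --push corresponds to `push:` evaluating true when the event is not a pull request.]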
67  .github/workflows/container_latest.yml  vendored
@@ -1,67 +0,0 @@
-name: "docker: build latest container"
-
-on:
-  push:
-    tags:
-      - '*'
-  workflow_dispatch: {}
-
-permissions:
-  contents: read
-
-jobs:
-
-  build-latest-container:
-    runs-on: [ubuntu-latest]
-
-    steps:
-      -
-        name: Checkout
-        uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v2
-      -
-        name: Docker meta
-        id: docker_meta
-        uses: docker/metadata-action@902fa8ec7d6ecbf8d84d538b9b233a880e428804 # v3
-        with:
-          images: |
-            chrislusf/seaweedfs
-            ghcr.io/chrislusf/seaweedfs
-          tags: |
-            type=raw,value=latest
-          labels: |
-            org.opencontainers.image.title=seaweedfs
-            org.opencontainers.image.description=SeaweedFS is a distributed storage system for blobs, objects, files, and data lake, to store and serve billions of files fast!
-            org.opencontainers.image.vendor=Chris Lu
-      -
-        name: Set up QEMU
-        uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v1
-      -
-        name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v1
-        with:
-          buildkitd-flags: "--debug"
-      -
-        name: Login to Docker Hub
-        if: github.event_name != 'pull_request'
-        uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v1
-        with:
-          username: ${{ secrets.DOCKER_USERNAME }}
-          password: ${{ secrets.DOCKER_PASSWORD }}
-      -
-        name: Login to GHCR
-        if: github.event_name != 'pull_request'
-        uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v1
-        with:
-          registry: ghcr.io
-          username: ${{ secrets.GHCR_USERNAME }}
-          password: ${{ secrets.GHCR_TOKEN }}
-      -
-        name: Build
-        uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v2
-        with:
-          context: ./docker
-          push: ${{ github.event_name != 'pull_request' }}
-          file: ./docker/Dockerfile.go_build
-          platforms: linux/amd64, linux/arm, linux/arm64, linux/386
-          tags: ${{ steps.docker_meta.outputs.tags }}
-          labels: ${{ steps.docker_meta.outputs.labels }}
57  .github/workflows/container_release1.yml  vendored
@@ -1,57 +0,0 @@
-name: "docker: build release containers for normal volume"
-
-on:
-  push:
-    tags:
-      - '*'
-  workflow_dispatch: {}
-
-permissions:
-  contents: read
-
-jobs:
-  build-default-release-container:
-    runs-on: [ubuntu-latest]
-
-    steps:
-      -
-        name: Checkout
-        uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v2
-      -
-        name: Docker meta
-        id: docker_meta
-        uses: docker/metadata-action@902fa8ec7d6ecbf8d84d538b9b233a880e428804 # v3
-        with:
-          images: |
-            chrislusf/seaweedfs
-          tags: |
-            type=ref,event=tag
-          flavor: |
-            latest=false
-          labels: |
-            org.opencontainers.image.title=seaweedfs
-            org.opencontainers.image.description=SeaweedFS is a distributed storage system for blobs, objects, files, and data lake, to store and serve billions of files fast!
-            org.opencontainers.image.vendor=Chris Lu
-      -
-        name: Set up QEMU
-        uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v1
-      -
-        name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v1
-      -
-        name: Login to Docker Hub
-        if: github.event_name != 'pull_request'
-        uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v1
-        with:
-          username: ${{ secrets.DOCKER_USERNAME }}
-          password: ${{ secrets.DOCKER_PASSWORD }}
-      -
-        name: Build
-        uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v2
-        with:
-          context: ./docker
-          push: ${{ github.event_name != 'pull_request' }}
-          file: ./docker/Dockerfile.go_build
-          platforms: linux/amd64, linux/arm, linux/arm64, linux/386
-          tags: ${{ steps.docker_meta.outputs.tags }}
-          labels: ${{ steps.docker_meta.outputs.labels }}
59  .github/workflows/container_release2.yml  vendored
@@ -1,59 +0,0 @@
-name: "docker: build release containers for large volume"
-
-on:
-  push:
-    tags:
-      - '*'
-  workflow_dispatch: {}
-
-permissions:
-  contents: read
-
-jobs:
-
-  build-large-release-container:
-    runs-on: [ubuntu-latest]
-
-    steps:
-      -
-        name: Checkout
-        uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v2
-      -
-        name: Docker meta
-        id: docker_meta
-        uses: docker/metadata-action@902fa8ec7d6ecbf8d84d538b9b233a880e428804 # v3
-        with:
-          images: |
-            chrislusf/seaweedfs
-          tags: |
-            type=ref,event=tag,suffix=_large_disk
-          flavor: |
-            latest=false
-          labels: |
-            org.opencontainers.image.title=seaweedfs
-            org.opencontainers.image.description=SeaweedFS is a distributed storage system for blobs, objects, files, and data lake, to store and serve billions of files fast!
-            org.opencontainers.image.vendor=Chris Lu
-      -
-        name: Set up QEMU
-        uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v1
-      -
-        name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v1
-      -
-        name: Login to Docker Hub
-        if: github.event_name != 'pull_request'
-        uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v1
-        with:
-          username: ${{ secrets.DOCKER_USERNAME }}
-          password: ${{ secrets.DOCKER_PASSWORD }}
-      -
-        name: Build
-        uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v2
-        with:
-          context: ./docker
-          push: ${{ github.event_name != 'pull_request' }}
-          file: ./docker/Dockerfile.go_build
-          build-args: TAGS=5BytesOffset
-          platforms: linux/amd64, linux/arm, linux/arm64, linux/386
-          tags: ${{ steps.docker_meta.outputs.tags }}
-          labels: ${{ steps.docker_meta.outputs.labels }}
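
[Editor's note: the only difference from the normal-volume release is the TAGS build argument, which the Dockerfile presumably forwards to `go build -tags`. A sketch of the equivalent invocation, with <version> standing in for the tag docker_meta derives from the pushed git tag:

    docker buildx build ./docker \
      --file ./docker/Dockerfile.go_build \
      --build-arg TAGS=5BytesOffset \
      --platform linux/amd64,linux/arm,linux/arm64,linux/386 \
      --tag chrislusf/seaweedfs:<version>_large_disk

]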
58  .github/workflows/container_release3.yml  vendored
@@ -1,58 +0,0 @@
-name: "docker: build release containers for rocksdb"
-
-on:
-  push:
-    tags:
-      - '*'
-  workflow_dispatch: {}
-
-permissions:
-  contents: read
-
-jobs:
-
-  build-large-release-container_rocksdb:
-    runs-on: [ubuntu-latest]
-
-    steps:
-      -
-        name: Checkout
-        uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v2
-      -
-        name: Docker meta
-        id: docker_meta
-        uses: docker/metadata-action@902fa8ec7d6ecbf8d84d538b9b233a880e428804 # v3
-        with:
-          images: |
-            chrislusf/seaweedfs
-          tags: |
-            type=ref,event=tag,suffix=_large_disk_rocksdb
-          flavor: |
-            latest=false
-          labels: |
-            org.opencontainers.image.title=seaweedfs
-            org.opencontainers.image.description=SeaweedFS is a distributed storage system for blobs, objects, files, and data lake, to store and serve billions of files fast!
-            org.opencontainers.image.vendor=Chris Lu
-      -
-        name: Set up QEMU
-        uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v1
-      -
-        name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v1
-      -
-        name: Login to Docker Hub
-        if: github.event_name != 'pull_request'
-        uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v1
-        with:
-          username: ${{ secrets.DOCKER_USERNAME }}
-          password: ${{ secrets.DOCKER_PASSWORD }}
-      -
-        name: Build
-        uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v2
-        with:
-          context: ./docker
-          push: ${{ github.event_name != 'pull_request' }}
-          file: ./docker/Dockerfile.rocksdb_large
-          platforms: linux/amd64
-          tags: ${{ steps.docker_meta.outputs.tags }}
-          labels: ${{ steps.docker_meta.outputs.labels }}
58  .github/workflows/container_release4.yml  vendored
@@ -1,58 +0,0 @@
-name: "docker: build release containers for all tags"
-
-on:
-  push:
-    tags:
-      - '*'
-  workflow_dispatch: {}
-
-permissions:
-  contents: read
-
-jobs:
-  build-default-release-container:
-    runs-on: [ubuntu-latest]
-
-    steps:
-      -
-        name: Checkout
-        uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v2
-      -
-        name: Docker meta
-        id: docker_meta
-        uses: docker/metadata-action@902fa8ec7d6ecbf8d84d538b9b233a880e428804 # v3
-        with:
-          images: |
-            chrislusf/seaweedfs
-          tags: |
-            type=ref,event=tag,suffix=_full
-          flavor: |
-            latest=false
-          labels: |
-            org.opencontainers.image.title=seaweedfs
-            org.opencontainers.image.description=SeaweedFS is a distributed storage system for blobs, objects, files, and data lake, to store and serve billions of files fast!
-            org.opencontainers.image.vendor=Chris Lu
-      -
-        name: Set up QEMU
-        uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v1
-      -
-        name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v1
-      -
-        name: Login to Docker Hub
-        if: github.event_name != 'pull_request'
-        uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v1
-        with:
-          username: ${{ secrets.DOCKER_USERNAME }}
-          password: ${{ secrets.DOCKER_PASSWORD }}
-      -
-        name: Build
-        uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v2
-        with:
-          context: ./docker
-          push: ${{ github.event_name != 'pull_request' }}
-          file: ./docker/Dockerfile.go_build
-          build-args: TAGS=elastic,gocdk,rclone,sqlite,tarantool,tikv,ydb
-          platforms: linux/amd64
-          tags: ${{ steps.docker_meta.outputs.tags }}
-          labels: ${{ steps.docker_meta.outputs.labels }}
58  .github/workflows/container_release5.yml  vendored
@@ -1,58 +0,0 @@
-name: "docker: build release containers for all tags and large volume"
-
-on:
-  push:
-    tags:
-      - '*'
-  workflow_dispatch: {}
-
-permissions:
-  contents: read
-
-jobs:
-  build-default-release-container:
-    runs-on: [ubuntu-latest]
-
-    steps:
-      -
-        name: Checkout
-        uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v2
-      -
-        name: Docker meta
-        id: docker_meta
-        uses: docker/metadata-action@902fa8ec7d6ecbf8d84d538b9b233a880e428804 # v3
-        with:
-          images: |
-            chrislusf/seaweedfs
-          tags: |
-            type=ref,event=tag,suffix=_large_disk_full
-          flavor: |
-            latest=false
-          labels: |
-            org.opencontainers.image.title=seaweedfs
-            org.opencontainers.image.description=SeaweedFS is a distributed storage system for blobs, objects, files, and data lake, to store and serve billions of files fast!
-            org.opencontainers.image.vendor=Chris Lu
-      -
-        name: Set up QEMU
-        uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v1
-      -
-        name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v1
-      -
-        name: Login to Docker Hub
-        if: github.event_name != 'pull_request'
-        uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v1
-        with:
-          username: ${{ secrets.DOCKER_USERNAME }}
-          password: ${{ secrets.DOCKER_PASSWORD }}
-      -
-        name: Build
-        uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v2
-        with:
-          context: ./docker
-          push: ${{ github.event_name != 'pull_request' }}
-          file: ./docker/Dockerfile.go_build
-          build-args: TAGS=5BytesOffset,elastic,gocdk,rclone,sqlite,tarantool,tikv,ydb
-          platforms: linux/amd64
-          tags: ${{ steps.docker_meta.outputs.tags }}
-          labels: ${{ steps.docker_meta.outputs.labels }}
171  .github/workflows/deploy_telemetry.yml  vendored
@@ -1,171 +0,0 @@
-# This workflow will build and deploy the SeaweedFS telemetry server
-# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-go
-
-name: Deploy Telemetry Server
-
-on:
-  workflow_dispatch:
-    inputs:
-      setup:
-        description: 'Run first-time server setup'
-        required: true
-        type: boolean
-        default: false
-      deploy:
-        description: 'Deploy telemetry server to remote server'
-        required: true
-        type: boolean
-        default: false
-
-jobs:
-  deploy:
-    runs-on: ubuntu-latest
-    steps:
-    - uses: actions/checkout@v4
-
-    - name: Set up Go
-      uses: actions/setup-go@v5.5.0
-      with:
-        go-version: '1.24'
-
-    - name: Build Telemetry Server
-      if: github.event_name == 'workflow_dispatch' && inputs.deploy
-      run: |
-        go mod tidy
-        echo "Building telemetry server..."
-        GOOS=linux GOARCH=amd64 go build -o telemetry-server ./telemetry/server/main.go
-        ls -la telemetry-server
-        echo "Build completed successfully"
-
-    - name: First-time Server Setup
-      if: github.event_name == 'workflow_dispatch' && inputs.setup
-      env:
-        SSH_PRIVATE_KEY: ${{ secrets.TELEMETRY_SSH_PRIVATE_KEY }}
-        REMOTE_HOST: ${{ secrets.TELEMETRY_HOST }}
-        REMOTE_USER: ${{ secrets.TELEMETRY_USER }}
-      run: |
-        mkdir -p ~/.ssh
-        echo "$SSH_PRIVATE_KEY" > ~/.ssh/deploy_key
-        chmod 600 ~/.ssh/deploy_key
-        echo "Host *" > ~/.ssh/config
-        echo " StrictHostKeyChecking no" >> ~/.ssh/config
-
-        # Create all required directories with proper permissions
-        ssh -i ~/.ssh/deploy_key $REMOTE_USER@$REMOTE_HOST "
-          mkdir -p ~/seaweedfs-telemetry/bin ~/seaweedfs-telemetry/logs ~/seaweedfs-telemetry/data ~/seaweedfs-telemetry/tmp && \
-          chmod 755 ~/seaweedfs-telemetry/logs && \
-          chmod 755 ~/seaweedfs-telemetry/data && \
-          touch ~/seaweedfs-telemetry/logs/telemetry.log ~/seaweedfs-telemetry/logs/telemetry.error.log && \
-          chmod 644 ~/seaweedfs-telemetry/logs/*.log"
-
-        # Create systemd service file
-        echo "
-        [Unit]
-        Description=SeaweedFS Telemetry Server
-        After=network.target
-
-        [Service]
-        Type=simple
-        User=$REMOTE_USER
-        WorkingDirectory=/home/$REMOTE_USER/seaweedfs-telemetry
-        ExecStart=/home/$REMOTE_USER/seaweedfs-telemetry/bin/telemetry-server -port=8353
-        Restart=always
-        RestartSec=5
-        StandardOutput=append:/home/$REMOTE_USER/seaweedfs-telemetry/logs/telemetry.log
-        StandardError=append:/home/$REMOTE_USER/seaweedfs-telemetry/logs/telemetry.error.log
-
-        [Install]
-        WantedBy=multi-user.target" > telemetry.service
-
-        # Setup logrotate configuration
-        echo "# SeaweedFS Telemetry service log rotation
-        /home/$REMOTE_USER/seaweedfs-telemetry/logs/*.log {
-            daily
-            rotate 30
-            compress
-            delaycompress
-            missingok
-            notifempty
-            create 644 $REMOTE_USER $REMOTE_USER
-            postrotate
-                systemctl restart telemetry.service
-            endscript
-        }" > telemetry_logrotate
-
-        # Copy configuration files
-        scp -i ~/.ssh/deploy_key telemetry/grafana-dashboard.json $REMOTE_USER@$REMOTE_HOST:~/seaweedfs-telemetry/
-        scp -i ~/.ssh/deploy_key telemetry/prometheus.yml $REMOTE_USER@$REMOTE_HOST:~/seaweedfs-telemetry/
-
-        # Copy and install service and logrotate files
-        scp -i ~/.ssh/deploy_key telemetry.service telemetry_logrotate $REMOTE_USER@$REMOTE_HOST:~/seaweedfs-telemetry/
-        ssh -i ~/.ssh/deploy_key $REMOTE_USER@$REMOTE_HOST "
-          sudo mv ~/seaweedfs-telemetry/telemetry.service /etc/systemd/system/ && \
-          sudo mv ~/seaweedfs-telemetry/telemetry_logrotate /etc/logrotate.d/seaweedfs-telemetry && \
-          sudo systemctl daemon-reload && \
-          sudo systemctl enable telemetry.service"
-
-        echo "✅ First-time setup completed successfully!"
-        echo "📋 Next step: Run the deployment to install the telemetry server binary"
-        echo "   1. Go to GitHub Actions → Deploy Telemetry Server"
-        echo "   2. Click 'Run workflow'"
-        echo "   3. Check 'Deploy telemetry server to remote server'"
-        echo "   4. Click 'Run workflow'"
-
-        rm -f ~/.ssh/deploy_key
-
-    - name: Deploy Telemetry Server to Remote Server
-      if: github.event_name == 'workflow_dispatch' && inputs.deploy
-      env:
-        SSH_PRIVATE_KEY: ${{ secrets.TELEMETRY_SSH_PRIVATE_KEY }}
-        REMOTE_HOST: ${{ secrets.TELEMETRY_HOST }}
-        REMOTE_USER: ${{ secrets.TELEMETRY_USER }}
-      run: |
-        mkdir -p ~/.ssh
-        echo "$SSH_PRIVATE_KEY" > ~/.ssh/deploy_key
-        chmod 600 ~/.ssh/deploy_key
-        echo "Host *" > ~/.ssh/config
-        echo " StrictHostKeyChecking no" >> ~/.ssh/config
-
-        # Create temp directory and copy binary
-        ssh -i ~/.ssh/deploy_key $REMOTE_USER@$REMOTE_HOST "mkdir -p ~/seaweedfs-telemetry/tmp"
-        scp -i ~/.ssh/deploy_key telemetry-server $REMOTE_USER@$REMOTE_HOST:~/seaweedfs-telemetry/tmp/
-
-        # Copy updated configuration files
-        scp -i ~/.ssh/deploy_key telemetry/grafana-dashboard.json $REMOTE_USER@$REMOTE_HOST:~/seaweedfs-telemetry/
-        scp -i ~/.ssh/deploy_key telemetry/prometheus.yml $REMOTE_USER@$REMOTE_HOST:~/seaweedfs-telemetry/
-
-        # Check if service exists and deploy accordingly
-        ssh -i ~/.ssh/deploy_key $REMOTE_USER@$REMOTE_HOST "
-          if systemctl list-unit-files telemetry.service >/dev/null 2>&1; then
-            echo 'Service exists, performing update...'
-            sudo systemctl stop telemetry.service
-            mkdir -p ~/seaweedfs-telemetry/bin
-            mv ~/seaweedfs-telemetry/tmp/telemetry-server ~/seaweedfs-telemetry/bin/
-            chmod +x ~/seaweedfs-telemetry/bin/telemetry-server
-            sudo systemctl start telemetry.service
-            sudo systemctl status telemetry.service
-          else
-            echo 'ERROR: telemetry.service not found!'
-            echo 'Please run the first-time setup before deploying.'
-            echo 'Go to GitHub Actions → Deploy Telemetry Server → Run workflow → Check \"Run first-time server setup\"'
-            exit 1
-          fi"
-
-        # Verify deployment
-        ssh -i ~/.ssh/deploy_key $REMOTE_USER@$REMOTE_HOST "
-          echo 'Waiting for service to start...'
-          sleep 5
-          curl -f http://localhost:8353/health || echo 'Health check failed'"
-
-        rm -f ~/.ssh/deploy_key
-
-    - name: Notify Deployment Status
-      if: always()
-      run: |
-        if [ "${{ job.status }}" == "success" ]; then
-          echo "✅ Telemetry server deployment successful"
-          echo "Dashboard: http://${{ secrets.TELEMETRY_HOST }}:8353"
-          echo "Metrics: http://${{ secrets.TELEMETRY_HOST }}:8353/metrics"
-        else
-          echo "❌ Telemetry server deployment failed"
-        fi
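
[Editor's note: after a successful setup plus deploy, a quick sanity check on the remote host could look like the following, with the port, paths, and health endpoint taken from the unit file and verify step above:

    systemctl status telemetry.service
    curl -f http://localhost:8353/health
    tail -n 50 ~/seaweedfs-telemetry/logs/telemetry.log

The workflow itself only runs on manual dispatch; with the GitHub CLI that is roughly `gh workflow run 'Deploy Telemetry Server' -f deploy=true`, the flag name being assumed from the inputs block.]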
14  .github/workflows/depsreview.yml  vendored
@@ -1,14 +0,0 @@
-name: 'Dependency Review'
-on: [pull_request]
-
-permissions:
-  contents: read
-
-jobs:
-  dependency-review:
-    runs-on: ubuntu-latest
-    steps:
-      - name: 'Checkout Repository'
-        uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
-      - name: 'Dependency Review'
-        uses: actions/dependency-review-action@da24556b548a50705dd671f47852072ea4c105d9
104  .github/workflows/e2e.yml  vendored
@@ -1,104 +0,0 @@
-name: "End to End"
-
-on:
-  push:
-    branches: [ master ]
-  pull_request:
-    branches: [ master ]
-
-concurrency:
-  group: ${{ github.head_ref }}/e2e
-  cancel-in-progress: true
-
-permissions:
-  contents: read
-
-defaults:
-  run:
-    working-directory: docker
-
-jobs:
-  e2e:
-    name: FUSE Mount
-    runs-on: ubuntu-22.04
-    timeout-minutes: 30
-    steps:
-      - name: Set up Go 1.x
-        uses: actions/setup-go@fa96338abe5531f6e34c5cc0bbe28c1a533d5505 # v2
-        with:
-          go-version: ^1.13
-        id: go
-
-      - name: Check out code into the Go module directory
-        uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v2
-
-      - name: Install dependencies
-        run: |
-          sudo apt-get update
-          sudo apt-get install -y fuse
-
-      - name: Start SeaweedFS
-        timeout-minutes: 5
-        run: make build_e2e && docker compose -f ./compose/e2e-mount.yml up --wait
-
-      - name: Run FIO 4k
-        timeout-minutes: 15
-        run: |
-          echo "Starting FIO at: $(date)"
-          # Concurrent r/w
-          echo 'Run randrw with size=16M bs=4k'
-          docker compose -f ./compose/e2e-mount.yml exec mount timeout -k5 60 fio --name=fiotest --filename=/mnt/seaweedfs/fiotest --size=16M --rw=randrw --bs=4k --direct=1 --numjobs=8 --ioengine=libaio --group_reporting --runtime=30 --time_based=1
-
-          echo "Verify FIO at: $(date)"
-          # Verified write
-          echo 'Run randwrite with size=16M bs=4k'
-          docker compose -f ./compose/e2e-mount.yml exec mount timeout -k5 60 fio --name=fiotest --filename=/mnt/seaweedfs/fiotest --size=16M --rw=randwrite --bs=4k --direct=1 --numjobs=8 --ioengine=libaio --iodepth=32 --group_reporting --runtime=30 --time_based=1 --do_verify=0 --verify=crc32c --verify_backlog=1
-
-      - name: Run FIO 128k
-        timeout-minutes: 15
-        run: |
-          echo "Starting FIO at: $(date)"
-          # Concurrent r/w
-          echo 'Run randrw with size=16M bs=128k'
-          docker compose -f ./compose/e2e-mount.yml exec mount timeout -k5 60 fio --name=fiotest --filename=/mnt/seaweedfs/fiotest --size=16M --rw=randrw --bs=128k --direct=1 --numjobs=8 --ioengine=libaio --iodepth=32 --group_reporting --runtime=30 --time_based=1
-
-          echo "Verify FIO at: $(date)"
-          # Verified write
-          echo 'Run randwrite with size=16M bs=128k'
-          docker compose -f ./compose/e2e-mount.yml exec mount timeout -k5 60 fio --name=fiotest --filename=/mnt/seaweedfs/fiotest --size=16M --rw=randwrite --bs=128k --direct=1 --numjobs=8 --ioengine=libaio --iodepth=32 --group_reporting --runtime=30 --time_based=1 --do_verify=0 --verify=crc32c --verify_backlog=1
-
-      - name: Run FIO 1MB
-        timeout-minutes: 15
-        run: |
-          echo "Starting FIO at: $(date)"
-          # Concurrent r/w
-          echo 'Run randrw with size=16M bs=1m'
-          docker compose -f ./compose/e2e-mount.yml exec mount timeout -k5 60 fio --name=fiotest --filename=/mnt/seaweedfs/fiotest --size=16M --rw=randrw --bs=1m --direct=1 --numjobs=8 --ioengine=libaio --iodepth=32 --group_reporting --runtime=30 --time_based=1
-
-          echo "Verify FIO at: $(date)"
-          # Verified write
-          echo 'Run randwrite with size=16M bs=1m'
-          docker compose -f ./compose/e2e-mount.yml exec mount timeout -k5 60 fio --name=fiotest --filename=/mnt/seaweedfs/fiotest --size=16M --rw=randwrite --bs=1m --direct=1 --numjobs=8 --ioengine=libaio --iodepth=32 --group_reporting --runtime=30 --time_based=1 --do_verify=0 --verify=crc32c --verify_backlog=1
-
-      - name: Save logs
-        if: always()
-        run: |
-          docker compose -f ./compose/e2e-mount.yml logs > output.log
-          echo 'Showing last 500 log lines of mount service:'
-          docker compose -f ./compose/e2e-mount.yml logs --tail 500 mount
-
-      - name: Check for data races
-        if: always()
-        continue-on-error: true # TODO: remove this comment to enable build failure on data races (after all are fixed)
-        run: grep -A50 'DATA RACE' output.log && exit 1 || exit 0
-
-      - name: Archive logs
-        if: always()
-        uses: actions/upload-artifact@v4
-        with:
-          name: output-logs
-          path: docker/output.log
-
-      - name: Cleanup
-        if: always()
-        run: docker compose -f ./compose/e2e-mount.yml down --volumes --remove-orphans --rmi all
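
[Editor's note: the FIO steps can be reproduced locally against the same compose file. A minimal sketch of one 4k random read/write pass, using only commands that appear in the workflow:

    cd docker
    make build_e2e
    docker compose -f ./compose/e2e-mount.yml up --wait
    docker compose -f ./compose/e2e-mount.yml exec mount \
      fio --name=fiotest --filename=/mnt/seaweedfs/fiotest --size=16M \
          --rw=randrw --bs=4k --direct=1 --numjobs=8 --ioengine=libaio \
          --group_reporting --runtime=30 --time_based=1
    docker compose -f ./compose/e2e-mount.yml down --volumes --remove-orphans

]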
234  .github/workflows/fuse-integration.yml  vendored
@@ -1,234 +0,0 @@
-name: "FUSE Integration Tests"
-
-on:
-  push:
-    branches: [ master, main ]
-    paths:
-      - 'weed/**'
-      - 'test/fuse_integration/**'
-      - '.github/workflows/fuse-integration.yml'
-  pull_request:
-    branches: [ master, main ]
-    paths:
-      - 'weed/**'
-      - 'test/fuse_integration/**'
-      - '.github/workflows/fuse-integration.yml'
-
-concurrency:
-  group: ${{ github.head_ref }}/fuse-integration
-  cancel-in-progress: true
-
-permissions:
-  contents: read
-
-env:
-  GO_VERSION: '1.21'
-  TEST_TIMEOUT: '45m'
-
-jobs:
-  fuse-integration:
-    name: FUSE Integration Testing
-    runs-on: ubuntu-22.04
-    timeout-minutes: 50
-
-    steps:
-      - name: Checkout code
-        uses: actions/checkout@v4
-
-      - name: Set up Go ${{ env.GO_VERSION }}
-        uses: actions/setup-go@v4
-        with:
-          go-version: ${{ env.GO_VERSION }}
-
-      - name: Install FUSE and dependencies
-        run: |
-          sudo apt-get update
-          sudo apt-get install -y fuse libfuse-dev
-          # Verify FUSE installation
-          fusermount --version || true
-          ls -la /dev/fuse || true
-
-      - name: Build SeaweedFS
-        run: |
-          cd weed
-          go build -tags "elastic gocdk sqlite ydb tarantool tikv rclone" -v .
-          chmod +x weed
-          # Verify binary
-          ./weed version
-
-      - name: Prepare FUSE Integration Tests
-        run: |
-          # Create isolated test directory to avoid Go module conflicts
-          mkdir -p /tmp/seaweedfs-fuse-tests
-
-          # Copy only the working test files to avoid Go module conflicts
-          # These are the files we've verified work without package name issues
-          cp test/fuse_integration/simple_test.go /tmp/seaweedfs-fuse-tests/ 2>/dev/null || echo "⚠️ simple_test.go not found"
-          cp test/fuse_integration/working_demo_test.go /tmp/seaweedfs-fuse-tests/ 2>/dev/null || echo "⚠️ working_demo_test.go not found"
-
-          # Note: Other test files (framework.go, basic_operations_test.go, etc.)
-          # have Go module conflicts and are skipped until resolved
-
-          echo "📁 Working test files copied:"
-          ls -la /tmp/seaweedfs-fuse-tests/*.go 2>/dev/null || echo "ℹ️ No test files found"
-
-          # Initialize Go module in isolated directory
-          cd /tmp/seaweedfs-fuse-tests
-          go mod init seaweedfs-fuse-tests
-          go mod tidy
-
-          # Verify setup
-          echo "✅ FUSE integration test environment prepared"
-          ls -la /tmp/seaweedfs-fuse-tests/
-
-          echo ""
-          echo "ℹ️ Current Status: Running working subset of FUSE tests"
-          echo " • simple_test.go: Package structure verification"
-          echo " • working_demo_test.go: Framework capability demonstration"
-          echo " • Full framework: Available in test/fuse_integration/ (module conflicts pending resolution)"
-
-      - name: Run FUSE Integration Tests
-        run: |
-          cd /tmp/seaweedfs-fuse-tests
-
-          echo "🧪 Running FUSE integration tests..."
-          echo "============================================"
-
-          # Run available working test files
-          TESTS_RUN=0
-
-          if [ -f "simple_test.go" ]; then
-            echo "📋 Running simple_test.go..."
-            go test -v -timeout=${{ env.TEST_TIMEOUT }} simple_test.go
-            TESTS_RUN=$((TESTS_RUN + 1))
-          fi
-
-          if [ -f "working_demo_test.go" ]; then
-            echo "📋 Running working_demo_test.go..."
-            go test -v -timeout=${{ env.TEST_TIMEOUT }} working_demo_test.go
-            TESTS_RUN=$((TESTS_RUN + 1))
-          fi
-
-          # Run combined test if multiple files exist
-          if [ -f "simple_test.go" ] && [ -f "working_demo_test.go" ]; then
-            echo "📋 Running combined tests..."
-            go test -v -timeout=${{ env.TEST_TIMEOUT }} simple_test.go working_demo_test.go
-          fi
-
-          if [ $TESTS_RUN -eq 0 ]; then
-            echo "⚠️ No working test files found, running module verification only"
-            go version
-            go mod verify
-          else
-            echo "✅ Successfully ran $TESTS_RUN test file(s)"
-          fi
-
-          echo "============================================"
-          echo "✅ FUSE integration tests completed"
-
-      - name: Run Extended Framework Validation
-        run: |
-          cd /tmp/seaweedfs-fuse-tests
-
-          echo "🔍 Running extended framework validation..."
-          echo "============================================"
-
-          # Test individual components (only run tests that exist)
-          if [ -f "simple_test.go" ]; then
-            echo "Testing simple verification..."
-            go test -v simple_test.go
-          fi
-
-          if [ -f "working_demo_test.go" ]; then
-            echo "Testing framework demo..."
-            go test -v working_demo_test.go
-          fi
-
-          # Test combined execution if both files exist
-          if [ -f "simple_test.go" ] && [ -f "working_demo_test.go" ]; then
-            echo "Testing combined execution..."
-            go test -v simple_test.go working_demo_test.go
-          elif [ -f "simple_test.go" ] || [ -f "working_demo_test.go" ]; then
-            echo "✅ Individual tests already validated above"
-          else
-            echo "⚠️ No working test files found for combined testing"
-          fi
-
-          echo "============================================"
-          echo "✅ Extended validation completed"
-
-      - name: Generate Test Coverage Report
-        run: |
-          cd /tmp/seaweedfs-fuse-tests
-
-          echo "📊 Generating test coverage report..."
-          go test -v -coverprofile=coverage.out .
-          go tool cover -html=coverage.out -o coverage.html
-
-          echo "Coverage report generated: coverage.html"
-
-      - name: Verify SeaweedFS Binary Integration
-        run: |
-          # Test that SeaweedFS binary is accessible from test environment
-          WEED_BINARY=$(pwd)/weed/weed
-
-          if [ -f "$WEED_BINARY" ]; then
-            echo "✅ SeaweedFS binary found at: $WEED_BINARY"
-            $WEED_BINARY version
-            echo "Binary is ready for full integration testing"
-          else
-            echo "❌ SeaweedFS binary not found"
-            exit 1
-          fi
-
-      - name: Upload Test Artifacts
-        if: always()
-        uses: actions/upload-artifact@v4
-        with:
-          name: fuse-integration-test-results
-          path: |
-            /tmp/seaweedfs-fuse-tests/coverage.out
-            /tmp/seaweedfs-fuse-tests/coverage.html
-            /tmp/seaweedfs-fuse-tests/*.log
-          retention-days: 7
-
-      - name: Test Summary
-        if: always()
-        run: |
-          echo "## 🚀 FUSE Integration Test Summary" >> $GITHUB_STEP_SUMMARY
-          echo "" >> $GITHUB_STEP_SUMMARY
-          echo "### Framework Status" >> $GITHUB_STEP_SUMMARY
-          echo "- ✅ **Framework Design**: Complete and validated" >> $GITHUB_STEP_SUMMARY
-          echo "- ✅ **Working Tests**: Core framework demonstration functional" >> $GITHUB_STEP_SUMMARY
-          echo "- ⚠️ **Full Framework**: Available but requires Go module resolution" >> $GITHUB_STEP_SUMMARY
-          echo "- ✅ **CI/CD Integration**: Automated testing pipeline established" >> $GITHUB_STEP_SUMMARY
-          echo "" >> $GITHUB_STEP_SUMMARY
-          echo "### Test Capabilities" >> $GITHUB_STEP_SUMMARY
-          echo "- 📁 **File Operations**: Create, read, write, delete, permissions" >> $GITHUB_STEP_SUMMARY
-          echo "- 📂 **Directory Operations**: Create, list, delete, nested structures" >> $GITHUB_STEP_SUMMARY
-          echo "- 📊 **Large Files**: Multi-megabyte file handling" >> $GITHUB_STEP_SUMMARY
-          echo "- 🔄 **Concurrent Operations**: Multi-threaded stress testing" >> $GITHUB_STEP_SUMMARY
-          echo "- ⚠️ **Error Scenarios**: Comprehensive error handling validation" >> $GITHUB_STEP_SUMMARY
-          echo "" >> $GITHUB_STEP_SUMMARY
-          echo "### Comparison with Current Tests" >> $GITHUB_STEP_SUMMARY
-          echo "| Aspect | Current (FIO) | This Framework |" >> $GITHUB_STEP_SUMMARY
|
||||
echo "|--------|---------------|----------------|" >> $GITHUB_STEP_SUMMARY
|
||||
echo "| **Scope** | Performance only | Functional + Performance |" >> $GITHUB_STEP_SUMMARY
|
||||
echo "| **Operations** | Read/Write only | All FUSE operations |" >> $GITHUB_STEP_SUMMARY
|
||||
echo "| **Concurrency** | Single-threaded | Multi-threaded stress tests |" >> $GITHUB_STEP_SUMMARY
|
||||
echo "| **Automation** | Manual setup | Fully automated |" >> $GITHUB_STEP_SUMMARY
|
||||
echo "| **Validation** | Speed metrics | Correctness + Performance |" >> $GITHUB_STEP_SUMMARY
|
||||
echo "" >> $GITHUB_STEP_SUMMARY
|
||||
echo "### Current Working Tests" >> $GITHUB_STEP_SUMMARY
|
||||
echo "- ✅ **Framework Structure**: Package and module verification" >> $GITHUB_STEP_SUMMARY
|
||||
echo "- ✅ **Configuration Management**: Test config validation" >> $GITHUB_STEP_SUMMARY
|
||||
echo "- ✅ **File Operations Demo**: Basic file create/read/write simulation" >> $GITHUB_STEP_SUMMARY
|
||||
echo "- ✅ **Large File Handling**: 1MB+ file processing demonstration" >> $GITHUB_STEP_SUMMARY
|
||||
echo "- ✅ **Concurrency Simulation**: Multi-file operation testing" >> $GITHUB_STEP_SUMMARY
|
||||
echo "" >> $GITHUB_STEP_SUMMARY
|
||||
echo "### Next Steps" >> $GITHUB_STEP_SUMMARY
|
||||
echo "1. **Module Resolution**: Fix Go package conflicts for full framework" >> $GITHUB_STEP_SUMMARY
|
||||
echo "2. **SeaweedFS Integration**: Connect with real cluster for end-to-end testing" >> $GITHUB_STEP_SUMMARY
|
||||
echo "3. **Performance Benchmarks**: Add performance regression testing" >> $GITHUB_STEP_SUMMARY
|
||||
echo "" >> $GITHUB_STEP_SUMMARY
|
||||
echo "📈 **Total Framework Size**: ~1,500 lines of comprehensive testing infrastructure" >> $GITHUB_STEP_SUMMARY
|
40  .github/workflows/go.yml  (vendored)
@@ -1,40 +0,0 @@
name: "go: build binary"

on:
  push:
    branches: [ master ]
  pull_request:
    branches: [ master ]

concurrency:
  group: ${{ github.head_ref }}/go
  cancel-in-progress: true

permissions:
  contents: read

jobs:

  build:
    name: Build
    runs-on: ubuntu-latest
    steps:

      - name: Set up Go 1.x
        uses: actions/setup-go@fa96338abe5531f6e34c5cc0bbe28c1a533d5505 # v2
        with:
          go-version: ^1.13
        id: go

      - name: Check out code into the Go module directory
        uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v2

      - name: Get dependencies
        run: |
          cd weed; go get -v -t -d ./...

      - name: Build
        run: cd weed; go build -tags "elastic gocdk sqlite ydb tarantool tikv rclone" -v .

      - name: Test
        run: cd weed; go test -tags "elastic gocdk sqlite ydb tarantool tikv rclone" -v ./...
23  .github/workflows/helm_chart_release.yml  (vendored)
@@ -1,23 +0,0 @@
name: "helm: publish charts"
on:
  push:
    tags:
      - '*'

permissions:
  contents: write
  pages: write

jobs:
  release:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
      - name: Publish Helm charts
        uses: stefanprodan/helm-gh-pages@master
        with:
          token: ${{ secrets.GITHUB_TOKEN }}
          charts_dir: k8s/charts
          target_dir: helm
          branch: gh-pages
          helm_version: v3.18.4
51  .github/workflows/helm_ci.yml  (vendored)
@@ -1,51 +0,0 @@
name: "helm: lint and test charts"

on:
  push:
    branches: [ master ]
    paths: ['k8s/**']
  pull_request:
    branches: [ master ]
    paths: ['k8s/**']

permissions:
  contents: read

jobs:
  lint-test:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
        with:
          fetch-depth: 0

      - name: Set up Helm
        uses: azure/setup-helm@v4
        with:
          version: v3.18.4

      - uses: actions/setup-python@v5
        with:
          python-version: '3.9'
          check-latest: true

      - name: Set up chart-testing
        uses: helm/chart-testing-action@v2.7.0

      - name: Run chart-testing (list-changed)
        id: list-changed
        run: |
          changed=$(ct list-changed --target-branch ${{ github.event.repository.default_branch }} --chart-dirs k8s/charts)
          if [[ -n "$changed" ]]; then
            echo "::set-output name=changed::true"
          fi

      - name: Run chart-testing (lint)
        run: ct lint --target-branch ${{ github.event.repository.default_branch }} --all --validate-maintainers=false --chart-dirs k8s/charts

      - name: Create kind cluster
        uses: helm/kind-action@v1.12.0

      - name: Run chart-testing (install)
        run: ct install --target-branch ${{ github.event.repository.default_branch }} --all --chart-dirs k8s/charts
412  .github/workflows/s3-go-tests.yml  (vendored)
@@ -1,412 +0,0 @@
name: "S3 Go Tests"

on:
  pull_request:

concurrency:
  group: ${{ github.head_ref }}/s3-go-tests
  cancel-in-progress: true

permissions:
  contents: read

defaults:
  run:
    working-directory: weed

jobs:
  s3-versioning-tests:
    name: S3 Versioning Tests
    runs-on: ubuntu-22.04
    timeout-minutes: 30
    strategy:
      matrix:
        test-type: ["quick", "comprehensive"]

    steps:
      - name: Check out code
        uses: actions/checkout@v4

      - name: Set up Go
        uses: actions/setup-go@v5
        with:
          go-version-file: 'go.mod'
        id: go

      - name: Install SeaweedFS
        run: |
          go install -buildvcs=false

      - name: Run S3 Versioning Tests - ${{ matrix.test-type }}
        timeout-minutes: 25
        working-directory: test/s3/versioning
        run: |
          set -x
          echo "=== System Information ==="
          uname -a
          free -h
          df -h
          echo "=== Starting Tests ==="

          # Run tests with automatic server management
          # The test-with-server target handles server startup/shutdown automatically
          if [ "${{ matrix.test-type }}" = "quick" ]; then
            # Override TEST_PATTERN for quick tests only
            make test-with-server TEST_PATTERN="TestBucketListReturnDataVersioning|TestVersioningBasicWorkflow|TestVersioningDeleteMarkers"
          else
            # Run all versioning tests
            make test-with-server
          fi

      - name: Show server logs on failure
        if: failure()
        working-directory: test/s3/versioning
        run: |
          echo "=== Server Logs ==="
          if [ -f weed-test.log ]; then
            echo "Last 100 lines of server logs:"
            tail -100 weed-test.log
          else
            echo "No server log file found"
          fi

          echo "=== Test Environment ==="
          ps aux | grep -E "(weed|test)" || true
          netstat -tlnp | grep -E "(8333|9333|8080)" || true

      - name: Upload test logs on failure
        if: failure()
        uses: actions/upload-artifact@v4
        with:
          name: s3-versioning-test-logs-${{ matrix.test-type }}
          path: test/s3/versioning/weed-test*.log
          retention-days: 3

  s3-versioning-compatibility:
    name: S3 Versioning Compatibility Test
    runs-on: ubuntu-22.04
    timeout-minutes: 20

    steps:
      - name: Check out code
        uses: actions/checkout@v4

      - name: Set up Go
        uses: actions/setup-go@v5
        with:
          go-version-file: 'go.mod'
        id: go

      - name: Install SeaweedFS
        run: |
          go install -buildvcs=false

      - name: Run Core Versioning Test (Python s3tests equivalent)
        timeout-minutes: 15
        working-directory: test/s3/versioning
        run: |
          set -x
          echo "=== System Information ==="
          uname -a
          free -h

          # Run the specific test that is equivalent to the Python s3tests
          make test-with-server || {
            echo "❌ Test failed, checking logs..."
            if [ -f weed-test.log ]; then
              echo "=== Server logs ==="
              tail -100 weed-test.log
            fi
            echo "=== Process information ==="
            ps aux | grep -E "(weed|test)" || true
            exit 1
          }

      - name: Upload server logs on failure
        if: failure()
        uses: actions/upload-artifact@v4
        with:
          name: s3-versioning-compatibility-logs
          path: test/s3/versioning/weed-test*.log
          retention-days: 3

  s3-cors-compatibility:
    name: S3 CORS Compatibility Test
    runs-on: ubuntu-22.04
    timeout-minutes: 20

    steps:
      - name: Check out code
        uses: actions/checkout@v4

      - name: Set up Go
        uses: actions/setup-go@v5
        with:
          go-version-file: 'go.mod'
        id: go

      - name: Install SeaweedFS
        run: |
          go install -buildvcs=false

      - name: Run Core CORS Test (AWS S3 compatible)
        timeout-minutes: 15
        working-directory: test/s3/cors
        run: |
          set -x
          echo "=== System Information ==="
          uname -a
          free -h

          # Run the specific test that is equivalent to AWS S3 CORS behavior
          make test-with-server || {
            echo "❌ Test failed, checking logs..."
            if [ -f weed-test.log ]; then
              echo "=== Server logs ==="
              tail -100 weed-test.log
            fi
            echo "=== Process information ==="
            ps aux | grep -E "(weed|test)" || true
            exit 1
          }

      - name: Upload server logs on failure
        if: failure()
        uses: actions/upload-artifact@v4
        with:
          name: s3-cors-compatibility-logs
          path: test/s3/cors/weed-test*.log
          retention-days: 3

  s3-retention-tests:
    name: S3 Retention Tests
    runs-on: ubuntu-22.04
    timeout-minutes: 30
    strategy:
      matrix:
        test-type: ["quick", "comprehensive"]

    steps:
      - name: Check out code
        uses: actions/checkout@v4

      - name: Set up Go
        uses: actions/setup-go@v5
        with:
          go-version-file: 'go.mod'
        id: go

      - name: Install SeaweedFS
        run: |
          go install -buildvcs=false

      - name: Run S3 Retention Tests - ${{ matrix.test-type }}
        timeout-minutes: 25
        working-directory: test/s3/retention
        run: |
          set -x
          echo "=== System Information ==="
          uname -a
          free -h
          df -h
          echo "=== Starting Tests ==="

          # Run tests with automatic server management
          # The test-with-server target handles server startup/shutdown automatically
          if [ "${{ matrix.test-type }}" = "quick" ]; then
            # Override TEST_PATTERN for quick tests only
            make test-with-server TEST_PATTERN="TestBasicRetentionWorkflow|TestRetentionModeCompliance|TestLegalHoldWorkflow"
          else
            # Run all retention tests
            make test-with-server
          fi

      - name: Show server logs on failure
        if: failure()
        working-directory: test/s3/retention
        run: |
          echo "=== Server Logs ==="
          if [ -f weed-test.log ]; then
            echo "Last 100 lines of server logs:"
            tail -100 weed-test.log
          else
            echo "No server log file found"
          fi

          echo "=== Test Environment ==="
          ps aux | grep -E "(weed|test)" || true
          netstat -tlnp | grep -E "(8333|9333|8080)" || true

      - name: Upload test logs on failure
        if: failure()
        uses: actions/upload-artifact@v4
        with:
          name: s3-retention-test-logs-${{ matrix.test-type }}
          path: test/s3/retention/weed-test*.log
          retention-days: 3

  s3-cors-tests:
    name: S3 CORS Tests
    runs-on: ubuntu-22.04
    timeout-minutes: 30
    strategy:
      matrix:
        test-type: ["quick", "comprehensive"]

    steps:
      - name: Check out code
        uses: actions/checkout@v4

      - name: Set up Go
        uses: actions/setup-go@v5
        with:
          go-version-file: 'go.mod'
        id: go

      - name: Install SeaweedFS
        run: |
          go install -buildvcs=false

      - name: Run S3 CORS Tests - ${{ matrix.test-type }}
        timeout-minutes: 25
        working-directory: test/s3/cors
        run: |
          set -x
          echo "=== System Information ==="
          uname -a
          free -h
          df -h
          echo "=== Starting Tests ==="

          # Run tests with automatic server management
          # The test-with-server target handles server startup/shutdown automatically
          if [ "${{ matrix.test-type }}" = "quick" ]; then
            # Override TEST_PATTERN for quick tests only
            make test-with-server TEST_PATTERN="TestCORSConfigurationManagement|TestServiceLevelCORS|TestCORSBasicWorkflow"
          else
            # Run all CORS tests
            make test-with-server
          fi

      - name: Show server logs on failure
        if: failure()
        working-directory: test/s3/cors
        run: |
          echo "=== Server Logs ==="
          if [ -f weed-test.log ]; then
            echo "Last 100 lines of server logs:"
            tail -100 weed-test.log
          else
            echo "No server log file found"
          fi

          echo "=== Test Environment ==="
          ps aux | grep -E "(weed|test)" || true
          netstat -tlnp | grep -E "(8333|9333|8080)" || true

      - name: Upload test logs on failure
        if: failure()
        uses: actions/upload-artifact@v4
        with:
          name: s3-cors-test-logs-${{ matrix.test-type }}
          path: test/s3/cors/weed-test*.log
          retention-days: 3

  s3-retention-worm:
    name: S3 Retention WORM Integration Test
    runs-on: ubuntu-22.04
    timeout-minutes: 20

    steps:
      - name: Check out code
        uses: actions/checkout@v4

      - name: Set up Go
        uses: actions/setup-go@v5
        with:
          go-version-file: 'go.mod'
        id: go

      - name: Install SeaweedFS
        run: |
          go install -buildvcs=false

      - name: Run WORM Integration Tests
        timeout-minutes: 15
        working-directory: test/s3/retention
        run: |
          set -x
          echo "=== System Information ==="
          uname -a
          free -h

          # Run the WORM integration tests with automatic server management
          # The test-with-server target handles server startup/shutdown automatically
          make test-with-server TEST_PATTERN="TestWORM|TestRetentionExtendedAttributes|TestRetentionConcurrentOperations" || {
            echo "❌ WORM integration test failed, checking logs..."
            if [ -f weed-test.log ]; then
              echo "=== Server logs ==="
              tail -100 weed-test.log
            fi
            echo "=== Process information ==="
            ps aux | grep -E "(weed|test)" || true
            exit 1
          }

      - name: Upload server logs on failure
        if: failure()
        uses: actions/upload-artifact@v4
        with:
          name: s3-retention-worm-logs
          path: test/s3/retention/weed-test*.log
          retention-days: 3

  s3-versioning-stress:
    name: S3 Versioning Stress Test
    runs-on: ubuntu-22.04
    timeout-minutes: 35
    # Only run stress tests on master branch pushes to avoid overloading PR testing
    if: github.event_name == 'push' && github.ref == 'refs/heads/master'

    steps:
      - name: Check out code
        uses: actions/checkout@v4

      - name: Set up Go
        uses: actions/setup-go@v5
        with:
          go-version-file: 'go.mod'
        id: go

      - name: Install SeaweedFS
        run: |
          go install -buildvcs=false

      - name: Run S3 Versioning Stress Tests
        timeout-minutes: 30
        working-directory: test/s3/versioning
        run: |
          set -x
          echo "=== System Information ==="
          uname -a
          free -h

          # Run stress tests (concurrent operations)
          make test-versioning-stress || {
            echo "❌ Stress test failed, checking logs..."
            if [ -f weed-test.log ]; then
              echo "=== Server logs ==="
              tail -200 weed-test.log
            fi
            make clean
            exit 1
          }
          make clean

      - name: Upload stress test logs
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: s3-versioning-stress-logs
          path: test/s3/versioning/weed-test*.log
          retention-days: 7
1083  .github/workflows/s3tests.yml  (vendored)
File diff suppressed because it is too large

@@ -1,79 +0,0 @@
name: "test s3 over https using aws-cli"

on:
  push:
    branches: [master, test-https-s3-awscli]
  pull_request:
    branches: [master, test-https-s3-awscli]

env:
  AWS_ACCESS_KEY_ID: some_access_key1
  AWS_SECRET_ACCESS_KEY: some_secret_key1
  AWS_ENDPOINT_URL: https://localhost:8443

defaults:
  run:
    working-directory: weed

jobs:
  awscli-tests:
    runs-on: ubuntu-latest
    timeout-minutes: 5
    steps:
      - uses: actions/checkout@v4

      - uses: actions/setup-go@v5.5.0
        with:
          go-version: ^1.24

      - name: Build SeaweedFS
        run: |
          go build

      - name: Start SeaweedFS
        run: |
          set -e
          mkdir -p /tmp/data
          ./weed server -s3 -dir=/tmp/data -s3.config=../docker/compose/s3.json &
          until curl -s http://localhost:8333/ > /dev/null; do sleep 1; done

      - name: Setup Caddy
        run: |
          curl -fsSL "https://caddyserver.com/api/download?os=linux&arch=amd64" -o caddy
          chmod +x caddy
          ./caddy version
          echo "{
            auto_https disable_redirects
            local_certs
          }
          localhost:8443 {
            tls internal
            reverse_proxy localhost:8333
          }" > Caddyfile

      - name: Start Caddy
        run: |
          ./caddy start
          until curl -fsS --insecure https://localhost:8443 > /dev/null; do sleep 1; done

      - name: Create Bucket
        run: |
          aws --no-verify-ssl s3api create-bucket --bucket bucket

      - name: Test PutObject
        run: |
          set -e
          dd if=/dev/urandom of=generated bs=1M count=2
          aws --no-verify-ssl s3api put-object --bucket bucket --key test-putobject --body generated
          aws --no-verify-ssl s3api get-object --bucket bucket --key test-putobject downloaded
          diff -q generated downloaded
          rm -f generated downloaded

      - name: Test Multi-part Upload
        run: |
          set -e
          dd if=/dev/urandom of=generated bs=1M count=32
          aws --no-verify-ssl s3 cp --no-progress generated s3://bucket/test-multipart
          aws --no-verify-ssl s3 cp --no-progress s3://bucket/test-multipart downloaded
          diff -q generated downloaded
          rm -f generated downloaded
34  .gitignore  (vendored)
@@ -55,8 +55,6 @@ Temporary Items
# Mongo Explorer plugin:
# .idea/mongoSettings.xml

## vscode
.vscode
## File-based project format:
*.ipr
*.iws
@@ -77,38 +75,6 @@ com_crashlytics_export_strings.xml
crashlytics.properties
crashlytics-build.properties

workspace/

test_data
build
target
*.class
other/java/hdfs/dependency-reduced-pom.xml

# binary file
weed/weed
docker/weed

# test generated files
weed/*/*.jpg
docker/weed_sub
docker/weed_pub
weed/mq/schema/example.parquet
docker/agent_sub_record
test/mq/bin/consumer
test/mq/bin/producer
test/producer
bin/weed
weed_binary
/test/s3/copying/filerldb2
/filerldb2
/test/s3/retention/test-volume-data
test/s3/cors/weed-test.log
test/s3/cors/weed-server.pid
/test/s3/cors/test-volume-data
test/s3/cors/cors.test
/test/s3/retention/filerldb2
test/s3/retention/weed-server.pid
test/s3/retention/weed-test.log
/test/s3/versioning/test-volume-data
test/s3/versioning/weed-test.log
13  .project  (Normal file)
@@ -0,0 +1,13 @@
<?xml version="1.0" encoding="UTF-8"?>
<projectDescription>
	<name>seaweedfs</name>
	<comment></comment>
	<projects>
	</projects>
	<buildSpec>
	</buildSpec>
	<natures>
		<nature>goclipse.goNature</nature>
		<nature>org.eclipse.wst.common.project.facet.core.nature</nature>
	</natures>
</projectDescription>
44  .travis.yml  (Normal file)
@@ -0,0 +1,44 @@
sudo: false
language: go
go:
  - 1.10.x
  - 1.11.x
  - tip

before_install:
  - export PATH=/home/travis/gopath/bin:$PATH

install:
  - go get ./weed/...

script:
  - go test ./weed/...

before_deploy:
  - make release
deploy:
  provider: releases
  skip_cleanup: true
  api_key:
    secure: ERL986+ncQ8lwAJUYDrQ8s2/FxF/cyNIwJIFCqspnWxQgGNNyokET9HapmlPSxjpFRF0q6L2WCg9OY3mSVRq4oI6hg1igOQ12KlLyN71XSJ3c8w0Ay5ho48TQ9l3f3Iu97mntBCe9l0R9pnT8wj1VI8YJxloXwUMG2yeTjA9aBI=
  file:
    - build/linux_arm.tar.gz
    - build/linux_arm64.tar.gz
    - build/linux_386.tar.gz
    - build/linux_amd64.tar.gz
    - build/darwin_amd64.tar.gz
    - build/windows_386.zip
    - build/windows_amd64.zip
    - build/freebsd_arm.tar.gz
    - build/freebsd_amd64.tar.gz
    - build/freebsd_386.tar.gz
    - build/netbsd_arm.tar.gz
    - build/netbsd_amd64.tar.gz
    - build/netbsd_386.tar.gz
    - build/openbsd_arm.tar.gz
    - build/openbsd_amd64.tar.gz
    - build/openbsd_386.tar.gz
  on:
    tags: true
    repo: chrislusf/seaweedfs
    go: tip
@@ -1,74 +0,0 @@
# Contributor Covenant Code of Conduct

## Our Pledge

In the interest of fostering an open and welcoming environment, we as
contributors and maintainers pledge to make participation in our project and
our community a harassment-free experience for everyone, regardless of age, body
size, disability, ethnicity, gender identity and expression, level of experience,
nationality, personal appearance, race, religion, or sexual identity and
orientation.

## Our Standards

Examples of behavior that contributes to creating a positive environment
include:

- Using welcoming and inclusive language
- Being respectful of differing viewpoints and experiences
- Gracefully accepting constructive criticism
- Focusing on what is best for the community
- Showing empathy towards other community members

Examples of unacceptable behavior by participants include:

- The use of sexualized language or imagery and unwelcome sexual attention or
advances
- Trolling, insulting/derogatory comments, and personal or political attacks
- Public or private harassment
- Publishing others' private information, such as a physical or electronic
address, without explicit permission
- Other conduct which could reasonably be considered inappropriate in a
professional setting

## Our Responsibilities

Project maintainers are responsible for clarifying the standards of acceptable
behavior and are expected to take appropriate and fair corrective action in
response to any instances of unacceptable behavior.

Project maintainers have the right and responsibility to remove, edit, or
reject comments, commits, code, wiki edits, issues, and other contributions
that are not aligned to this Code of Conduct, or to ban temporarily or
permanently any contributor for other behaviors that they deem inappropriate,
threatening, offensive, or harmful.

## Scope

This Code of Conduct applies both within project spaces and in public spaces
when an individual is representing the project or its community. Examples of
representing a project or community include using an official project e-mail
address, posting via an official social media account, or acting as an appointed
representative at an online or offline event. Representation of a project may be
further defined and clarified by project maintainers.

## Enforcement

Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported by contacting the project team at <enteremailhere>. All
complaints will be reviewed and investigated and will result in a response that
is deemed necessary and appropriate to the circumstances. The project team is
obligated to maintain confidentiality with regard to the reporter of an incident.
Further details of specific enforcement policies may be posted separately.

Project maintainers who do not follow or enforce the Code of Conduct in good
faith may face temporary or permanent repercussions as determined by other
members of the project's leadership.

## Attribution

This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
available at [http://contributor-covenant.org/version/1/4][version]

[homepage]: http://contributor-covenant.org
[version]: http://contributor-covenant.org/version/1/4/
6  Dockerfile.go_build  (Normal file)
@@ -0,0 +1,6 @@
FROM cydev/go
RUN go get github.com/chrislusf/seaweedfs/weed
EXPOSE 8080
EXPOSE 9333
VOLUME /data
ENTRYPOINT ["weed"]
2  LICENSE
@@ -186,7 +186,7 @@
same "printed page" as the copyright notice for easier
identification within third-party archives.

Copyright 2025 Chris Lu
Copyright 2016 Chris Lu

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
148  Makefile
@@ -1,71 +1,113 @@
.PHONY: test admin-generate admin-build admin-clean admin-dev admin-run admin-test admin-fmt admin-help
BINARY = weed/weed
package = github.com/chrislusf/seaweedfs/weed

BINARY = weed
ADMIN_DIR = weed/admin
GO_FLAGS = #-v
SOURCE_DIR = ./weed/

SOURCE_DIR = .
debug ?= 0
appname := weed

all: install
sources := $(wildcard *.go)

install: admin-generate
	cd weed; go install
build = GOOS=$(1) GOARCH=$(2) go build -o build/$(appname)$(3) $(SOURCE_DIR)
tar = cd build && tar -cvzf $(1)_$(2).tar.gz $(appname)$(3) && rm $(appname)$(3)
zip = cd build && zip $(1)_$(2).zip $(appname)$(3) && rm $(appname)$(3)

warp_install:
	go install github.com/minio/warp@v0.7.6

full_install: admin-generate
	cd weed; go install -tags "elastic gocdk sqlite ydb tarantool tikv rclone"
all: build

server: install
	weed -v 0 server -s3 -filer -filer.maxMB=64 -volume.max=0 -master.volumeSizeLimitMB=100 -volume.preStopSeconds=1 -s3.port=8000 -s3.allowEmptyFolder=false -s3.allowDeleteBucketNotEmpty=true -s3.config=./docker/compose/s3.json -metricsPort=9324
.PHONY : clean deps build linux release windows_build darwin_build linux_build bsd_build clean

benchmark: install warp_install
	pkill weed || true
	pkill warp || true
	weed server -debug=$(debug) -s3 -filer -volume.max=0 -master.volumeSizeLimitMB=100 -volume.preStopSeconds=1 -s3.port=8000 -s3.allowEmptyFolder=false -s3.allowDeleteBucketNotEmpty=false -s3.config=./docker/compose/s3.json &
	warp client &
	while ! nc -z localhost 8000 ; do sleep 1 ; done
	warp mixed --host=127.0.0.1:8000 --access-key=some_access_key1 --secret-key=some_secret_key1 --autoterm
	pkill warp
	pkill weed
clean:
	go clean -i $(GO_FLAGS) $(SOURCE_DIR)
	rm -f $(BINARY)
	rm -rf build/

# curl -o profile "http://127.0.0.1:6060/debug/pprof/profile?debug=1"
benchmark_with_pprof: debug = 1
benchmark_with_pprof: benchmark
deps:
	go get $(GO_FLAGS) -d $(SOURCE_DIR)

test: admin-generate
	cd weed; go test -tags "elastic gocdk sqlite ydb tarantool tikv rclone" -v ./...
build: deps
	go build $(GO_FLAGS) -o $(BINARY) $(SOURCE_DIR)

# Admin component targets
admin-generate:
	@echo "Generating admin component templates..."
	@cd $(ADMIN_DIR) && $(MAKE) generate
linux: deps
	mkdir -p linux
	GOOS=linux GOARCH=amd64 go build $(GO_FLAGS) -o linux/$(BINARY) $(SOURCE_DIR)

admin-build: admin-generate
	@echo "Building admin component..."
	@cd $(ADMIN_DIR) && $(MAKE) build
release: deps windows_build darwin_build linux_build bsd_build

admin-clean:
	@echo "Cleaning admin component..."
	@cd $(ADMIN_DIR) && $(MAKE) clean
##### LINUX BUILDS #####
linux_build: build/linux_arm.tar.gz build/linux_arm64.tar.gz build/linux_386.tar.gz build/linux_amd64.tar.gz

admin-dev:
	@echo "Starting admin development server..."
	@cd $(ADMIN_DIR) && $(MAKE) dev
build/linux_386.tar.gz: $(sources)
	$(call build,linux,386,)
	$(call tar,linux,386)

admin-run:
	@echo "Running admin server..."
	@cd $(ADMIN_DIR) && $(MAKE) run
build/linux_amd64.tar.gz: $(sources)
	$(call build,linux,amd64,)
	$(call tar,linux,amd64)

admin-test:
	@echo "Testing admin component..."
	@cd $(ADMIN_DIR) && $(MAKE) test
build/linux_arm.tar.gz: $(sources)
	$(call build,linux,arm,)
	$(call tar,linux,arm)

admin-fmt:
	@echo "Formatting admin component..."
	@cd $(ADMIN_DIR) && $(MAKE) fmt
build/linux_arm64.tar.gz: $(sources)
	$(call build,linux,arm64,)
	$(call tar,linux,arm64)

admin-help:
	@echo "Admin component help..."
	@cd $(ADMIN_DIR) && $(MAKE) help
##### DARWIN (MAC) BUILDS #####
darwin_build: build/darwin_amd64.tar.gz

build/darwin_amd64.tar.gz: $(sources)
	$(call build,darwin,amd64,)
	$(call tar,darwin,amd64)

##### WINDOWS BUILDS #####
windows_build: build/windows_386.zip build/windows_amd64.zip

build/windows_386.zip: $(sources)
	$(call build,windows,386,.exe)
	$(call zip,windows,386,.exe)

build/windows_amd64.zip: $(sources)
	$(call build,windows,amd64,.exe)
	$(call zip,windows,amd64,.exe)

##### BSD BUILDS #####
bsd_build: build/freebsd_arm.tar.gz build/freebsd_386.tar.gz build/freebsd_amd64.tar.gz \
	build/netbsd_arm.tar.gz build/netbsd_386.tar.gz build/netbsd_amd64.tar.gz \
	build/openbsd_arm.tar.gz build/openbsd_386.tar.gz build/openbsd_amd64.tar.gz

build/freebsd_386.tar.gz: $(sources)
	$(call build,freebsd,386,)
	$(call tar,freebsd,386)

build/freebsd_amd64.tar.gz: $(sources)
	$(call build,freebsd,amd64,)
	$(call tar,freebsd,amd64)

build/freebsd_arm.tar.gz: $(sources)
	$(call build,freebsd,arm,)
	$(call tar,freebsd,arm)

build/netbsd_386.tar.gz: $(sources)
	$(call build,netbsd,386,)
	$(call tar,netbsd,386)

build/netbsd_amd64.tar.gz: $(sources)
	$(call build,netbsd,amd64,)
	$(call tar,netbsd,amd64)

build/netbsd_arm.tar.gz: $(sources)
	$(call build,netbsd,arm,)
	$(call tar,netbsd,arm)

build/openbsd_386.tar.gz: $(sources)
	$(call build,openbsd,386,)
	$(call tar,openbsd,386)

build/openbsd_amd64.tar.gz: $(sources)
	$(call build,openbsd,amd64,)
	$(call tar,openbsd,amd64)

build/openbsd_arm.tar.gz: $(sources)
	$(call build,openbsd,arm,)
	$(call tar,openbsd,arm)
514  README.md
@@ -1,27 +1,25 @@
# SeaweedFS

[![Build Status](https://travis-ci.org/chrislusf/seaweedfs.svg?branch=master)](https://travis-ci.org/chrislusf/seaweedfs)
[![GoDoc](https://godoc.org/github.com/chrislusf/seaweedfs/weed?status.svg)](https://godoc.org/github.com/chrislusf/seaweedfs/weed)
[![Wiki](https://img.shields.io/badge/docs-wiki-blue.svg)](https://github.com/chrislusf/seaweedfs/wiki)

[![Slack](https://img.shields.io/badge/slack-purple)](https://join.slack.com/t/seaweedfs/shared_invite/enQtMzI4MTMwMjU2MzA3LTEyYzZmZWYzOGQ3MDJlZWMzYmI0OTE4OTJiZjJjODBmMzUxNmYwODg0YjY3MTNlMjBmZDQ1NzQ5NDJhZWI2ZmY)
[![Twitter](https://img.shields.io/twitter/follow/seaweedfs.svg?style=social&label=Follow)](https://twitter.com/intent/follow?screen_name=seaweedfs)
[![Build Status](https://github.com/seaweedfs/seaweedfs/actions/workflows/go.yml/badge.svg)](https://github.com/seaweedfs/seaweedfs/actions/workflows/go.yml)
[![GoDoc](https://godoc.org/github.com/seaweedfs/seaweedfs/weed?status.svg)](https://godoc.org/github.com/seaweedfs/seaweedfs/weed)
[![Wiki](https://img.shields.io/badge/docs-wiki-blue.svg)](https://github.com/seaweedfs/seaweedfs/wiki)
[![Docker Pulls](https://img.shields.io/docker/pulls/chrislusf/seaweedfs.svg?maxAge=4800)](https://hub.docker.com/r/chrislusf/seaweedfs/)
[![SeaweedFS on Maven Central](https://img.shields.io/maven-central/v/com.github.chrislusf/seaweedfs-client)](https://search.maven.org/search?q=g:com.github.chrislusf)
[![Artifact Hub](https://img.shields.io/endpoint?url=https://artifacthub.io/badge/repository/seaweedfs)](https://artifacthub.io/packages/search?repo=seaweedfs)
![SeaweedFS Logo](https://raw.githubusercontent.com/seaweedfs/seaweedfs/master/note/seaweedfs.png)

![](https://raw.githubusercontent.com/chrislusf/seaweedfs/master/note/seaweedfs.png)
<h2 align="center">Supporting SeaweedFS</h2>

<h2 align="center"><a href="https://www.patreon.com/seaweedfs">Sponsor SeaweedFS via Patreon</a></h2>
SeaweedFS is Apache-licensed open source project, independent project with its ongoing development made
possible entirely thanks to the support by these awesome [backers](https://github.com/chrislusf/seaweedfs/blob/master/backers.md).
If you'd like to grow SeaweedFS even stronger, please consider to
<a href="https://www.patreon.com/seaweedfs">Sponsor SeaweedFS via Patreon</a>.

SeaweedFS is an independent Apache-licensed open source project with its ongoing development made
possible entirely thanks to the support of these awesome [backers](https://github.com/seaweedfs/seaweedfs/blob/master/backers.md).
If you'd like to grow SeaweedFS even stronger, please consider joining our
<a href="https://www.patreon.com/seaweedfs">sponsors on Patreon</a>.
Platinum ($2500/month), Gold ($500/month): put your company logo on the SeaweedFS github page
Generous Backer($50/month), Backer($10/month): put your name on the SeaweedFS backer page.

Your support will be really appreciated by me and other supporters!

<!--
<h3 align="center"><a href="https://www.patreon.com/seaweedfs">Sponsors SeaweedFS via Patreon</a></h3>

<h4 align="center">Platinum</h4>

<p align="center">
@@ -29,175 +27,74 @@ Your support will be really appreciated by me and other supporters!
Add your name or icon here
</a>
</p>
-->

### Gold Sponsors
[![nodion](https://www.nodion.com/img/logo.svg)](https://www.nodion.com)
[![piknik](https://raw.githubusercontent.com/seaweedfs/seaweedfs/master/note/piknik.png)](https://www.piknik.com)
[![keepsec](https://raw.githubusercontent.com/seaweedfs/seaweedfs/master/note/keepsec.png)](https://www.keepsec.ca)
<h4 align="center">Gold</h4>

<table>
<tbody>
<tr>
<td align="center" valign="middle">
<a href="" target="_blank">
Add your name or icon here
</a>
</td>
</tr>
<tr></tr>
</tbody>
</table>

---

- [Download Binaries for different platforms](https://github.com/seaweedfs/seaweedfs/releases/latest)
- [SeaweedFS on Slack](https://join.slack.com/t/seaweedfs/shared_invite/enQtMzI4MTMwMjU2MzA3LTEyYzZmZWYzOGQ3MDJlZWMzYmI0OTE4OTJiZjJjODBmMzUxNmYwODg0YjY3MTNlMjBmZDQ1NzQ5NDJhZWI2ZmY)
- [SeaweedFS on Twitter](https://twitter.com/SeaweedFS)
- [SeaweedFS on Telegram](https://t.me/Seaweedfs)
- [SeaweedFS on Reddit](https://www.reddit.com/r/SeaweedFS/)

- [Download Binaries for different platforms](https://github.com/chrislusf/seaweedfs/releases/latest)
- [SeaweedFS on Slack](https://join.slack.com/t/seaweedfs/shared_invite/enQtMzI4MTMwMjU2MzA3LTc4MmVlYmFlNjBmZTgzZmJlYmI1MDE1YzkyNWYyZjkwZDFiM2RlMDdjNjVlNjdjYzc4NGFhZGIyYzEyMzJkYTA)
- [SeaweedFS Mailing List](https://groups.google.com/d/forum/seaweedfs)
- [Wiki Documentation](https://github.com/seaweedfs/seaweedfs/wiki)
- [SeaweedFS White Paper](https://github.com/seaweedfs/seaweedfs/wiki/SeaweedFS_Architecture.pdf)
- [SeaweedFS Introduction Slides 2025.5](https://docs.google.com/presentation/d/1tdkp45J01oRV68dIm4yoTXKJDof-EhainlA0LMXexQE/edit?usp=sharing)
- [SeaweedFS Introduction Slides 2021.5](https://docs.google.com/presentation/d/1DcxKWlINc-HNCjhYeERkpGXXm6nTCES8mi2W5G0Z4Ts/edit?usp=sharing)
- [SeaweedFS Introduction Slides 2019.3](https://www.slideshare.net/chrislusf/seaweedfs-introduction)
- [Wiki Documentation](https://github.com/chrislusf/seaweedfs/wiki)

Table of Contents
=================

* [Quick Start](#quick-start)
* [Quick Start for S3 API on Docker](#quick-start-for-s3-api-on-docker)
* [Quick Start with Single Binary](#quick-start-with-single-binary)
* [Quick Start SeaweedFS S3 on AWS](#quick-start-seaweedfs-s3-on-aws)
* [Introduction](#introduction)
* [Features](#features)
  * [Additional Features](#additional-features)
  * [Filer Features](#filer-features)
* [Example: Using Seaweed Object Store](#example-using-seaweed-object-store)
* [Architecture](#object-store-architecture)
* [Compared to Other File Systems](#compared-to-other-file-systems)
  * [Compared to HDFS](#compared-to-hdfs)
  * [Compared to GlusterFS, Ceph](#compared-to-glusterfs-ceph)
  * [Compared to GlusterFS](#compared-to-glusterfs)
  * [Compared to Ceph](#compared-to-ceph)
  * [Compared to Minio](#compared-to-minio)
* [Dev Plan](#dev-plan)
* [Installation Guide](#installation-guide)
* [Disk Related Topics](#disk-related-topics)
* [Benchmark](#benchmark)
* [Enterprise](#enterprise)
* [License](#license)

# Quick Start #

## Quick Start for S3 API on Docker ##

`docker run -p 8333:8333 chrislusf/seaweedfs server -s3`

## Quick Start with Single Binary ##
* Download the latest binary from https://github.com/seaweedfs/seaweedfs/releases and unzip a single binary file `weed` or `weed.exe`. Or run `go install github.com/seaweedfs/seaweedfs/weed@latest`.
* `export AWS_ACCESS_KEY_ID=admin ; export AWS_SECRET_ACCESS_KEY=key` as the admin credentials to access the object store.
* Run `weed server -dir=/some/data/dir -s3` to start one master, one volume server, one filer, and one S3 gateway.

Also, to increase capacity, just add more volume servers by running `weed volume -dir="/some/data/dir2" -mserver="<master_host>:9333" -port=8081` locally, or on a different machine, or on thousands of machines. That is it!

## Quick Start SeaweedFS S3 on AWS ##
* Setup fast production-ready [SeaweedFS S3 on AWS with cloudformation](https://aws.amazon.com/marketplace/pp/prodview-nzelz5gprlrjc)

# Introduction #
## Introduction

SeaweedFS is a simple and highly scalable distributed file system. There are two objectives:

1. to store billions of files!
2. to serve the files fast!

SeaweedFS started as an Object Store to handle small files efficiently.
Instead of managing all file metadata in a central master,
the central master only manages volumes on volume servers,
and these volume servers manage files and their metadata.
This relieves concurrency pressure from the central master and spreads file metadata into volume servers,
allowing faster file access (O(1), usually just one disk read operation).
SeaweedFS started as an Object Store to handle small files efficiently. Instead of managing all file metadata in a central master, the central master only manages file volumes, and it lets these volume servers manage files and their metadata. This relieves concurrency pressure from the central master and spreads file metadata into volume servers, allowing faster file access (just one disk read operation).

There is only 40 bytes of disk storage overhead for each file's metadata.
It is so simple with O(1) disk reads that you are welcome to challenge the performance with your actual use cases.
There is only a 40 bytes disk storage overhead for each file's metadata. It is so simple with O(1) disk read that you are welcome to challenge the performance with your actual use cases.
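To put the 40-byte figure in perspective, here is a quick back-of-envelope sketch in Go; the one-billion-file target and the per-file overhead are the two numbers from the paragraphs above, everything else is just arithmetic:

```
package main

import "fmt"

func main() {
	const files = 1_000_000_000 // the "billions of files" objective
	const perFileOverhead = 40  // bytes of on-disk metadata per file

	totalGB := float64(files) * perFileOverhead / 1e9
	// All of this metadata lives on the volume servers, not on the
	// master, which only tracks the volumes themselves.
	fmt.Printf("on-disk metadata overhead: %.0f GB across the cluster\n", totalGB)
}
```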

SeaweedFS started by implementing [Facebook's Haystack design paper](http://www.usenix.org/event/osdi10/tech/full_papers/Beaver.pdf).
Also, SeaweedFS implements erasure coding with ideas from
[f4: Facebook’s Warm BLOB Storage System](https://www.usenix.org/system/files/conference/osdi14/osdi14-paper-muralidhar.pdf), and has a lot of similarities with [Facebook’s Tectonic Filesystem](https://www.usenix.org/system/files/fast21-pan.pdf)

On top of the object store, optional [Filer] can support directories and POSIX attributes.
Filer is a separate linearly-scalable stateless server with customizable metadata stores,
e.g., MySql, Postgres, Redis, Cassandra, HBase, Mongodb, Elastic Search, LevelDB, RocksDB, Sqlite, MemSql, TiDB, Etcd, CockroachDB, YDB, etc.
SeaweedFS can work very well with just the object store. [[Filer]] is added later to support directories and POSIX attributes. Filer is a separate linearly-scalable stateless server with customizable metadata stores, e.g., MySql/Postgres/Redis/Cassandra/LevelDB.

For any distributed key value stores, the large values can be offloaded to SeaweedFS.
With the fast access speed and linearly scalable capacity,
SeaweedFS can work as a distributed [Key-Large-Value store][KeyLargeValueStore].
## Additional Features
* Can choose no replication or different replication level, rack and data center aware
* Automatic master servers failover - no single point of failure (SPOF)
* Automatic Gzip compression depending on file mime type
* Automatic compaction to reclaimed disk spaces after deletion or update
* Servers in the same cluster can have different disk spaces, file systems, OS etc.
* Adding/Removing servers does **not** cause any data re-balancing
* Optionally fix the orientation for jpeg pictures
* Support Etag, Accept-Range, Last-Modified, etc.
* Support in-memory/leveldb/boltdb/btree mode tuning for memory/performance balance.
## Filer Features
* [filer server][Filer] provide "normal" directories and files via http.
* [mount filer][Mount] to read and write files directly as a local directory via FUSE.
* [Amazon S3 compatible API][AmazonS3API] to access files with S3 tooling.
* [Async Backup To Cloud][BackupToCloud] can enjoy extreme fast local access and backup to Amazon S3, Google Cloud Storage, Azure, BackBlaze.

SeaweedFS can transparently integrate with the cloud.
With hot data on local cluster, and warm data on the cloud with O(1) access time,
SeaweedFS can achieve both fast local access time and elastic cloud storage capacity.
What's more, the cloud storage access API cost is minimized.
Faster and cheaper than direct cloud storage!

[Back to TOC](#table-of-contents)

# Features #
## Additional Features ##
* Can choose no replication or different replication levels, rack and data center aware.
* Automatic master servers failover - no single point of failure (SPOF).
* Automatic Gzip compression depending on file MIME type.
* Automatic compaction to reclaim disk space after deletion or update.
* [Automatic entry TTL expiration][VolumeServerTTL].
* Any server with some disk space can add to the total storage space.
* Adding/Removing servers does **not** cause any data re-balancing unless triggered by admin commands.
* Optional picture resizing.
* Support ETag, Accept-Range, Last-Modified, etc.
* Support in-memory/leveldb/readonly mode tuning for memory/performance balance.
* Support rebalancing the writable and readonly volumes.
* [Customizable Multiple Storage Tiers][TieredStorage]: Customizable storage disk types to balance performance and cost.
* [Transparent cloud integration][CloudTier]: unlimited capacity via tiered cloud storage for warm data.
* [Erasure Coding for warm storage][ErasureCoding] Rack-Aware 10.4 erasure coding reduces storage cost and increases availability.

[Back to TOC](#table-of-contents)

## Filer Features ##
* [Filer server][Filer] provides "normal" directories and files via HTTP.
* [File TTL][FilerTTL] automatically expires file metadata and actual file data.
* [Mount filer][Mount] reads and writes files directly as a local directory via FUSE.
* [Filer Store Replication][FilerStoreReplication] enables HA for filer meta data stores.
* [Active-Active Replication][ActiveActiveAsyncReplication] enables asynchronous one-way or two-way cross cluster continuous replication.
* [Amazon S3 compatible API][AmazonS3API] accesses files with S3 tooling.
* [Hadoop Compatible File System][Hadoop] accesses files from Hadoop/Spark/Flink/etc or even runs HBase.
* [Async Replication To Cloud][BackupToCloud] has extremely fast local access and backups to Amazon S3, Google Cloud Storage, Azure, BackBlaze.
* [WebDAV] accesses as a mapped drive on Mac and Windows, or from mobile devices.
* [AES256-GCM Encrypted Storage][FilerDataEncryption] safely stores the encrypted data.
* [Super Large Files][SuperLargeFiles] stores large or super large files in tens of TB.
* [Cloud Drive][CloudDrive] mounts cloud storage to local cluster, cached for fast read and write with asynchronous write back.
* [Gateway to Remote Object Store][GatewayToRemoteObjectStore] mirrors bucket operations to remote object storage, in addition to [Cloud Drive][CloudDrive]

## Kubernetes ##
* [Kubernetes CSI Driver][SeaweedFsCsiDriver] A Container Storage Interface (CSI) Driver. [![Docker Pulls](https://img.shields.io/docker/pulls/chrislusf/seaweedfs-csi-driver.svg?maxAge=4800)](https://hub.docker.com/r/chrislusf/seaweedfs-csi-driver/)
* [SeaweedFS Operator](https://github.com/seaweedfs/seaweedfs-operator)

[Filer]: https://github.com/seaweedfs/seaweedfs/wiki/Directories-and-Files
[SuperLargeFiles]: https://github.com/seaweedfs/seaweedfs/wiki/Data-Structure-for-Large-Files
[Mount]: https://github.com/seaweedfs/seaweedfs/wiki/FUSE-Mount
[AmazonS3API]: https://github.com/seaweedfs/seaweedfs/wiki/Amazon-S3-API
[BackupToCloud]: https://github.com/seaweedfs/seaweedfs/wiki/Async-Replication-to-Cloud
[Hadoop]: https://github.com/seaweedfs/seaweedfs/wiki/Hadoop-Compatible-File-System
[WebDAV]: https://github.com/seaweedfs/seaweedfs/wiki/WebDAV
[ErasureCoding]: https://github.com/seaweedfs/seaweedfs/wiki/Erasure-coding-for-warm-storage
[TieredStorage]: https://github.com/seaweedfs/seaweedfs/wiki/Tiered-Storage
[CloudTier]: https://github.com/seaweedfs/seaweedfs/wiki/Cloud-Tier
[FilerDataEncryption]: https://github.com/seaweedfs/seaweedfs/wiki/Filer-Data-Encryption
[FilerTTL]: https://github.com/seaweedfs/seaweedfs/wiki/Filer-Stores
[VolumeServerTTL]: https://github.com/seaweedfs/seaweedfs/wiki/Store-file-with-a-Time-To-Live
[SeaweedFsCsiDriver]: https://github.com/seaweedfs/seaweedfs-csi-driver
[ActiveActiveAsyncReplication]: https://github.com/seaweedfs/seaweedfs/wiki/Filer-Active-Active-cross-cluster-continuous-synchronization
[FilerStoreReplication]: https://github.com/seaweedfs/seaweedfs/wiki/Filer-Store-Replication
[KeyLargeValueStore]: https://github.com/seaweedfs/seaweedfs/wiki/Filer-as-a-Key-Large-Value-Store
[CloudDrive]: https://github.com/seaweedfs/seaweedfs/wiki/Cloud-Drive-Architecture
[GatewayToRemoteObjectStore]: https://github.com/seaweedfs/seaweedfs/wiki/Gateway-to-Remote-Object-Storage


[Back to TOC](#table-of-contents)

## Example: Using Seaweed Object Store ##
[Filer]: https://github.com/chrislusf/seaweedfs/wiki/Directories-and-Files
[Mount]: https://github.com/chrislusf/seaweedfs/wiki/Mount
[AmazonS3API]: https://github.com/chrislusf/seaweedfs/wiki/Amazon-S3-API
[BackupToCloud]: https://github.com/chrislusf/seaweedfs/wiki/Backup-to-Cloud

## Example Usage
By default, the master node runs on port 9333, and the volume nodes run on port 8080.
Let's start one master node, and two volume nodes on port 8080 and 8081. Ideally, they should be started from different machines. We'll use localhost as an example.
Here I will start one master node, and two volume nodes on port 8080 and 8081. Ideally, they should be started from different machines. I just use localhost as an example.

SeaweedFS uses HTTP REST operations to read, write, and delete. The responses are in JSON or JSONP format.
SeaweedFS uses HTTP REST operations to write, read, delete. The responses are in JSON or JSONP format.

### Start Master Server ###
### Start Master Server

```
> ./weed master
```

@@ -210,9 +107,10 @@ SeaweedFS uses HTTP REST operations to read, write, and delete. The responses are in JSON or JSONP format.
```
> weed volume -dir="/tmp/data2" -max=10 -mserver="localhost:9333" -port=8081 &
```


### Write File ###

To upload a file: first, send a HTTP POST, PUT, or GET request to `/dir/assign` to get an `fid` and a volume server URL:
To upload a file: first, send a HTTP POST, PUT, or GET request to `/dir/assign` to get an `fid` and a volume server url:

```
> curl http://localhost:9333/dir/assign
```

@@ -223,7 +121,7 @@ Second, to store the file content, send a HTTP multi-part POST request to `url +

```
> curl -F file=@/home/chris/myphoto.jpg http://127.0.0.1:8080/3,01637037d6
{"name":"myphoto.jpg","size":43234,"eTag":"1cc0118e"}
{"size": 43234}
```

To update, send another POST request with updated file content.
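The same two-step write path in Go, as a minimal sketch: the `fid` and `url` JSON fields come from the assign response above, while the struct and error handling are our own (not an official client).

```
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"mime/multipart"
	"net/http"
)

// assignResult holds the /dir/assign response fields used here.
type assignResult struct {
	Fid string `json:"fid"`
	URL string `json:"url"`
}

func main() {
	// Step 1: ask the master for a file id and a volume server.
	resp, err := http.Get("http://localhost:9333/dir/assign")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	var a assignResult
	if err := json.NewDecoder(resp.Body).Decode(&a); err != nil {
		panic(err)
	}

	// Step 2: multi-part POST the file content to url + "/" + fid.
	var buf bytes.Buffer
	w := multipart.NewWriter(&buf)
	part, err := w.CreateFormFile("file", "hello.txt")
	if err != nil {
		panic(err)
	}
	part.Write([]byte("hello, world"))
	w.Close()

	up, err := http.Post("http://"+a.URL+"/"+a.Fid, w.FormDataContentType(), &buf)
	if err != nil {
		panic(err)
	}
	defer up.Body.Close()
	fmt.Println("stored as fid:", a.Fid)
}
```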

@@ -233,20 +131,19 @@ For deletion, send an HTTP DELETE request to the same `url + '/' + fid` URL:

```
> curl -X DELETE http://127.0.0.1:8080/3,01637037d6
```

### Save File Id ###

Now, you can save the `fid`, 3,01637037d6 in this case, to a database field.
Now you can save the `fid`, 3,01637037d6 in this case, to a database field.

The number 3 at the start represents a volume id. After the comma, it's one file key, 01, and a file cookie, 637037d6.

The volume id is an unsigned 32-bit integer. The file key is an unsigned 64-bit integer. The file cookie is an unsigned 32-bit integer, used to prevent URL guessing.

The file key and file cookie are both coded in hex. You can store the <volume id, file key, file cookie> tuple in your own format, or simply store the `fid` as a string.
The file key and file cookie are both coded in hex. You can store the <volume id, file key, file cookie> tuple in your own format, or simply store the `fid` as string.

If stored as a string, in theory, you would need 8+1+16+8=33 bytes. A char(33) would be enough, if not more than enough, since most uses will not need 2^32 volumes.
If stored as a string, in theory, you would need 8+1+16+8=33 bytes. A char(33) would be enough, if not more than enough, since most usage would not need 2^32 volumes.

If space is really a concern, you can store the file id in your own format. You would need one 4-byte integer for volume id, 8-byte long number for file key, and a 4-byte integer for the file cookie. So 16 bytes are more than enough.
If space is really a concern, you can store the file id in your own format. You would need one 4-byte integer for volume id, 8-byte long number for file key, 4-byte integer for file cookie. So 16 bytes are more than enough.
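Putting the layout above into code, a minimal Go parser might look like this. The type and function names are ours, and it assumes the cookie is always the trailing 8 hex digits, as in the example.

```
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// Fid is the <volume id, file key, file cookie> tuple behind "3,01637037d6".
type Fid struct {
	VolumeID uint32 // unsigned 32-bit volume id (decimal in the string form)
	FileKey  uint64 // unsigned 64-bit file key (hex)
	Cookie   uint32 // unsigned 32-bit cookie (hex), prevents URL guessing
}

// parseFid splits "volumeId,keyHex+cookieHex", taking the last 8 hex
// digits as the cookie and everything before them as the file key.
func parseFid(s string) (Fid, error) {
	vol, rest, ok := strings.Cut(s, ",")
	if !ok || len(rest) <= 8 {
		return Fid{}, fmt.Errorf("malformed fid %q", s)
	}
	vid, err := strconv.ParseUint(vol, 10, 32)
	if err != nil {
		return Fid{}, err
	}
	key, err := strconv.ParseUint(rest[:len(rest)-8], 16, 64)
	if err != nil {
		return Fid{}, err
	}
	cookie, err := strconv.ParseUint(rest[len(rest)-8:], 16, 32)
	if err != nil {
		return Fid{}, err
	}
	return Fid{VolumeID: uint32(vid), FileKey: key, Cookie: uint32(cookie)}, nil
}

func main() {
	fid, err := parseFid("3,01637037d6")
	if err != nil {
		panic(err)
	}
	// Prints: volume=3 key=1 cookie=637037d6
	fmt.Printf("volume=%d key=%x cookie=%x\n", fid.VolumeID, fid.FileKey, fid.Cookie)
}
```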
|
||||
|
||||
### Read File ###
|
||||
|
||||
|
@ -256,12 +153,12 @@ First look up the volume server's URLs by the file's volumeId:
|
|||
|
||||
```
|
||||
> curl http://localhost:9333/dir/lookup?volumeId=3
|
||||
{"volumeId":"3","locations":[{"publicUrl":"localhost:8080","url":"localhost:8080"}]}
|
||||
{"locations":[{"publicUrl":"localhost:8080","url":"localhost:8080"}]}
|
||||
```
|
||||
|
||||
Since (usually) there are not too many volume servers, and volumes don't move often, you can cache the results most of the time. Depending on the replication type, one volume can have multiple replica locations. Just randomly pick one location to read.
|
||||
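For programmatic reads, the lookup and URL construction take only a few lines of Go. This is a hedged sketch rather than an official client; `readUrl` and `lookupResult` are made-up names, with JSON field names matching the lookup response shown above:

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"strings"
)

// lookupResult mirrors the /dir/lookup JSON shown above.
type lookupResult struct {
	Locations []struct {
		PublicUrl string `json:"publicUrl"`
		Url       string `json:"url"`
	} `json:"locations"`
}

// readUrl resolves a fid to a full read URL via the master's lookup endpoint.
func readUrl(master, fid string) (string, error) {
	volumeId := strings.SplitN(fid, ",", 2)[0]
	resp, err := http.Get("http://" + master + "/dir/lookup?volumeId=" + volumeId)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	var result lookupResult
	if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
		return "", err
	}
	if len(result.Locations) == 0 {
		return "", fmt.Errorf("no locations for volume %s", volumeId)
	}
	// With replication, any of the returned locations works; pick the first.
	return "http://" + result.Locations[0].PublicUrl + "/" + fid, nil
}

func main() {
	fmt.Println(readUrl("localhost:9333", "3,01637037d6"))
}
```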
|
||||
Now you can take the public URL, render the URL or directly read from the volume server via URL:
|
||||
Now you can take the public url, render the url or directly read from the volume server via url:
|
||||
|
||||
```
|
||||
http://localhost:8080/3,01637037d6.jpg
|
||||
|
@ -308,13 +205,13 @@ The replication parameter options are:
|
|||
|
||||
More details about replication can be found [on the wiki][Replication].
|
||||
|
||||
[Replication]: https://github.com/seaweedfs/seaweedfs/wiki/Replication
|
||||
[Replication]: https://github.com/chrislusf/seaweedfs/wiki/Replication
|
||||
|
||||
You can also set the default replication strategy when starting the master server.
|
||||
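Per-file replication can also be requested at assignment time by passing the `replication` query parameter to `/dir/assign`. A minimal Go sketch, assuming a master running on localhost:9333 as in the earlier examples:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// "001" asks for one extra copy on the same rack. An optional
	// dataCenter parameter (see the next section) can be appended
	// the same way, e.g. "&dataCenter=dc1".
	resp, err := http.Get("http://localhost:9333/dir/assign?replication=001")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(string(body)) // JSON containing fid, url, publicUrl, count
}
```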
|
||||
### Allocate File Key on Specific Data Center ###
|
||||
### Allocate File Key on specific data center ###
|
||||
|
||||
Volume servers can be started with a specific data center name:
|
||||
Volume servers can start with a specific data center name:
|
||||
|
||||
```
|
||||
weed volume -dir=/tmp/1 -port=8080 -dataCenter=dc1
|
||||
|
@ -333,20 +230,18 @@ When requesting a file key, an optional "dataCenter" parameter can limit the ass
|
|||
* [Chunking large files][feat-3]
|
||||
* [Collection as a Simple Name Space][feat-4]
|
||||
|
||||
[feat-1]: https://github.com/seaweedfs/seaweedfs/wiki/Failover-Master-Server
|
||||
[feat-2]: https://github.com/seaweedfs/seaweedfs/wiki/Optimization#insert-with-your-own-keys
|
||||
[feat-3]: https://github.com/seaweedfs/seaweedfs/wiki/Optimization#upload-large-files
|
||||
[feat-4]: https://github.com/seaweedfs/seaweedfs/wiki/Optimization#collection-as-a-simple-name-space
|
||||
[feat-1]: https://github.com/chrislusf/seaweedfs/wiki/Failover-Master-Server
|
||||
[feat-2]: https://github.com/chrislusf/seaweedfs/wiki/Optimization#insert-with-your-own-keys
|
||||
[feat-3]: https://github.com/chrislusf/seaweedfs/wiki/Optimization#upload-large-files
|
||||
[feat-4]: https://github.com/chrislusf/seaweedfs/wiki/Optimization#collection-as-a-simple-name-space
|
||||
|
||||
[Back to TOC](#table-of-contents)
|
||||
|
||||
## Object Store Architecture ##
|
||||
## Architecture ##
|
||||
|
||||
Usually, distributed file systems split each file into chunks; a central master keeps a mapping from filenames and chunk indices to chunk handles, and also tracks which chunks each chunk server has.
|
||||
|
||||
The main drawback is that the central master can't handle many small files efficiently, and since all read requests need to go through the chunk master, it might not scale well for many concurrent users.
|
||||
The main drawback is that the central master can't handle many small files efficiently, and since all read requests need to go through the chunk master, might not scale well for many concurrent users.
|
||||
|
||||
Instead of managing chunks, SeaweedFS manages data volumes in the master server. Each data volume is 32GB in size, and can hold a lot of files. And each storage node can have many data volumes. So the master node only needs to store the metadata about the volumes, which is a fairly small amount of data and is generally stable.
|
||||
Instead of managing chunks, SeaweedFS manages data volumes in the master server. Each data volume is size 32GB, and can hold a lot of files. And each storage node can have many data volumes. So the master node only needs to store the metadata about the volumes, which is fairly small amount of data and is generally stable.
|
||||
|
||||
The actual file metadata is stored in each volume on volume servers. Since each volume server only manages metadata of files on its own disk, with only 16 bytes for each file, all file access can read file metadata just from memory and only needs one disk operation to actually read file data.
|
||||
|
||||
|
@ -356,51 +251,36 @@ For comparison, consider that an xfs inode structure in Linux is 536 bytes.
|
|||
|
||||
The architecture is fairly simple. The actual data is stored in volumes on storage nodes. One volume server can have multiple volumes, and can support both read and write access with basic authentication.
|
||||
|
||||
All volumes are managed by a master server. The master server contains the volume id to volume server mapping. This is fairly static information, and can be easily cached.
|
||||
All volumes are managed by a master server. The master server contains volume id to volume server mapping. This is fairly static information, and could be cached easily.
|
||||
|
||||
On each write request, the master server also generates a file key, which is a growing 64-bit unsigned integer. Since write requests are not generally as frequent as read requests, one master server should be able to handle the concurrency well.
|
||||
|
||||
### Write and Read files ###
|
||||
|
||||
When a client sends a write request, the master server returns (volume id, file key, file cookie, volume node URL) for the file. The client then contacts the volume node and POSTs the file content.
|
||||
When a client sends a write request, the master server returns (volume id, file key, file cookie, volume node url) for the file. The client then contacts the volume node and POSTs the file content.
|
||||
|
||||
When a client needs to read a file based on (volume id, file key, file cookie), it asks the master server by the volume id for the (volume node URL, volume node public URL), or retrieves this from a cache. Then the client can GET the content, or just render the URL on web pages and let browsers fetch the content.
|
||||
When a client needs to read a file based on (volume id, file key, file cookie), it can ask the master server by the volume id for the (volume node url, volume node public url), or retrieve this from a cache. Then the client can GET the content, or just render the URL on web pages and let browsers fetch the content.
|
||||
|
||||
Please see the example for details on the write-read process.
|
||||
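Putting the two steps together, a client-side write might look like the following Go sketch. It is illustrative only: `assignResult` and `writeBytes` are made-up names, and the JSON field names follow the `/dir/assign` response shown earlier:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"mime/multipart"
	"net/http"
)

// assignResult mirrors the /dir/assign JSON used in the examples above.
type assignResult struct {
	Fid       string `json:"fid"`
	Url       string `json:"url"`
	PublicUrl string `json:"publicUrl"`
}

// writeBytes asks the master for a fid, then POSTs the content to the
// returned volume server.
func writeBytes(master, name string, data []byte) (string, error) {
	resp, err := http.Get("http://" + master + "/dir/assign")
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	var a assignResult
	if err := json.NewDecoder(resp.Body).Decode(&a); err != nil {
		return "", err
	}

	// Multipart POST of the file content to url + "/" + fid.
	var buf bytes.Buffer
	w := multipart.NewWriter(&buf)
	part, err := w.CreateFormFile("file", name)
	if err != nil {
		return "", err
	}
	if _, err := part.Write(data); err != nil {
		return "", err
	}
	w.Close()

	post, err := http.Post("http://"+a.Url+"/"+a.Fid, w.FormDataContentType(), &buf)
	if err != nil {
		return "", err
	}
	post.Body.Close()
	return a.Fid, nil // save this fid, e.g. in a database field
}

func main() {
	fid, err := writeBytes("localhost:9333", "hello.txt", []byte("hello, seaweedfs"))
	fmt.Println(fid, err)
}
```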
|
||||
### Storage Size ###
|
||||
|
||||
In the current implementation, each volume can hold 32 gibibytes (32GiB or 8x2^32 bytes). This is because we align content to 8 bytes. We can easily increase this to 64GiB, or 128GiB, or more, by changing 2 lines of code, at the cost of some wasted padding space due to alignment.
|
||||
In the current implementation, each volume can be 8x2^32 bytes (32GiB). This is because of we align content to 8 bytes. We can easily increase this to 64G, or 128G, or more, by changing 2 lines of code, at the cost of some wasted padding space due to alignment.
|
||||
|
||||
There can be 2^32 (about 4 billion) volumes. So the total system size is 32GiB x 2^32, which is 128 exbibytes (128EiB or 2^67 bytes).
|
||||
There can be 2^32 volumes. So total system size is 8 x 2^32 bytes x 2^32 = 8 x 4GiB x 4Gi = 128EiB (2^67 bytes, or 128 exbibytes).
|
||||
|
||||
Each individual file size is limited to the volume size.
|
||||
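A tiny Go snippet to sanity-check this arithmetic:

```go
package main

import "fmt"

func main() {
	const alignment = 8                 // bytes; content is aligned to 8 bytes
	const volumeBytes = alignment << 32 // 8 x 2^32 bytes per volume
	fmt.Printf("max volume size: %d GiB\n", volumeBytes>>30) // prints 32
	// 2^32 volumes x 32GiB each = 2^67 bytes = 128EiB in total.
}
```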
|
||||
### Saving memory ###
|
||||
|
||||
All file meta information stored on a volume server is readable from memory without disk access. Each file takes just a 16-byte map entry of <64bit key, 32bit offset, 32bit size>. Of course, each map entry has its own space cost for the map. But usually the disk space runs out before the memory does.
|
||||
All file meta information on volume server is readable from memory without disk access. Each file just takes an 16-byte map entry of <64bit key, 32bit offset, 32bit size>. Of course, each map entry has its own the space cost for the map. But usually the disk runs out before the memory does.
|
||||
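Sketched as a Go struct (field names are illustrative, not taken from the SeaweedFS source), the 16 bytes work out as follows:

```go
package main

import (
	"fmt"
	"unsafe"
)

// needleMapEntry sketches the 16-byte per-file entry described above.
type needleMapEntry struct {
	Key    uint64 // 64-bit file key
	Offset uint32 // offset into the volume file, in 8-byte units
	Size   uint32 // file size
}

func main() {
	fmt.Println(unsafe.Sizeof(needleMapEntry{})) // prints 16
}
```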
|
||||
### Tiered Storage to the cloud ###
|
||||
|
||||
The local volume servers are much faster, while cloud storages have elastic capacity and are actually more cost-efficient if not accessed often (usually free to upload, but relatively costly to access). With the append-only structure and O(1) access time, SeaweedFS can take advantage of both local and cloud storage by offloading the warm data to the cloud.
|
||||
|
||||
Usually hot data are fresh and warm data are old. SeaweedFS puts the newly created volumes on local servers, and optionally uploads the older volumes to the cloud. If the older data are accessed less often, this literally gives you unlimited capacity with limited local servers, while staying fast for new data.
|
||||
|
||||
With the O(1) access time, the network latency cost is kept to a minimum.
|
||||
|
||||
If the hot/warm data is split as 20/80, then with 20 servers you can achieve the storage capacity of 100 servers. That's a cost saving of 80%! Or you can repurpose the 80 servers to store new data as well, and get 5X storage throughput.
|
||||
|
||||
[Back to TOC](#table-of-contents)
|
||||
|
||||
## Compared to Other File Systems ##
|
||||
|
||||
Most other distributed file systems seem more complicated than necessary.
|
||||
|
||||
SeaweedFS is meant to be fast and simple, in both setup and operation. If you do not understand how it works when you reach here, we've failed! Please raise an issue with any questions or update this file with clarifications.
|
||||
|
||||
SeaweedFS is constantly moving forward. Same with other systems. These comparisons can become outdated quickly. Please help to keep them updated.
|
||||
|
||||
[Back to TOC](#table-of-contents)
|
||||
SeaweedFS is meant to be fast and simple, both during usage and during setup. If you do not understand how it works when you reach here, we failed! Please raise an issue with any questions or update this file with clarifications.
|
||||
|
||||
### Compared to HDFS ###
|
||||
|
||||
|
@ -410,7 +290,6 @@ SeaweedFS is ideal for serving relatively smaller files quickly and concurrently
|
|||
|
||||
SeaweedFS can also store extra large files by splitting them into manageable data chunks, and storing the file ids of the data chunks in a meta chunk. This is managed by the "weed upload/download" tool; the weed master and volume servers are agnostic about it.
|
||||
|
||||
[Back to TOC](#table-of-contents)
|
||||
|
||||
### Compared to GlusterFS, Ceph ###
|
||||
|
||||
|
@ -418,19 +297,15 @@ The architectures are mostly the same. SeaweedFS aims to store and read files fa
|
|||
|
||||
* SeaweedFS optimizes for small files, ensuring O(1) disk seek operation, and can also handle large files.
|
||||
* SeaweedFS statically assigns a volume id for a file. Locating file content becomes just a lookup of the volume id, which can be easily cached.
|
||||
* SeaweedFS Filer metadata store can be any well-known and proven data store, e.g., Redis, Cassandra, HBase, Mongodb, Elastic Search, MySql, Postgres, Sqlite, MemSql, TiDB, CockroachDB, Etcd, YDB, etc., and is easy to customize.
|
||||
* SeaweedFS Filer metadata store can be any well-known and proven data stores, e.g., Cassandra, Redis, MySql, Postgres, etc, and is easy to customized.
|
||||
* SeaweedFS Volume server also communicates directly with clients via HTTP, supporting range queries, direct uploads, etc.
|
||||
|
||||
| System | File Metadata | File Content Read| POSIX | REST API | Optimized for large number of small files |
|
||||
| System | File Meta | File Content Read| POSIX | REST API | Optimized for small files |
|
||||
| ------------- | ------------------------------- | ---------------- | ------ | -------- | ------------------------- |
|
||||
| SeaweedFS | lookup volume id, cacheable | O(1) disk seek | | Yes | Yes |
|
||||
| SeaweedFS Filer| Linearly Scalable, Customizable | O(1) disk seek | FUSE | Yes | Yes |
|
||||
| GlusterFS | hashing | | FUSE, NFS | | |
|
||||
| Ceph | hashing + rules | | FUSE | Yes | |
|
||||
| MooseFS | in memory | | FUSE | | No |
|
||||
| MinIO | separate meta file for each file | | | Yes | No |
|
||||
|
||||
[Back to TOC](#table-of-contents)
|
||||
|
||||
### Compared to GlusterFS ###
|
||||
|
||||
|
@ -438,29 +313,19 @@ GlusterFS stores files, both directories and content, in configurable volumes ca
|
|||
|
||||
GlusterFS hashes the path and filename into ids, assigns them to virtual volumes, and then maps them to "bricks".
|
||||
|
||||
[Back to TOC](#table-of-contents)
|
||||
|
||||
### Compared to MooseFS ###
|
||||
|
||||
MooseFS chooses to neglect the small file issue. From the moosefs 3.0 manual, "even a small file will occupy 64KiB plus additionally 4KiB of checksums and 1KiB for the header", because it "was initially designed for keeping large amounts (like several thousands) of very big files".
|
||||
|
||||
The MooseFS Master Server keeps all metadata in memory, the same issue as the HDFS namenode.
|
||||
|
||||
[Back to TOC](#table-of-contents)
|
||||
|
||||
### Compared to Ceph ###
|
||||
|
||||
Ceph can be set up similarly to SeaweedFS as a key->blob store. It is much more complicated, with the need to support layers on top of it. [Here is a more detailed comparison](https://github.com/seaweedfs/seaweedfs/issues/120)
|
||||
Ceph can be setup similar to SeaweedFS as a key->blob store. It is much more complicated, with the need to support layers on top of it. [Here is a more detailed comparison](https://github.com/chrislusf/seaweedfs/issues/120)
|
||||
|
||||
SeaweedFS has a centralized master group to look up free volumes, while Ceph uses hashing and metadata servers to locate its objects. Having a centralized master makes it easy to code and manage.
|
||||
|
||||
Like SeaweedFS, Ceph is also built on top of an object store, which in Ceph's case is RADOS. Ceph is rather complicated, with mixed reviews.
|
||||
Same as SeaweedFS, Ceph is also based on a object store RADOS. Ceph is rather complicated with mixed reviews.
|
||||
|
||||
Ceph uses CRUSH hashing to automatically manage data placement, which is efficient for locating the data. But the data has to be placed according to the CRUSH algorithm, and any wrong configuration can cause data loss. Topology changes, such as adding new servers to increase capacity, will cause data migration with high IO cost to fit the CRUSH algorithm. SeaweedFS places data by assigning it to any writable volume. If a write to one volume fails, just pick another volume to write to. Adding more volumes is also as simple as it can be.
|
||||
Ceph uses CRUSH hashing to automatically manage the data placement. SeaweedFS places data by assigned volumes.
|
||||
|
||||
SeaweedFS is optimized for small files. Small files are stored as one continuous block of content, with at most 8 unused bytes between files. Small file access is O(1) disk read.
|
||||
|
||||
SeaweedFS Filer uses off-the-shelf stores, such as MySql, Postgres, Sqlite, Mongodb, Redis, Elastic Search, Cassandra, HBase, MemSql, TiDB, CockroachDB, Etcd, YDB, to manage file directories. These stores are proven, scalable, and easier to manage.
|
||||
SeaweedFS Filer uses off-the-shelf stores, such as MySql, Postgres, Redis, Cassandra, to manage file directories. There are proven, scalable, and easier to manage.
|
||||
|
||||
| SeaweedFS | comparable to Ceph | advantage |
|
||||
| ------------- | ------------- | ---------------- |
|
||||
|
@ -468,199 +333,107 @@ SeaweedFS Filer uses off-the-shelf stores, such as MySql, Postgres, Sqlite, Mong
|
|||
| Volume | OSD | optimized for small files |
|
||||
| Filer | Ceph FS | linearly scalable, customizable, O(1) or O(logN) |
|
||||
|
||||
[Back to TOC](#table-of-contents)
|
||||
|
||||
### Compared to MinIO ###
|
||||
## Dev plan ##
|
||||
|
||||
MinIO follows AWS S3 closely and is ideal for testing the S3 API. It has a good UI, policies, versioning, etc. SeaweedFS is trying to catch up here. It is also possible to put MinIO as a gateway in front of SeaweedFS later.
|
||||
More tools and documentation, on how to maintain and scale the system. For example, how to move volumes, automatically balancing data, how to grow volumes, how to check system status, etc.
|
||||
Other key features include: Erasure Encoding, JWT security.
|
||||
|
||||
MinIO metadata is stored in simple files. Each file write incurs extra writes to the corresponding meta file.
|
||||
This is a super exciting project! And I need helpers and [support](https://www.patreon.com/seaweedfs)!
|
||||
|
||||
MinIO has no optimization for lots of small files. The files are simply stored as-is on local disks.
|
||||
Together with the extra meta file and shards for erasure coding, this only amplifies the LOSF (lots of small files) problem.
|
||||
|
||||
MinIO requires multiple disk IOs to read one file. SeaweedFS has O(1) disk reads, even for erasure-coded files.
|
||||
## Installation guide for users who are not familiar with golang
|
||||
|
||||
MinIO has full-time erasure coding. SeaweedFS uses replication on hot data for faster speed and optionally applies erasure coding on warm data.
|
||||
|
||||
MinIO does not have POSIX-like API support.
|
||||
|
||||
MinIO has specific requirements on storage layout, and is not flexible when adjusting capacity. In SeaweedFS, just start one volume server pointing to the master. That's all.
|
||||
|
||||
## Dev Plan ##
|
||||
|
||||
* More tools and documentation, on how to manage and scale the system.
|
||||
* Read and write stream data.
|
||||
* Support structured data.
|
||||
|
||||
This is a super exciting project! And we need helpers and [support](https://www.patreon.com/seaweedfs)!
|
||||
|
||||
[Back to TOC](#table-of-contents)
|
||||
|
||||
## Installation Guide ##
|
||||
|
||||
> Installation guide for users who are not familiar with golang
|
||||
|
||||
Step 1: install go on your machine and setup the environment by following the instructions at:
|
||||
step 1: install go on your machine and setup the environment by following the instructions from the following link:
|
||||
|
||||
https://golang.org/doc/install
|
||||
|
||||
make sure to define your $GOPATH
|
||||
make sure you set up your $GOPATH
|
||||
|
||||
|
||||
Step 2: checkout this repo:
|
||||
```bash
|
||||
git clone https://github.com/seaweedfs/seaweedfs.git
|
||||
```
|
||||
Step 3: download, compile, and install the project by executing the following command
|
||||
step 2: also you may need to install Mercurial by following the instructions below
|
||||
|
||||
http://mercurial.selenic.com/downloads
|
||||
|
||||
step 3: download, compile, and install the project by executing the following command
|
||||
|
||||
```bash
|
||||
cd seaweedfs/weed && make install
|
||||
go get github.com/chrislusf/seaweedfs/weed
|
||||
```
|
||||
|
||||
Once this is done, you will find the executable "weed" in your `$GOPATH/bin` directory
|
||||
once this is done, you should see the executable "weed" under `$GOPATH/bin`
|
||||
|
||||
[Back to TOC](#table-of-contents)
|
||||
step 4: after you modify your code locally, you could start a local build by calling `go install` under
|
||||
|
||||
## Disk Related Topics ##
|
||||
```
|
||||
$GOPATH/src/github.com/chrislusf/seaweedfs/weed
|
||||
```
|
||||
|
||||
## Disk Related topics ##
|
||||
|
||||
### Hard Drive Performance ###
|
||||
|
||||
When testing read performance on SeaweedFS, it basically becomes a performance test of your hard drive's random read speed. Hard drives usually get 100MB/s~200MB/s.
|
||||
When testing read performance on SeaweedFS, it basically becomes performance test for your hard drive's random read speed. Hard Drive usually get 100MB/s~200MB/s.
|
||||
|
||||
### Solid State Disk ###
|
||||
### Solid State Disk
|
||||
|
||||
To modify or delete small files, an SSD must erase a whole block at a time and move content in existing blocks to a new block. SSDs are fast when brand new, but become fragmented over time and require garbage collection to compact blocks. SeaweedFS is SSD-friendly since it is append-only. Deletion and compaction are done at the volume level in the background, neither slowing down reads nor causing fragmentation.
|
||||
|
||||
[Back to TOC](#table-of-contents)
|
||||
|
||||
## Benchmark ##
|
||||
## Benchmark
|
||||
|
||||
My own unscientific single-machine results on a MacBook with a solid state disk, CPU: 1 Intel Core i7 2.6GHz.
|
||||
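Results in this format can be reproduced with the built-in `weed benchmark` command; run `weed benchmark -h` to see its options.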
|
||||
Write 1 million 1KB files:
|
||||
```
|
||||
Concurrency Level: 16
|
||||
Time taken for tests: 66.753 seconds
|
||||
Completed requests: 1048576
|
||||
Time taken for tests: 88.796 seconds
|
||||
Complete requests: 1048576
|
||||
Failed requests: 0
|
||||
Total transferred: 1106789009 bytes
|
||||
Requests per second: 15708.23 [#/sec]
|
||||
Transfer rate: 16191.69 [Kbytes/sec]
|
||||
Total transferred: 1106764659 bytes
|
||||
Requests per second: 11808.87 [#/sec]
|
||||
Transfer rate: 12172.05 [Kbytes/sec]
|
||||
|
||||
Connection Times (ms)
|
||||
min avg max std
|
||||
Total: 0.3 1.0 84.3 0.9
|
||||
Total: 0.2 1.3 44.8 0.9
|
||||
|
||||
Percentage of the requests served within a certain time (ms)
|
||||
50% 0.8 ms
|
||||
66% 1.0 ms
|
||||
75% 1.1 ms
|
||||
80% 1.2 ms
|
||||
90% 1.4 ms
|
||||
95% 1.7 ms
|
||||
98% 2.1 ms
|
||||
99% 2.6 ms
|
||||
100% 84.3 ms
|
||||
50% 1.1 ms
|
||||
66% 1.3 ms
|
||||
75% 1.5 ms
|
||||
80% 1.7 ms
|
||||
90% 2.1 ms
|
||||
95% 2.6 ms
|
||||
98% 3.7 ms
|
||||
99% 4.6 ms
|
||||
100% 44.8 ms
|
||||
```
|
||||
|
||||
Randomly read 1 million files:
|
||||
```
|
||||
Concurrency Level: 16
|
||||
Time taken for tests: 22.301 seconds
|
||||
Completed requests: 1048576
|
||||
Time taken for tests: 34.263 seconds
|
||||
Complete requests: 1048576
|
||||
Failed requests: 0
|
||||
Total transferred: 1106812873 bytes
|
||||
Requests per second: 47019.38 [#/sec]
|
||||
Transfer rate: 48467.57 [Kbytes/sec]
|
||||
Total transferred: 1106762945 bytes
|
||||
Requests per second: 30603.34 [#/sec]
|
||||
Transfer rate: 31544.49 [Kbytes/sec]
|
||||
|
||||
Connection Times (ms)
|
||||
min avg max std
|
||||
Total: 0.0 0.3 54.1 0.2
|
||||
Total: 0.0 0.5 20.7 0.7
|
||||
|
||||
Percentage of the requests served within a certain time (ms)
|
||||
50% 0.3 ms
|
||||
90% 0.4 ms
|
||||
98% 0.6 ms
|
||||
99% 0.7 ms
|
||||
100% 54.1 ms
|
||||
50% 0.4 ms
|
||||
75% 0.5 ms
|
||||
95% 0.6 ms
|
||||
98% 0.8 ms
|
||||
99% 1.2 ms
|
||||
100% 20.7 ms
|
||||
```
|
||||
|
||||
### Run WARP and launch a mixed benchmark ###
|
||||
|
||||
```
|
||||
make benchmark
|
||||
warp: Benchmark data written to "warp-mixed-2023-10-16[102354]-l70a.csv.zst"
|
||||
Mixed operations.
|
||||
Operation: DELETE, 10%, Concurrency: 20, Ran 4m59s.
|
||||
* Throughput: 6.19 obj/s
|
||||
|
||||
Operation: GET, 45%, Concurrency: 20, Ran 5m0s.
|
||||
* Throughput: 279.85 MiB/s, 27.99 obj/s
|
||||
|
||||
Operation: PUT, 15%, Concurrency: 20, Ran 5m0s.
|
||||
* Throughput: 89.86 MiB/s, 8.99 obj/s
|
||||
|
||||
Operation: STAT, 30%, Concurrency: 20, Ran 5m0s.
|
||||
* Throughput: 18.63 obj/s
|
||||
|
||||
Cluster Total: 369.74 MiB/s, 61.79 obj/s, 0 errors over 5m0s.
|
||||
```
|
||||
|
||||
To see segmented request statistics, use the --analyze.v parameter.
|
||||
```
|
||||
warp analyze --analyze.v warp-mixed-2023-10-16[102354]-l70a.csv.zst
|
||||
18642 operations loaded... Done!
|
||||
Mixed operations.
|
||||
----------------------------------------
|
||||
Operation: DELETE - total: 1854, 10.0%, Concurrency: 20, Ran 5m0s, starting 2023-10-16 10:23:57.115 +0500 +05
|
||||
* Throughput: 6.19 obj/s
|
||||
|
||||
Requests considered: 1855:
|
||||
* Avg: 104ms, 50%: 30ms, 90%: 207ms, 99%: 1.355s, Fastest: 1ms, Slowest: 4.613s, StdDev: 320ms
|
||||
|
||||
----------------------------------------
|
||||
Operation: GET - total: 8388, 45.3%, Size: 10485760 bytes. Concurrency: 20, Ran 5m0s, starting 2023-10-16 10:23:57.12 +0500 +05
|
||||
* Throughput: 279.77 MiB/s, 27.98 obj/s
|
||||
|
||||
Requests considered: 8389:
|
||||
* Avg: 221ms, 50%: 106ms, 90%: 492ms, 99%: 1.739s, Fastest: 8ms, Slowest: 8.633s, StdDev: 383ms
|
||||
* TTFB: Avg: 81ms, Best: 2ms, 25th: 24ms, Median: 39ms, 75th: 65ms, 90th: 171ms, 99th: 669ms, Worst: 4.783s StdDev: 163ms
|
||||
* First Access: Avg: 240ms, 50%: 105ms, 90%: 511ms, 99%: 2.08s, Fastest: 12ms, Slowest: 8.633s, StdDev: 480ms
|
||||
* First Access TTFB: Avg: 88ms, Best: 2ms, 25th: 24ms, Median: 38ms, 75th: 64ms, 90th: 179ms, 99th: 919ms, Worst: 4.783s StdDev: 199ms
|
||||
* Last Access: Avg: 219ms, 50%: 106ms, 90%: 463ms, 99%: 1.782s, Fastest: 9ms, Slowest: 8.633s, StdDev: 416ms
|
||||
* Last Access TTFB: Avg: 81ms, Best: 2ms, 25th: 24ms, Median: 39ms, 75th: 65ms, 90th: 161ms, 99th: 657ms, Worst: 4.783s StdDev: 176ms
|
||||
|
||||
----------------------------------------
|
||||
Operation: PUT - total: 2688, 14.5%, Size: 10485760 bytes. Concurrency: 20, Ran 5m0s, starting 2023-10-16 10:23:57.115 +0500 +05
|
||||
* Throughput: 89.83 MiB/s, 8.98 obj/s
|
||||
|
||||
Requests considered: 2689:
|
||||
* Avg: 1.165s, 50%: 878ms, 90%: 2.015s, 99%: 5.74s, Fastest: 99ms, Slowest: 8.264s, StdDev: 968ms
|
||||
|
||||
----------------------------------------
|
||||
Operation: STAT - total: 5586, 30.2%, Concurrency: 20, Ran 5m0s, starting 2023-10-16 10:23:57.113 +0500 +05
|
||||
* Throughput: 18.63 obj/s
|
||||
|
||||
Requests considered: 5587:
|
||||
* Avg: 15ms, 50%: 11ms, 90%: 34ms, 99%: 80ms, Fastest: 0s, Slowest: 245ms, StdDev: 17ms
|
||||
* First Access: Avg: 14ms, 50%: 10ms, 90%: 33ms, 99%: 69ms, Fastest: 0s, Slowest: 203ms, StdDev: 16ms
|
||||
* Last Access: Avg: 15ms, 50%: 11ms, 90%: 34ms, 99%: 74ms, Fastest: 0s, Slowest: 203ms, StdDev: 17ms
|
||||
|
||||
Cluster Total: 369.64 MiB/s, 61.77 obj/s, 0 errors over 5m0s.
|
||||
Total Errors:0.
|
||||
```
|
||||
|
||||
[Back to TOC](#table-of-contents)
|
||||
|
||||
## Enterprise ##
|
||||
|
||||
For enterprise users, please visit [seaweedfs.com](https://seaweedfs.com) for the SeaweedFS Enterprise Edition,
|
||||
which has a self-healing storage format with better data protection.
|
||||
|
||||
[Back to TOC](#table-of-contents)
|
||||
|
||||
## License ##
|
||||
## License
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
|
@ -674,10 +447,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
|
||||
The text of this page is available for modification and reuse under the terms of the Creative Commons Attribution-Sharealike 3.0 Unported License and the GNU Free Documentation License (unversioned, with no invariant sections, front-cover texts, or back-cover texts).
|
||||
|
||||
[Back to TOC](#table-of-contents)
|
||||
|
||||
## Stargazers over time
|
||||
|
||||
[](https://starchart.cc/chrislusf/seaweedfs)
|
||||
[](https://starcharts.herokuapp.com/chrislusf/seaweedfs)
|
||||
|
|
13
backers.md
13
backers.md
|
@ -1,4 +1,3 @@
|
|||
|
||||
<h1 align="center">Sponsors & Backers</h1>
|
||||
|
||||
- [Become a backer or sponsor on Patreon](https://www.patreon.com/seaweedfs).
|
||||
|
@ -6,18 +5,8 @@
|
|||
<h2 align="center">Generous Backers ($50+)</h2>
|
||||
|
||||
- [Evercam Camera Management Software](https://evercam.io/)
|
||||
- [Spherical Elephant GmbH](https://www.sphericalelephant.com)
|
||||
- [WizardTales GmbH](https://www.wizardtales.com)
|
||||
- [Nimbus Web Services](https://nimbusws.com)
|
||||
|
||||
- <h2 align="center">Backers</h2>
|
||||
<h2 align="center">Backers</h2>
|
||||
|
||||
- [ColorfulClouds Tech Co. Ltd.](https://caiyunai.com/)
|
||||
- [Haravan - Ecommerce Platform](https://www.haravan.com)
|
||||
- PeterCxy - Creator of Shelter App
|
||||
- [Hive Games](https://playhive.com/)
|
||||
- Flowm
|
||||
- Yoni Nakache
|
||||
- Catalin Constantin
|
||||
- MingLi Yuan
|
||||
- Leroy van Logchem
|
||||
|
|
27
docker/Dockerfile
Normal file
27
docker/Dockerfile
Normal file
|
@ -0,0 +1,27 @@
|
|||
FROM frolvlad/alpine-glibc:alpine-3.5
|
||||
|
||||
# Tried to use curl only (curl -o /tmp/linux_amd64.tar.gz ...), however it turned out that the following tar command failed with "gzip: stdin: not in gzip format"
|
||||
RUN apk add --no-cache --virtual build-dependencies --update wget curl ca-certificates && \
|
||||
wget -P /tmp https://github.com/$(curl -s -L https://github.com/chrislusf/seaweedfs/releases/latest | egrep -o 'chrislusf/seaweedfs/releases/download/.*/linux_amd64.tar.gz') && \
|
||||
tar -C /usr/bin/ -xzvf /tmp/linux_amd64.tar.gz && \
|
||||
apk del build-dependencies && \
|
||||
rm -rf /tmp/*
|
||||
|
||||
# volume server grpc port
|
||||
EXPOSE 18080
|
||||
# volume server http port
|
||||
EXPOSE 8080
|
||||
# filer server grpc port
|
||||
EXPOSE 18888
|
||||
# filer server http port
|
||||
EXPOSE 8888
|
||||
# master server shared grpc+http port
|
||||
EXPOSE 9333
|
||||
|
||||
VOLUME /data
|
||||
|
||||
COPY filer.toml /etc/seaweedfs/filer.toml
|
||||
COPY entrypoint.sh /entrypoint.sh
|
||||
RUN chmod +x /entrypoint.sh
|
||||
|
||||
ENTRYPOINT ["/entrypoint.sh"]
|
|
@ -1,30 +0,0 @@
|
|||
FROM ubuntu:22.04
|
||||
|
||||
LABEL author="Chris Lu"
|
||||
|
||||
RUN apt-get update && apt-get install -y curl fio fuse
|
||||
RUN mkdir -p /etc/seaweedfs /data/filerldb2
|
||||
|
||||
COPY ./weed /usr/bin/
|
||||
COPY ./filer.toml /etc/seaweedfs/filer.toml
|
||||
COPY ./entrypoint.sh /entrypoint.sh
|
||||
|
||||
# volume server grpc port
|
||||
EXPOSE 18080
|
||||
# volume server http port
|
||||
EXPOSE 8080
|
||||
# filer server grpc port
|
||||
EXPOSE 18888
|
||||
# filer server http port
|
||||
EXPOSE 8888
|
||||
# master server shared grpc port
|
||||
EXPOSE 19333
|
||||
# master server shared http port
|
||||
EXPOSE 9333
|
||||
|
||||
VOLUME /data
|
||||
WORKDIR /data
|
||||
|
||||
RUN chmod +x /entrypoint.sh
|
||||
|
||||
ENTRYPOINT ["/entrypoint.sh"]
|
|
@ -1,44 +0,0 @@
|
|||
FROM golang:1.24-alpine as builder
|
||||
RUN apk add git g++ fuse
|
||||
RUN mkdir -p /go/src/github.com/seaweedfs/
|
||||
RUN git clone https://github.com/seaweedfs/seaweedfs /go/src/github.com/seaweedfs/seaweedfs
|
||||
ARG BRANCH=${BRANCH:-master}
|
||||
ARG TAGS
|
||||
RUN cd /go/src/github.com/seaweedfs/seaweedfs && git checkout $BRANCH
|
||||
RUN cd /go/src/github.com/seaweedfs/seaweedfs/weed \
|
||||
&& export LDFLAGS="-X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=$(git rev-parse --short HEAD)" \
|
||||
&& CGO_ENABLED=0 go install -tags "$TAGS" -ldflags "-extldflags -static ${LDFLAGS}"
|
||||
|
||||
FROM alpine AS final
|
||||
LABEL author="Chris Lu"
|
||||
COPY --from=builder /go/bin/weed /usr/bin/
|
||||
RUN mkdir -p /etc/seaweedfs
|
||||
COPY --from=builder /go/src/github.com/seaweedfs/seaweedfs/docker/filer.toml /etc/seaweedfs/filer.toml
|
||||
COPY --from=builder /go/src/github.com/seaweedfs/seaweedfs/docker/entrypoint.sh /entrypoint.sh
|
||||
RUN apk add fuse # for weed mount
|
||||
|
||||
# volume server grpc port
|
||||
EXPOSE 18080
|
||||
# volume server http port
|
||||
EXPOSE 8080
|
||||
# filer server grpc port
|
||||
EXPOSE 18888
|
||||
# filer server http port
|
||||
EXPOSE 8888
|
||||
# master server shared grpc port
|
||||
EXPOSE 19333
|
||||
# master server shared http port
|
||||
EXPOSE 9333
|
||||
# s3 server http port
|
||||
EXPOSE 8333
|
||||
# webdav server http port
|
||||
EXPOSE 7333
|
||||
|
||||
RUN mkdir -p /data/filerldb2
|
||||
|
||||
VOLUME /data
|
||||
WORKDIR /data
|
||||
|
||||
RUN chmod +x /entrypoint.sh
|
||||
|
||||
ENTRYPOINT ["/entrypoint.sh"]
|
|
@ -1,36 +0,0 @@
|
|||
FROM alpine AS final
|
||||
LABEL author="Chris Lu"
|
||||
COPY ./weed /usr/bin/
|
||||
COPY ./weed_pub* /usr/bin/
|
||||
COPY ./weed_sub* /usr/bin/
|
||||
RUN mkdir -p /etc/seaweedfs
|
||||
COPY ./filer.toml /etc/seaweedfs/filer.toml
|
||||
COPY ./entrypoint.sh /entrypoint.sh
|
||||
RUN apk add fuse # for weed mount
|
||||
RUN apk add curl # for health checks
|
||||
|
||||
# volume server grpc port
|
||||
EXPOSE 18080
|
||||
# volume server http port
|
||||
EXPOSE 8080
|
||||
# filer server grpc port
|
||||
EXPOSE 18888
|
||||
# filer server http port
|
||||
EXPOSE 8888
|
||||
# master server shared grpc port
|
||||
EXPOSE 19333
|
||||
# master server shared http port
|
||||
EXPOSE 9333
|
||||
# s3 server http port
|
||||
EXPOSE 8333
|
||||
# webdav server http port
|
||||
EXPOSE 7333
|
||||
|
||||
RUN mkdir -p /data/filerldb2
|
||||
|
||||
VOLUME /data
|
||||
WORKDIR /data
|
||||
|
||||
RUN chmod +x /entrypoint.sh
|
||||
|
||||
ENTRYPOINT ["/entrypoint.sh"]
|
|
@ -1,16 +0,0 @@
|
|||
FROM golang:1.24 as builder
|
||||
|
||||
RUN apt-get update
|
||||
RUN apt-get install -y build-essential libsnappy-dev zlib1g-dev libbz2-dev libgflags-dev liblz4-dev libzstd-dev
|
||||
|
||||
ENV ROCKSDB_VERSION v10.2.1
|
||||
|
||||
# build RocksDB
|
||||
RUN cd /tmp && \
|
||||
git clone https://github.com/facebook/rocksdb.git /tmp/rocksdb --depth 1 --single-branch --branch $ROCKSDB_VERSION && \
|
||||
cd rocksdb && \
|
||||
PORTABLE=1 make static_lib && \
|
||||
make install-static
|
||||
|
||||
ENV CGO_CFLAGS "-I/tmp/rocksdb/include"
|
||||
ENV CGO_LDFLAGS "-L/tmp/rocksdb -lrocksdb -lstdc++ -lm -lz -lbz2 -lsnappy -llz4 -lzstd"
|
|
@ -1,61 +0,0 @@
|
|||
FROM golang:1.24 as builder
|
||||
|
||||
RUN apt-get update
|
||||
RUN apt-get install -y build-essential libsnappy-dev zlib1g-dev libbz2-dev libgflags-dev liblz4-dev libzstd-dev
|
||||
|
||||
ENV ROCKSDB_VERSION v10.2.1
|
||||
|
||||
# build RocksDB
|
||||
RUN cd /tmp && \
|
||||
git clone https://github.com/facebook/rocksdb.git /tmp/rocksdb --depth 1 --single-branch --branch $ROCKSDB_VERSION && \
|
||||
cd rocksdb && \
|
||||
PORTABLE=1 make static_lib && \
|
||||
make install-static
|
||||
|
||||
ENV CGO_CFLAGS "-I/tmp/rocksdb/include"
|
||||
ENV CGO_LDFLAGS "-L/tmp/rocksdb -lrocksdb -lstdc++ -lm -lz -lbz2 -lsnappy -llz4 -lzstd"
|
||||
|
||||
# build SeaweedFS
|
||||
RUN mkdir -p /go/src/github.com/seaweedfs/
|
||||
RUN git clone https://github.com/seaweedfs/seaweedfs /go/src/github.com/seaweedfs/seaweedfs
|
||||
ARG BRANCH=${BRANCH:-master}
|
||||
RUN cd /go/src/github.com/seaweedfs/seaweedfs && git checkout $BRANCH
|
||||
RUN cd /go/src/github.com/seaweedfs/seaweedfs/weed \
|
||||
&& export LDFLAGS="-X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=$(git rev-parse --short HEAD)" \
|
||||
&& go install -tags "5BytesOffset rocksdb" -ldflags "-extldflags -static ${LDFLAGS}"
|
||||
|
||||
|
||||
FROM alpine AS final
|
||||
LABEL author="Chris Lu"
|
||||
COPY --from=builder /go/bin/weed /usr/bin/
|
||||
RUN mkdir -p /etc/seaweedfs
|
||||
COPY --from=builder /go/src/github.com/seaweedfs/seaweedfs/docker/filer_rocksdb.toml /etc/seaweedfs/filer.toml
|
||||
COPY --from=builder /go/src/github.com/seaweedfs/seaweedfs/docker/entrypoint.sh /entrypoint.sh
|
||||
RUN apk add fuse snappy gflags
|
||||
|
||||
# volume server grpc port
|
||||
EXPOSE 18080
|
||||
# volume server http port
|
||||
EXPOSE 8080
|
||||
# filer server grpc port
|
||||
EXPOSE 18888
|
||||
# filer server http port
|
||||
EXPOSE 8888
|
||||
# master server shared grpc port
|
||||
EXPOSE 19333
|
||||
# master server shared http port
|
||||
EXPOSE 9333
|
||||
# s3 server http port
|
||||
EXPOSE 8333
|
||||
# webdav server http port
|
||||
EXPOSE 7333
|
||||
|
||||
RUN mkdir -p /data/filer_rocksdb
|
||||
|
||||
VOLUME /data
|
||||
|
||||
WORKDIR /data
|
||||
|
||||
RUN chmod +x /entrypoint.sh
|
||||
|
||||
ENTRYPOINT ["/entrypoint.sh"]
|
|
@ -1,45 +0,0 @@
|
|||
FROM chrislusf/rocksdb_dev_env as builder
|
||||
|
||||
# build SeaweedFS
|
||||
RUN mkdir -p /go/src/github.com/seaweedfs/
|
||||
ADD . /go/src/github.com/seaweedfs/seaweedfs
|
||||
RUN ls -al /go/src/github.com/seaweedfs/ && \
|
||||
cd /go/src/github.com/seaweedfs/seaweedfs/weed \
|
||||
&& export LDFLAGS="-X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=$(git rev-parse --short HEAD)" \
|
||||
&& go install -tags "5BytesOffset rocksdb" -ldflags "-extldflags -static ${LDFLAGS}"
|
||||
|
||||
|
||||
FROM alpine AS final
|
||||
LABEL author="Chris Lu"
|
||||
COPY --from=builder /go/bin/weed /usr/bin/
|
||||
RUN mkdir -p /etc/seaweedfs
|
||||
COPY --from=builder /go/src/github.com/seaweedfs/seaweedfs/docker/filer_rocksdb.toml /etc/seaweedfs/filer.toml
|
||||
COPY --from=builder /go/src/github.com/seaweedfs/seaweedfs/docker/entrypoint.sh /entrypoint.sh
|
||||
RUN apk add fuse snappy gflags tmux
|
||||
|
||||
# volume server grpc port
|
||||
EXPOSE 18080
|
||||
# volume server http port
|
||||
EXPOSE 8080
|
||||
# filer server grpc port
|
||||
EXPOSE 18888
|
||||
# filer server http port
|
||||
EXPOSE 8888
|
||||
# master server shared grpc port
|
||||
EXPOSE 19333
|
||||
# master server shared http port
|
||||
EXPOSE 9333
|
||||
# s3 server http port
|
||||
EXPOSE 8333
|
||||
# webdav server http port
|
||||
EXPOSE 7333
|
||||
|
||||
RUN mkdir -p /data/filer_rocksdb
|
||||
|
||||
VOLUME /data
|
||||
|
||||
WORKDIR /data
|
||||
|
||||
RUN chmod +x /entrypoint.sh
|
||||
|
||||
ENTRYPOINT ["/entrypoint.sh"]
|
|
@ -1,31 +0,0 @@
|
|||
FROM ubuntu:20.04
|
||||
|
||||
RUN DEBIAN_FRONTEND=noninteractive apt-get update && \
|
||||
DEBIAN_FRONTEND=noninteractive apt-get upgrade -y && \
|
||||
DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
|
||||
git \
|
||||
sudo \
|
||||
debianutils \
|
||||
python3-pip \
|
||||
python3-virtualenv \
|
||||
python3-dev \
|
||||
libevent-dev \
|
||||
libffi-dev \
|
||||
libxml2-dev \
|
||||
libxslt-dev \
|
||||
zlib1g-dev && \
|
||||
DEBIAN_FRONTEND=noninteractive apt-get clean && \
|
||||
rm -rf /var/lib/apt/lists/* && \
|
||||
git clone https://github.com/ceph/s3-tests.git /opt/s3-tests
|
||||
|
||||
WORKDIR /opt/s3-tests
|
||||
RUN ./bootstrap
|
||||
|
||||
ENV \
|
||||
NOSETESTS_EXCLUDE="" \
|
||||
NOSETESTS_ATTR="" \
|
||||
NOSETESTS_OPTIONS="" \
|
||||
S3TEST_CONF="/s3tests.conf"
|
||||
|
||||
ENTRYPOINT ["/bin/bash", "-c"]
|
||||
CMD ["sleep 30 && exec ./virtualenv/bin/nosetests ${NOSETESTS_OPTIONS-} ${NOSETESTS_ATTR:+-a $NOSETESTS_ATTR} ${NOSETESTS_EXCLUDE:+-e $NOSETESTS_EXCLUDE}"]
|
|
@ -1,17 +0,0 @@
|
|||
FROM tarantool/tarantool:3.3.1 AS builder
|
||||
|
||||
# install dependencies
|
||||
RUN apt update && \
|
||||
apt install -y git unzip cmake tt=2.7.0
|
||||
|
||||
# init tt dir structure, create dir for app, create symlink
|
||||
RUN tt init && \
|
||||
mkdir app && \
|
||||
ln -sfn ${PWD}/app/ ${PWD}/instances.enabled/app
|
||||
|
||||
# copy cluster configs
|
||||
COPY tarantool /opt/tarantool/app
|
||||
|
||||
# build app
|
||||
RUN tt build app
|
||||
|
131
docker/Makefile
131
docker/Makefile
|
@ -1,131 +0,0 @@
|
|||
all: gen
|
||||
|
||||
.PHONY : gen
|
||||
|
||||
gen: dev
|
||||
|
||||
cgo ?= 0
|
||||
binary:
|
||||
export SWCOMMIT=$(shell git rev-parse --short HEAD)
|
||||
export SWLDFLAGS="-X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=$(SWCOMMIT)"
|
||||
cd ../weed && CGO_ENABLED=$(cgo) GOOS=linux go build $(options) -tags "$(tags)" -ldflags "-s -w -extldflags -static $(SWLDFLAGS)" && mv weed ../docker/
|
||||
cd ../other/mq_client_example/agent_pub_record && CGO_ENABLED=$(cgo) GOOS=linux go build && mv agent_pub_record ../../../docker/
|
||||
cd ../other/mq_client_example/agent_sub_record && CGO_ENABLED=$(cgo) GOOS=linux go build && mv agent_sub_record ../../../docker/
|
||||
|
||||
binary_race: options = -race
|
||||
binary_race: cgo = 1
|
||||
binary_race: binary
|
||||
|
||||
build: binary
|
||||
docker build --no-cache -t chrislusf/seaweedfs:local -f Dockerfile.local .
|
||||
|
||||
build_e2e: binary_race
|
||||
docker build --no-cache -t chrislusf/seaweedfs:e2e -f Dockerfile.e2e .
|
||||
|
||||
go_build: # make go_build tags=elastic,ydb,gocdk,hdfs,5BytesOffset,tarantool
|
||||
docker build --build-arg TAGS=$(tags) --no-cache -t chrislusf/seaweedfs:go_build -f Dockerfile.go_build .
|
||||
|
||||
go_build_large_disk:
|
||||
docker build --build-arg TAGS=large_disk --no-cache -t chrislusf/seaweedfs:large_disk -f Dockerfile.go_build .
|
||||
|
||||
build_rocksdb_dev_env:
|
||||
docker build --no-cache -t chrislusf/rocksdb_dev_env -f Dockerfile.rocksdb_dev_env .
|
||||
|
||||
build_rocksdb_local: build_rocksdb_dev_env
|
||||
cd .. ; docker build --no-cache -t chrislusf/seaweedfs:rocksdb_local -f docker/Dockerfile.rocksdb_large_local .
|
||||
|
||||
build_rocksdb:
|
||||
docker build --no-cache -t chrislusf/seaweedfs:rocksdb -f Dockerfile.rocksdb_large .
|
||||
|
||||
build_tarantool_dev_env:
|
||||
docker build --no-cache -t chrislusf/tarantool_dev_env -f Dockerfile.tarantool.dev_env .
|
||||
|
||||
s3tests_build:
|
||||
docker build --no-cache -t chrislusf/ceph-s3-tests:local -f Dockerfile.s3tests .
|
||||
|
||||
dev: build
|
||||
docker compose -f compose/local-dev-compose.yml -p seaweedfs up
|
||||
|
||||
dev_race: binary_race
|
||||
docker compose -f compose/local-dev-compose.yml -p seaweedfs up
|
||||
|
||||
dev_tls: build certstrap
|
||||
ENV_FILE="tls.env" docker compose -f compose/local-dev-compose.yml -p seaweedfs up
|
||||
|
||||
dev_mount: build
|
||||
docker compose -f compose/local-mount-compose.yml -p seaweedfs up
|
||||
|
||||
run_image: build
|
||||
docker run --rm -ti --device /dev/fuse --cap-add SYS_ADMIN --entrypoint /bin/sh chrislusf/seaweedfs:local
|
||||
|
||||
profile_mount: build
|
||||
docker compose -f compose/local-mount-profile-compose.yml -p seaweedfs up
|
||||
|
||||
k8s: build
|
||||
docker compose -f compose/local-k8s-compose.yml -p seaweedfs up
|
||||
|
||||
dev_registry: build
|
||||
docker compose -f compose/local-registry-compose.yml -p seaweedfs up
|
||||
|
||||
dev_replicate:
|
||||
docker build --build-arg TAGS=gocdk --no-cache -t chrislusf/seaweedfs:local -f Dockerfile.go_build .
|
||||
docker compose -f compose/local-replicate-compose.yml -p seaweedfs up
|
||||
|
||||
dev_auditlog: build
|
||||
docker compose -f compose/local-auditlog-compose.yml -p seaweedfs up
|
||||
|
||||
dev_nextcloud: build
|
||||
docker compose -f compose/local-nextcloud-compose.yml -p seaweedfs up
|
||||
|
||||
cluster: build
|
||||
docker compose -f compose/local-cluster-compose.yml -p seaweedfs up
|
||||
|
||||
2clusters: build
|
||||
docker compose -f compose/local-clusters-compose.yml -p seaweedfs up
|
||||
|
||||
2mount: build
|
||||
docker compose -f compose/local-sync-mount-compose.yml -p seaweedfs up
|
||||
|
||||
filer_backup: build
|
||||
docker compose -f compose/local-filer-backup-compose.yml -p seaweedfs up
|
||||
|
||||
hashicorp_raft: build
|
||||
docker compose -f compose/local-hashicorp-raft-compose.yml -p seaweedfs up
|
||||
|
||||
s3tests: build s3tests_build
|
||||
docker compose -f compose/local-s3tests-compose.yml -p seaweedfs up
|
||||
|
||||
brokers: build
|
||||
docker compose -f compose/local-brokers-compose.yml -p seaweedfs up
|
||||
|
||||
agent: build
|
||||
docker compose -f compose/local-mq-test.yml -p seaweedfs up
|
||||
|
||||
filer_etcd: build
|
||||
docker stack deploy -c compose/swarm-etcd.yml fs
|
||||
|
||||
test_etcd: build
|
||||
docker compose -f compose/test-etcd-filer.yml -p seaweedfs up
|
||||
|
||||
test_ydb: tags = ydb
|
||||
test_ydb: build
|
||||
docker compose -f compose/test-ydb-filer.yml -p seaweedfs up
|
||||
|
||||
test_tarantool: tags = tarantool
|
||||
test_tarantool: build_tarantool_dev_env build
|
||||
docker compose -f compose/test-tarantool-filer.yml -p seaweedfs up
|
||||
|
||||
clean:
|
||||
rm ./weed
|
||||
|
||||
certstrap:
|
||||
go install -v github.com/square/certstrap@latest
|
||||
certstrap --depot-path compose/tls init --curve P-256 --passphrase "" --common-name "SeaweedFS CA" || true
|
||||
certstrap --depot-path compose/tls request-cert --ou "SeaweedFS" --curve P-256 --passphrase "" --domain localhost --common-name volume01.dev || true
|
||||
certstrap --depot-path compose/tls request-cert --ou "SeaweedFS" --curve P-256 --passphrase "" --common-name master01.dev || true
|
||||
certstrap --depot-path compose/tls request-cert --ou "SeaweedFS" --curve P-256 --passphrase "" --common-name filer01.dev || true
|
||||
certstrap --depot-path compose/tls request-cert --ou "SeaweedFS" --curve P-256 --passphrase "" --common-name client01.dev || true
|
||||
certstrap --depot-path compose/tls sign --CA "SeaweedFS CA" volume01.dev || true
|
||||
certstrap --depot-path compose/tls sign --CA "SeaweedFS CA" master01.dev || true
|
||||
certstrap --depot-path compose/tls sign --CA "SeaweedFS CA" filer01.dev || true
|
||||
certstrap --depot-path compose/tls sign --CA "SeaweedFS CA" client01.dev || true
|
|
@ -1,61 +0,0 @@
|
|||
# Docker
|
||||
|
||||
## Compose V2
|
||||
SeaweedFS now uses the `v2` syntax `docker compose`
|
||||
|
||||
If you rely on using Docker Compose as docker-compose (with a hyphen), you can set up Compose V2 to act as a drop-in replacement of the previous docker-compose. Refer to the [Installing Compose](https://docs.docker.com/compose/install/) section for detailed instructions on upgrading.
|
||||
|
||||
Confirm your system has docker compose v2 with a version check
|
||||
```bash
|
||||
$ docker compose version
|
||||
Docker Compose version v2.10.2
|
||||
```
|
||||
|
||||
## Try it out
|
||||
|
||||
```bash
|
||||
|
||||
wget https://raw.githubusercontent.com/seaweedfs/seaweedfs/master/docker/seaweedfs-compose.yml
|
||||
|
||||
docker compose -f seaweedfs-compose.yml -p seaweedfs up
|
||||
|
||||
```
|
||||
|
||||
## Try latest tip
|
||||
|
||||
```bash
|
||||
|
||||
wget https://raw.githubusercontent.com/seaweedfs/seaweedfs/master/docker/seaweedfs-dev-compose.yml
|
||||
|
||||
docker compose -f seaweedfs-dev-compose.yml -p seaweedfs up
|
||||
|
||||
```
|
||||
|
||||
## Local Development
|
||||
|
||||
```bash
|
||||
cd $GOPATH/src/github.com/seaweedfs/seaweedfs/docker
|
||||
make
|
||||
```
|
||||
|
||||
### S3 cmd
|
||||
|
||||
list
|
||||
```
|
||||
s3cmd --no-ssl --host=127.0.0.1:8333 ls s3://
|
||||
```
|
||||
|
||||
## Build and push a multiarch build
|
||||
|
||||
Make sure that `docker buildx` is supported (might be an experimental docker feature)
|
||||
```bash
|
||||
BUILDER=$(docker buildx create --driver docker-container --use)
|
||||
docker buildx build --pull --push --platform linux/386,linux/amd64,linux/arm64,linux/arm/v7,linux/arm/v6 . -t chrislusf/seaweedfs
|
||||
docker buildx stop $BUILDER
|
||||
```
|
||||
|
||||
## Minio debugging
|
||||
```
|
||||
mc config host add local http://127.0.0.1:9000 some_access_key1 some_secret_key1
|
||||
mc admin trace --all --verbose local
|
||||
```
|
|
@ -1,53 +0,0 @@
|
|||
version: '3.9'
|
||||
|
||||
services:
|
||||
master:
|
||||
image: chrislusf/seaweedfs:e2e
|
||||
command: "-v=4 master -ip=master -ip.bind=0.0.0.0 -raftBootstrap"
|
||||
healthcheck:
|
||||
test: [ "CMD", "curl", "--fail", "-I", "http://localhost:9333/cluster/healthz" ]
|
||||
interval: 1s
|
||||
timeout: 60s
|
||||
|
||||
volume:
|
||||
image: chrislusf/seaweedfs:e2e
|
||||
command: "-v=4 volume -mserver=master:9333 -ip=volume -ip.bind=0.0.0.0 -preStopSeconds=1"
|
||||
healthcheck:
|
||||
test: [ "CMD", "curl", "--fail", "-I", "http://localhost:8080/healthz" ]
|
||||
interval: 1s
|
||||
timeout: 30s
|
||||
depends_on:
|
||||
master:
|
||||
condition: service_healthy
|
||||
|
||||
filer:
|
||||
image: chrislusf/seaweedfs:e2e
|
||||
command: "-v=4 filer -master=master:9333 -ip=filer -ip.bind=0.0.0.0"
|
||||
healthcheck:
|
||||
test: [ "CMD", "curl", "--fail", "-I", "http://localhost:8888" ]
|
||||
interval: 1s
|
||||
timeout: 30s
|
||||
depends_on:
|
||||
volume:
|
||||
condition: service_healthy
|
||||
|
||||
mount:
|
||||
image: chrislusf/seaweedfs:e2e
|
||||
command: "-v=4 mount -filer=filer:8888 -filer.path=/ -dirAutoCreate -dir=/mnt/seaweedfs"
|
||||
cap_add:
|
||||
- SYS_ADMIN
|
||||
devices:
|
||||
- /dev/fuse
|
||||
security_opt:
|
||||
- apparmor:unconfined
|
||||
deploy:
|
||||
resources:
|
||||
limits:
|
||||
memory: 4096m
|
||||
healthcheck:
|
||||
test: [ "CMD", "mountpoint", "-q", "--", "/mnt/seaweedfs" ]
|
||||
interval: 1s
|
||||
timeout: 30s
|
||||
depends_on:
|
||||
filer:
|
||||
condition: service_healthy
|
|
@ -1,8 +0,0 @@
|
|||
<source>
|
||||
@type forward
|
||||
port 24224
|
||||
</source>
|
||||
|
||||
<match **>
|
||||
@type stdout # Output logs to container's stdout (visible via `docker logs`)
|
||||
</match>
|
|
@ -1,4 +0,0 @@
|
|||
{
|
||||
"fluent_port": 24224,
|
||||
"fluent_host": "fluent"
|
||||
}
|
|
@ -1,38 +0,0 @@
|
|||
version: '3.9'
|
||||
|
||||
services:
|
||||
s3:
|
||||
image: chrislusf/seaweedfs:local
|
||||
ports:
|
||||
- 8333:8333
|
||||
- 9333:9333
|
||||
- 19333:19333
|
||||
- 8084:8080
|
||||
- 18084:18080
|
||||
- 8888:8888
|
||||
- 18888:18888
|
||||
- 8000:8000
|
||||
command: "server -ip=s3 -filer -s3 -s3.config=/etc/seaweedfs/s3.json -s3.port=8000 -s3.auditLogConfig=/etc/seaweedfs/fluent.json -volume.max=0 -master.volumeSizeLimitMB=8 -volume.preStopSeconds=1"
|
||||
volumes:
|
||||
- ./fluent.json:/etc/seaweedfs/fluent.json
|
||||
- ./s3.json:/etc/seaweedfs/s3.json
|
||||
depends_on:
|
||||
- fluent
|
||||
fluent:
|
||||
image: fluent/fluentd:v1.17
|
||||
volumes:
|
||||
- ./fluent.conf:/fluentd/etc/fluent.conf
|
||||
ports:
|
||||
- 24224:24224
|
||||
#s3tests:
|
||||
# image: chrislusf/ceph-s3-tests:local
|
||||
# volumes:
|
||||
# - ./s3tests.conf:/opt/s3-tests/s3tests.conf
|
||||
# environment:
|
||||
# S3TEST_CONF: "s3tests.conf"
|
||||
# NOSETESTS_OPTIONS: "--verbose --logging-level=ERROR --with-xunit --failure-detail s3tests_boto3.functional.test_s3"
|
||||
# NOSETESTS_ATTR: "!tagging,!fails_on_aws,!encryption,!bucket-policy,!versioning,!fails_on_rgw,!bucket-policy,!fails_with_subdomain,!policy_status,!object-lock,!lifecycle,!cors,!user-policy"
|
||||
# NOSETESTS_EXCLUDE: "(get_bucket_encryption|put_bucket_encryption|bucket_list_delimiter_basic|bucket_listv2_delimiter_basic|bucket_listv2_encoding_basic|bucket_list_encoding_basic|bucket_list_delimiter_prefix|bucket_listv2_delimiter_prefix_ends_with_delimiter|bucket_list_delimiter_prefix_ends_with_delimiter|bucket_list_delimiter_alt|bucket_listv2_delimiter_alt|bucket_list_delimiter_prefix_underscore|bucket_list_delimiter_percentage|bucket_listv2_delimiter_percentage|bucket_list_delimiter_whitespace|bucket_listv2_delimiter_whitespace|bucket_list_delimiter_dot|bucket_listv2_delimiter_dot|bucket_list_delimiter_unreadable|bucket_listv2_delimiter_unreadable|bucket_listv2_fetchowner_defaultempty|bucket_listv2_fetchowner_empty|bucket_list_prefix_delimiter_alt|bucket_listv2_prefix_delimiter_alt|bucket_list_prefix_delimiter_prefix_not_exist|bucket_listv2_prefix_delimiter_prefix_not_exist|bucket_list_prefix_delimiter_delimiter_not_exist|bucket_listv2_prefix_delimiter_delimiter_not_exist|bucket_list_prefix_delimiter_prefix_delimiter_not_exist|bucket_listv2_prefix_delimiter_prefix_delimiter_not_exist|bucket_list_maxkeys_none|bucket_listv2_maxkeys_none|bucket_list_maxkeys_invalid|bucket_listv2_continuationtoken_empty|bucket_list_return_data|bucket_list_objects_anonymous|bucket_listv2_objects_anonymous|bucket_notexist|bucketv2_notexist|bucket_delete_nonempty|bucket_concurrent_set_canned_acl|object_write_to_nonexist_bucket|object_requestid_matches_header_on_error|object_set_get_metadata_none_to_good|object_set_get_metadata_none_to_empty|object_set_get_metadata_overwrite_to_empty|post_object_anonymous_request|post_object_authenticated_request|post_object_authenticated_no_content_type|post_object_authenticated_request_bad_access_key|post_object_set_success_code|post_object_set_invalid_success_code|post_object_upload_larger_than_chunk|post_object_set_key_from_filename|post_object_ignored_header|post_object_case_insensitive_condition_fields|post_object_escaped_field_values|post_object_success_redirect_action|post_object_invalid_signature|post_object_invalid_access_key|post_object_missing_policy_condition|post_object_user_specified_header|post_object_request_missing_policy_specified_field|post_object_expired_policy|post_object_invalid_request_field_value|get_object_ifunmodifiedsince_good|put_object_ifmatch_failed|object_raw_get_bucket_gone|object_delete_key_bucket_gone|object_raw_get_bucket_acl|object_raw_get_object_acl|object_raw_response_headers|object_raw_authenticated_bucket_gone|object_raw_get_x_amz_expires_out_max_range|object_raw_get_x_amz_expires_out_positive_range|object_anon_put_write_access|object_raw_put_authenticated_expired|bucket_create_exists|bucket_create_naming_bad_short_one|bucket_create_naming_bad_short_two|bucket_get_location|bucket_acl_default|bucket_acl_canned|bucket_acl_canned_publicreadwrite|bucket_acl_canned_authenticatedread|object_acl_default|object_acl_canned_during_create|object_acl_canned|object_acl_canned_publicreadwrite|object_acl_canned_authenticatedread|object_acl_canned_bucketownerread|object_acl_canned_bucketownerfullcontrol|object_acl_full_control_verify_attributes|bucket_acl_canned_private_to_private|bucket_acl_grant_nonexist_user|bucket_acl_no_grants|bucket_acl_grant_email_not_exist|bucket_acl_revoke_all|bucket_recreate_not_overriding|object_copy_verify_contenttype|object_copy_to_itself_with_metadata|object_copy_not_owned_bucket|object_copy_not_owned_object_bucket|object_copy_retaining_metadata|object_copy_replacing_metadata|multipart_upload_empty|multipart_copy_invalid_range|multipart_copy_special_names|multipart_upload_resend_part|multipart_upload_size_too_small|abort_multipart_upload_not_found|multipart_upload_missing_part|multipart_upload_incorrect_etag|100_continue|ranged_request_invalid_range|ranged_request_empty_object|access_bucket)"
|
||||
# depends_on:
|
||||
# - s3
|
||||
# - fluent
|
|
@@ -1,127 +0,0 @@
version: '3.9'

services:
  master0:
    image: chrislusf/seaweedfs:local
    ports:
      - 9333:9333
      - 19333:19333
    command: "-v=0 master -volumeSizeLimitMB 100 -resumeState=false -ip=master0 -port=9333 -peers=master0:9333,master1:9334,master2:9335 -mdir=/tmp"
    environment:
      WEED_MASTER_VOLUME_GROWTH_COPY_1: 1
      WEED_MASTER_VOLUME_GROWTH_COPY_2: 2
      WEED_MASTER_VOLUME_GROWTH_COPY_OTHER: 1
  master1:
    image: chrislusf/seaweedfs:local
    ports:
      - 9334:9334
      - 19334:19334
    command: "-v=0 master -volumeSizeLimitMB 100 -resumeState=false -ip=master1 -port=9334 -peers=master0:9333,master1:9334,master2:9335 -mdir=/tmp"
    environment:
      WEED_MASTER_VOLUME_GROWTH_COPY_1: 1
      WEED_MASTER_VOLUME_GROWTH_COPY_2: 2
      WEED_MASTER_VOLUME_GROWTH_COPY_OTHER: 1
  master2:
    image: chrislusf/seaweedfs:local
    ports:
      - 9335:9335
      - 19335:19335
    command: "-v=0 master -volumeSizeLimitMB 100 -resumeState=false -ip=master2 -port=9335 -peers=master0:9333,master1:9334,master2:9335 -mdir=/tmp"
    environment:
      WEED_MASTER_VOLUME_GROWTH_COPY_1: 1
      WEED_MASTER_VOLUME_GROWTH_COPY_2: 2
      WEED_MASTER_VOLUME_GROWTH_COPY_OTHER: 1
  volume1:
    image: chrislusf/seaweedfs:local
    ports:
      - 8080:8080
      - 18080:18080
    command: 'volume -dataCenter=dc1 -rack=v1 -mserver="master0:9333,master1:9334,master2:9335" -port=8080 -ip=volume1 -publicUrl=localhost:8080 -preStopSeconds=1'
    depends_on:
      - master0
      - master1
      - master2
  volume2:
    image: chrislusf/seaweedfs:local
    ports:
      - 8082:8082
      - 18082:18082
    command: 'volume -dataCenter=dc2 -rack=v2 -mserver="master0:9333,master1:9334,master2:9335" -port=8082 -ip=volume2 -publicUrl=localhost:8082 -preStopSeconds=1'
    depends_on:
      - master0
      - master1
      - master2
  volume3:
    image: chrislusf/seaweedfs:local
    ports:
      - 8083:8083
      - 18083:18083
    command: 'volume -dataCenter=dc3 -rack=v3 -mserver="master0:9333,master1:9334,master2:9335" -port=8083 -ip=volume3 -publicUrl=localhost:8083 -preStopSeconds=1'
    depends_on:
      - master0
      - master1
      - master2
  filer1:
    image: chrislusf/seaweedfs:local
    ports:
      - 8888:8888
      - 18888:18888
    command: 'filer -defaultReplicaPlacement=100 -iam -master="master0:9333,master1:9334,master2:9335" -port=8888 -ip=filer1'
    depends_on:
      - master0
      - master1
      - master2
      - volume1
      - volume2
  filer2:
    image: chrislusf/seaweedfs:local
    ports:
      - 8889:8889
      - 18889:18889
    command: 'filer -defaultReplicaPlacement=100 -iam -master="master0:9333,master1:9334,master2:9335" -port=8889 -ip=filer2'
    depends_on:
      - master0
      - master1
      - master2
      - volume1
      - volume2
      - filer1
  broker1:
    image: chrislusf/seaweedfs:local
    ports:
      - 17777:17777
    command: 'mq.broker -master="master0:9333,master1:9334,master2:9335" -port=17777 -ip=broker1'
    depends_on:
      - master0
      - master1
      - master2
      - volume1
      - volume2
      - filer1
      - filer2
  broker2:
    image: chrislusf/seaweedfs:local
    ports:
      - 17778:17778
    command: 'mq.broker -master="master0:9333,master1:9334,master2:9335" -port=17778 -ip=broker2'
    depends_on:
      - master0
      - master1
      - master2
      - volume1
      - volume2
      - filer1
      - filer2
  broker3:
    image: chrislusf/seaweedfs:local
    ports:
      - 17779:17779
    command: 'mq.broker -master="master0:9333,master1:9334,master2:9335" -port=17779 -ip=broker3'
    depends_on:
      - master0
      - master1
      - master2
      - volume1
      - volume2
      - filer1
      - filer2
@@ -1,88 +0,0 @@
version: '3.9'

services:
  master0:
    image: chrislusf/seaweedfs:local
    ports:
      - 9333:9333
      - 19333:19333
    command: "-v=1 master -volumeSizeLimitMB 100 -resumeState=false -ip=master0 -port=9333 -peers=master0:9333,master1:9334,master2:9335 -mdir=/tmp"
    environment:
      WEED_MASTER_VOLUME_GROWTH_COPY_1: 1
      WEED_MASTER_VOLUME_GROWTH_COPY_2: 2
      WEED_MASTER_VOLUME_GROWTH_COPY_OTHER: 1
  master1:
    image: chrislusf/seaweedfs:local
    ports:
      - 9334:9334
      - 19334:19334
    command: "-v=1 master -volumeSizeLimitMB 100 -resumeState=false -ip=master1 -port=9334 -peers=master0:9333,master1:9334,master2:9335 -mdir=/tmp"
    environment:
      WEED_MASTER_VOLUME_GROWTH_COPY_1: 1
      WEED_MASTER_VOLUME_GROWTH_COPY_2: 2
      WEED_MASTER_VOLUME_GROWTH_COPY_OTHER: 1
  master2:
    image: chrislusf/seaweedfs:local
    ports:
      - 9335:9335
      - 19335:19335
    command: "-v=1 master -volumeSizeLimitMB 100 -resumeState=false -ip=master2 -port=9335 -peers=master0:9333,master1:9334,master2:9335 -mdir=/tmp"
    environment:
      WEED_MASTER_VOLUME_GROWTH_COPY_1: 1
      WEED_MASTER_VOLUME_GROWTH_COPY_2: 2
      WEED_MASTER_VOLUME_GROWTH_COPY_OTHER: 1
  volume1:
    image: chrislusf/seaweedfs:local
    ports:
      - 8080:8080
      - 18080:18080
    command: 'volume -dataCenter=dc1 -rack=v1 -mserver="master0:9333,master1:9334,master2:9335" -port=8080 -ip=volume1 -publicUrl=localhost:8080 -preStopSeconds=1'
    depends_on:
      - master0
      - master1
      - master2
  volume2:
    image: chrislusf/seaweedfs:local
    ports:
      - 8082:8082
      - 18082:18082
    command: 'volume -dataCenter=dc2 -rack=v2 -mserver="master0:9333,master1:9334,master2:9335" -port=8082 -ip=volume2 -publicUrl=localhost:8082 -preStopSeconds=1'
    depends_on:
      - master0
      - master1
      - master2
  volume3:
    image: chrislusf/seaweedfs:local
    ports:
      - 8083:8083
      - 18083:18083
    command: 'volume -dataCenter=dc3 -rack=v3 -mserver="master0:9333,master1:9334,master2:9335" -port=8083 -ip=volume3 -publicUrl=localhost:8083 -preStopSeconds=1'
    depends_on:
      - master0
      - master1
      - master2
  filer:
    image: chrislusf/seaweedfs:local
    ports:
      - 8888:8888
      - 18888:18888
      - 8111:8111
    command: 'filer -defaultReplicaPlacement=100 -iam -master="master0:9333,master1:9334,master2:9335"'
    depends_on:
      - master0
      - master1
      - master2
      - volume1
      - volume2
  s3:
    image: chrislusf/seaweedfs:local
    ports:
      - 8333:8333
    command: '-v=9 s3 -filer="filer:8888"'
    depends_on:
      - master0
      - master1
      - master2
      - volume1
      - volume2
      - filer
@@ -1,28 +0,0 @@
version: '3.9'

services:
  server1:
    image: chrislusf/seaweedfs:local
    ports:
      - 9333:9333
      - 19333:19333
      - 8084:8080
      - 18084:18080
      - 8888:8888
      - 18888:18888
    command: "server -ip=server1 -filer -volume.max=0 -master.volumeSizeLimitMB=100 -volume.preStopSeconds=1"
    volumes:
      - ./master-cloud.toml:/etc/seaweedfs/master.toml
    depends_on:
      - server2
  server2:
    image: chrislusf/seaweedfs:local
    ports:
      - 9334:9333
      - 19334:19333
      - 8085:8080
      - 18085:18080
      - 8889:8888
      - 18889:18888
      - 8334:8333
    command: "server -ip=server2 -filer -s3 -volume.max=0 -master.volumeSizeLimitMB=100 -volume.preStopSeconds=1"
@@ -1,80 +0,0 @@
version: '3.9'

services:
  master:
    image: chrislusf/seaweedfs:local
    ports:
      - 9333:9333
      - 19333:19333
    command: "-v=1 master -ip=master -volumeSizeLimitMB=10"
    volumes:
      - ./tls:/etc/seaweedfs/tls
    env_file:
      - ${ENV_FILE:-dev.env}
  volume:
    image: chrislusf/seaweedfs:local
    ports:
      - 8080:8080
      - 18080:18080
    command: "-v=1 volume -mserver=master:9333 -port=8080 -ip=volume -preStopSeconds=1 -max=10000"
    depends_on:
      - master
    volumes:
      - ./tls:/etc/seaweedfs/tls
    env_file:
      - ${ENV_FILE:-dev.env}
  filer:
    image: chrislusf/seaweedfs:local
    ports:
      - 8888:8888
      - 18888:18888
    command: '-v=1 filer -ip.bind=0.0.0.0 -master="master:9333"'
    depends_on:
      - master
      - volume
    volumes:
      - ./tls:/etc/seaweedfs/tls
    env_file:
      - ${ENV_FILE:-dev.env}

  iam:
    image: chrislusf/seaweedfs:local
    ports:
      - 8111:8111
    command: '-v=1 iam -filer="filer:8888" -master="master:9333"'
    depends_on:
      - master
      - volume
      - filer
    volumes:
      - ./tls:/etc/seaweedfs/tls

  s3:
    image: chrislusf/seaweedfs:local
    ports:
      - 8333:8333
    command: '-v=1 s3 -filer="filer:8888" -ip.bind=s3'
    depends_on:
      - master
      - volume
      - filer
    volumes:
      - ./tls:/etc/seaweedfs/tls
    env_file:
      - ${ENV_FILE:-dev.env}

  mount:
    image: chrislusf/seaweedfs:local
    privileged: true
    cap_add:
      - SYS_ADMIN
    mem_limit: 4096m
    command: '-v=4 mount -filer="filer:8888" -dirAutoCreate -dir=/mnt/seaweedfs -cacheCapacityMB=100 -concurrentWriters=128'
    volumes:
      - ./tls:/etc/seaweedfs/tls
    env_file:
      - ${ENV_FILE:-dev.env}
    depends_on:
      - master
      - volume
      - filer
@@ -1,54 +0,0 @@
version: '3.9'

services:
  server-left:
    image: chrislusf/seaweedfs:local
    command: "-v=0 server -ip=server-left -filer -filer.maxMB 5 -s3 -s3.config=/etc/seaweedfs/s3.json -volume.max=0 -master.volumeSizeLimitMB=100 -volume.preStopSeconds=1"
    volumes:
      - ./s3.json:/etc/seaweedfs/s3.json
    healthcheck:
      test: [ "CMD", "curl", "--fail", "-I", "http://localhost:9333/cluster/healthz" ]
      interval: 3s
      start_period: 15s
      timeout: 30s
  server-right:
    image: chrislusf/seaweedfs:local
    command: "-v=0 server -ip=server-right -filer -filer.maxMB 64 -s3 -s3.config=/etc/seaweedfs/s3.json -volume.max=0 -master.volumeSizeLimitMB=100 -volume.preStopSeconds=1"
    volumes:
      - ./s3.json:/etc/seaweedfs/s3.json
    healthcheck:
      test: [ "CMD", "curl", "--fail", "-I", "http://localhost:9333/cluster/healthz" ]
      interval: 3s
      start_period: 15s
      timeout: 30s
  filer-backup:
    image: chrislusf/seaweedfs:local
    command: "-v=0 filer.backup -debug -doDeleteFiles=False -filer server-left:8888"
    volumes:
      - ./replication.toml:/etc/seaweedfs/replication.toml
    environment:
      WEED_SINK_LOCAL_INCREMENTAL_ENABLED: "false"
      WEED_SINK_S3_ENABLED: "true"
      WEED_SINK_S3_BUCKET: "backup"
      WEED_SINK_S3_ENDPOINT: "http://server-right:8333"
      WEED_SINK_S3_DIRECTORY: "/"
      WEED_SINK_S3_AWS_ACCESS_KEY_ID: "some_access_key1"
      WEED_SINK_S3_AWS_SECRET_ACCESS_KEY: "some_secret_key1"
      WEED_SINK_S3_S3_DISABLE_CONTENT_MD5_VALIDATION: "false"
      WEED_SINK_S3_UPLOADER_PART_SIZE_MB: "5"
      WEED_SINK_S3_KEEP_PART_SIZE: "false"
    depends_on:
      server-left:
        condition: service_healthy
      server-right:
        condition: service_healthy
  minio-warp:
    image: minio/warp
    command: 'mixed --duration 5s --obj.size=6mb --md5 --objects 10 --concurrent 2'
    restart: on-failure
    environment:
      WARP_HOST: "server-left:8333"
      WARP_ACCESS_KEY: "some_access_key1"
      WARP_SECRET_KEY: "some_secret_key1"
    depends_on:
      - filer-backup
@@ -1,89 +0,0 @@
version: '3.9'

services:
  master0:
    image: chrislusf/seaweedfs:local
    ports:
      - 9333:9333
      - 19333:19333
    command: "-v=4 master -volumeSizeLimitMB 100 -raftHashicorp -electionTimeout 1s -ip=master0 -port=9333 -peers=master1:9334,master2:9335 -mdir=/data"
    volumes:
      - ./master/0:/data
    environment:
      WEED_MASTER_VOLUME_GROWTH_COPY_1: 1
      WEED_MASTER_VOLUME_GROWTH_COPY_2: 2
      WEED_MASTER_VOLUME_GROWTH_COPY_OTHER: 1
  master1:
    image: chrislusf/seaweedfs:local
    ports:
      - 9334:9334
      - 19334:19334
    command: "-v=4 master -volumeSizeLimitMB 100 -raftHashicorp -electionTimeout 1s -ip=master1 -port=9334 -peers=master0:9333,master2:9335 -mdir=/data"
    volumes:
      - ./master/1:/data
    environment:
      WEED_MASTER_VOLUME_GROWTH_COPY_1: 1
      WEED_MASTER_VOLUME_GROWTH_COPY_2: 2
      WEED_MASTER_VOLUME_GROWTH_COPY_OTHER: 1
  master2:
    image: chrislusf/seaweedfs:local
    ports:
      - 9335:9335
      - 19335:19335
    command: "-v=4 master -volumeSizeLimitMB 100 -raftHashicorp -electionTimeout 1s -ip=master2 -port=9335 -peers=master0:9333,master1:9334 -mdir=/data"
    volumes:
      - ./master/2:/data
    environment:
      WEED_MASTER_VOLUME_GROWTH_COPY_1: 1
      WEED_MASTER_VOLUME_GROWTH_COPY_2: 2
      WEED_MASTER_VOLUME_GROWTH_COPY_OTHER: 1
  volume1:
    image: chrislusf/seaweedfs:local
    ports:
      - 8080:8080
      - 18080:18080
    command: 'volume -dataCenter=dc1 -rack=v1 -mserver="master0:9333,master1:9334,master2:9335" -port=8080 -ip=volume1 -publicUrl=localhost:8080 -preStopSeconds=1'
    depends_on:
      - master0
      - master1
  volume2:
    image: chrislusf/seaweedfs:local
    ports:
      - 8082:8082
      - 18082:18082
    command: 'volume -dataCenter=dc2 -rack=v2 -mserver="master0:9333,master1:9334,master2:9335" -port=8082 -ip=volume2 -publicUrl=localhost:8082 -preStopSeconds=1'
    depends_on:
      - master0
      - master1
  volume3:
    image: chrislusf/seaweedfs:local
    ports:
      - 8083:8083
      - 18083:18083
    command: 'volume -dataCenter=dc3 -rack=v3 -mserver="master0:9333,master1:9334,master2:9335" -port=8083 -ip=volume3 -publicUrl=localhost:8083 -preStopSeconds=1'
    depends_on:
      - master0
      - master1
  filer:
    image: chrislusf/seaweedfs:local
    ports:
      - 8888:8888
      - 18888:18888
      - 8111:8111
    command: 'filer -defaultReplicaPlacement=100 -iam -master="master0:9333,master1:9334,master2:9335"'
    depends_on:
      - master0
      - master1
      - volume1
      - volume2
  s3:
    image: chrislusf/seaweedfs:local
    ports:
      - 8333:8333
    command: '-v=9 s3 -ip.bind="s3" -filer="filer:8888"'
    depends_on:
      - master0
      - master1
      - volume1
      - volume2
      - filer
@@ -1,94 +0,0 @@
version: '3.9'

services:
  master:
    image: chrislusf/seaweedfs:local
    ports:
      - 9333:9333
      - 19333:19333
    command: "master -ip=master"
  volume:
    image: chrislusf/seaweedfs:local
    ports:
      - 8080:8080
      - 18080:18080
    command: "volume -mserver=master:9333 -port=8080 -ip=volume"
    depends_on:
      - master
  mysql:
    image: percona/percona-server:5.7
    ports:
      - 3306:3306
    volumes:
      - ./seaweedfs.sql:/docker-entrypoint-initdb.d/seaweedfs.sql
    environment:
      - MYSQL_ROOT_PASSWORD=secret
      - MYSQL_DATABASE=seaweedfs
      - MYSQL_PASSWORD=secret
      - MYSQL_USER=seaweedfs
  filer:
    image: chrislusf/seaweedfs:local
    ports:
      - 8888:8888
      - 18888:18888
    environment:
      - WEED_MYSQL_HOSTNAME=mysql
      - WEED_MYSQL_PORT=3306
      - WEED_MYSQL_DATABASE=seaweedfs
      - WEED_MYSQL_USERNAME=seaweedfs
      - WEED_MYSQL_PASSWORD=secret
      - WEED_MYSQL_ENABLED=true
      - WEED_MYSQL_CONNECTION_MAX_IDLE=5
      - WEED_MYSQL_CONNECTION_MAX_OPEN=75
      # "refresh" connection every 10 minutes, eliminating mysql closing "old" connections
      - WEED_MYSQL_CONNECTION_MAX_LIFETIME_SECONDS=600
      # enable usage of memsql as filer backend
      - WEED_MYSQL_INTERPOLATEPARAMS=true
      - WEED_LEVELDB2_ENABLED=false
    command: '-v 9 filer -master="master:9333"'
    depends_on:
      - master
      - volume
      - mysql
  ingress:
    image: jwilder/nginx-proxy:alpine
    ports:
      - "80:80"
    volumes:
      - /var/run/docker.sock:/tmp/docker.sock:ro
      - ./nginx/proxy.conf:/etc/nginx/proxy.conf
  s3:
    image: chrislusf/seaweedfs:local
    ports:
      - 8333:8333
    command: '-v 9 s3 -filer="filer:8888"'
    depends_on:
      - master
      - volume
      - filer
    environment:
      - VIRTUAL_HOST=ingress
      - VIRTUAL_PORT=8333
  registry:
    image: registry:2
    environment:
      REGISTRY_HTTP_ADDR: "0.0.0.0:5001" # seaweedfs s3
      REGISTRY_LOG_LEVEL: "debug"
      REGISTRY_STORAGE: "s3"
      REGISTRY_STORAGE_S3_REGION: "us-east-1"
      REGISTRY_STORAGE_S3_REGIONENDPOINT: "http://ingress"
      REGISTRY_STORAGE_S3_BUCKET: "registry"
      REGISTRY_STORAGE_S3_ACCESSKEY: "some_access_key1"
      REGISTRY_STORAGE_S3_SECRETKEY: "some_secret_key1"
      REGISTRY_STORAGE_S3_V4AUTH: "true"
      REGISTRY_STORAGE_S3_SECURE: "false"
      REGISTRY_STORAGE_S3_SKIPVERIFY: "true"
      REGISTRY_STORAGE_S3_ROOTDIRECTORY: "/"
      REGISTRY_STORAGE_DELETE_ENABLED: "true"
      REGISTRY_STORAGE_REDIRECT_DISABLE: "true"
      REGISTRY_VALIDATION_DISABLED: "true"
    ports:
      - 5001:5001
    depends_on:
      - s3
      - ingress
@@ -1,50 +0,0 @@
version: '3.9'

services:
  master:
    image: chrislusf/seaweedfs:local
    ports:
      - 9333:9333
      - 19333:19333
    command: "master -ip=master -volumeSizeLimitMB=100"
  volume:
    image: chrislusf/seaweedfs:local
    ports:
      - 8080:8080
      - 18080:18080
    command: "volume -mserver=master:9333 -port=8080 -ip=volume -max=0 -preStopSeconds=1"
    depends_on:
      - master
  s3:
    image: chrislusf/seaweedfs:local
    ports:
      - 8888:8888
      - 18888:18888
      - 8333:8333
    command: '-v 1 filer -master="master:9333" -s3 -s3.config=/etc/seaweedfs/s3.json -s3.port=8333'
    volumes:
      - ./s3.json:/etc/seaweedfs/s3.json
    depends_on:
      - master
      - volume
  minio-gateway-s3:
    image: minio/minio
    ports:
      - 9000:9000
    command: 'minio gateway s3 http://s3:8333'
    restart: on-failure
    environment:
      MINIO_ACCESS_KEY: "some_access_key1"
      MINIO_SECRET_KEY: "some_secret_key1"
    depends_on:
      - s3
  minio-warp:
    image: minio/warp
    command: 'mixed --duration=5m --obj.size=3mb --autoterm'
    restart: on-failure
    environment:
      WARP_HOST: "minio-gateway-s3:9000"
      WARP_ACCESS_KEY: "some_access_key1"
      WARP_SECRET_KEY: "some_secret_key1"
    depends_on:
      - minio-gateway-s3
@@ -1,46 +0,0 @@
version: '3.9'

services:
  master:
    image: chrislusf/seaweedfs:local
    ports:
      - 9333:9333
      - 19333:19333
    command: "master -ip=master"
  volume:
    image: chrislusf/seaweedfs:local
    ports:
      - 7455:8080
      - 9325:9325
    command: 'volume -mserver="master:9333" -port=8080 -metricsPort=9325 -preStopSeconds=1 -publicUrl=localhost:7455'
    depends_on:
      - master
  filer:
    image: chrislusf/seaweedfs:local
    ports:
      - 8888:8888
      - 18888:18888
      - 9326:9326
    command: 'filer -master="master:9333" -metricsPort=9326'
    tty: true
    stdin_open: true
    depends_on:
      - master
      - volume
  mount_1:
    image: chrislusf/seaweedfs:local
    privileged: true
    entrypoint: '/bin/sh -c "mkdir -p t1 && mkdir -p cache/t1 && weed -v=4 mount -filer=filer:8888 -cacheDir=./cache/t1 -dir=./t1 -filer.path=/c1 -volumeServerAccess=filerProxy"'
    depends_on:
      - master
      - volume
      - filer
  mount_2:
    image: chrislusf/seaweedfs:local
    privileged: true
    entrypoint: '/bin/sh -c "mkdir -p t2 && mkdir -p cache/t2 && weed -v=4 mount -filer=filer:8888 -cacheDir=./cache/t2 -dir=./t2 -filer.path=/c1"'
    depends_on:
      - master
      - volume
      - filer
      - mount_1
@@ -1,47 +0,0 @@
version: '3.9'

services:
  master:
    image: chrislusf/seaweedfs:local
    ports:
      - 9333:9333
      - 19333:19333
    command: "master -ip=master"
  volume:
    image: chrislusf/seaweedfs:local
    ports:
      - 7455:8080
      - 9325:9325
    volumes:
      - /Volumes/mobile_disk/99:/data
    command: 'volume -mserver="master:9333" -port=8080 -metricsPort=9325 -preStopSeconds=1 -publicUrl=localhost:7455'
    depends_on:
      - master
  filer:
    image: chrislusf/seaweedfs:local
    ports:
      - 8888:8888
      - 18888:18888
      - 9326:9326
    volumes:
      - /Volumes/mobile_disk/99:/data
    command: 'filer -master="master:9333" -metricsPort=9326'
    tty: true
    stdin_open: true
    depends_on:
      - master
      - volume
  mount:
    image: chrislusf/seaweedfs:local
    privileged: true
    cap_add:
      - SYS_ADMIN
    devices:
      - fuse
    volumes:
      - /Volumes/mobile_disk/99:/data
    entrypoint: '/bin/sh -c "mkdir -p t1 && weed -v=4 mount -filer=filer:8888 -dir=./t1 -cacheCapacityMB=0 -memprofile=/data/mount.mem.pprof"'
    depends_on:
      - master
      - volume
      - filer
@@ -1,32 +0,0 @@
services:
  server:
    image: chrislusf/seaweedfs:local
    ports:
      - 9333:9333
      - 19333:19333
      - 8888:8888
      - 18888:18888
    command: "server -ip=server -filer -volume.max=0 -master.volumeSizeLimitMB=8 -volume.preStopSeconds=1"
    healthcheck:
      test: curl -f http://localhost:8888/healthz
  mq_broker:
    image: chrislusf/seaweedfs:local
    ports:
      - 17777:17777
    command: "mq.broker -master=server:9333 -ip=mq_broker"
    depends_on:
      server:
        condition: service_healthy
  mq_agent:
    image: chrislusf/seaweedfs:local
    ports:
      - 16777:16777
    command: "mq.agent -broker=mq_broker:17777 -port=16777"
    depends_on:
      - mq_broker
  mq_client:
    image: chrislusf/seaweedfs:local
    # run a custom command instead of entrypoint
    command: "ls -al"
    depends_on:
      - mq_agent
@@ -1,44 +0,0 @@
version: '3.9'

services:
  master:
    image: chrislusf/seaweedfs:local
    ports:
      - 9333:9333
      - 19333:19333
    command: "master -ip=master"
  volume:
    image: chrislusf/seaweedfs:local
    ports:
      - 8080:8080
      - 18080:18080
    command: "volume -mserver=master:9333 -port=8080 -ip=volume"
    depends_on:
      - master
  s3:
    image: chrislusf/seaweedfs:local
    ports:
      - 8888:8888
      - 18888:18888
      - 8333:8333
    command: '-v 9 filer -master="master:9333" -s3'
    depends_on:
      - master
      - volume
  nextcloud:
    image: nextcloud:23.0.5-apache
    environment:
      - OBJECTSTORE_S3_HOST=s3
      - OBJECTSTORE_S3_BUCKET=nextcloud
      - OBJECTSTORE_S3_KEY=some_access_key1
      - OBJECTSTORE_S3_SECRET=some_secret_key1
      - OBJECTSTORE_S3_PORT=8333
      - OBJECTSTORE_S3_SSL=false
      - OBJECTSTORE_S3_USEPATH_STYLE=true
      - SQLITE_DATABASE=nextcloud
      - NEXTCLOUD_ADMIN_USER=admin
      - NEXTCLOUD_ADMIN_PASSWORD=admin
    ports:
      - 80:80
    depends_on:
      - s3
@@ -1,85 +0,0 @@
version: '3.9'

services:
  master:
    image: chrislusf/seaweedfs:local
    ports:
      - 9333:9333
      - 19333:19333
    command: "master -ip=master -volumeSizeLimitMB=100"
  volume:
    image: chrislusf/seaweedfs:local
    ports:
      - 8080:8080
      - 18080:18080
    command: "volume -mserver=master:9333 -port=8080 -ip=volume -max=0 -preStopSeconds=1"
    depends_on:
      - master
  s3:
    image: chrislusf/seaweedfs:local
    ports:
      - 8888:8888
      - 18888:18888
      - 8333:8333
    command: '-v 9 filer -master="master:9333" -s3 -s3.config=/etc/seaweedfs/s3.json -s3.port=8333'
    volumes:
      - ./s3.json:/etc/seaweedfs/s3.json
    depends_on:
      - master
      - volume
  minio:
    image: minio/minio
    ports:
      - 9000:9000
    command: 'minio server /data'
    environment:
      MINIO_ACCESS_KEY: "some_access_key1"
      MINIO_SECRET_KEY: "some_secret_key1"
    depends_on:
      - master
  registry1:
    image: registry:2
    environment:
      REGISTRY_HTTP_ADDR: "0.0.0.0:5001" # seaweedfs s3
      REGISTRY_LOG_LEVEL: "debug"
      REGISTRY_STORAGE: "s3"
      REGISTRY_STORAGE_S3_REGION: "us-east-1"
      REGISTRY_STORAGE_S3_REGIONENDPOINT: "http://s3:8333"
      REGISTRY_STORAGE_S3_BUCKET: "registry"
      REGISTRY_STORAGE_S3_ACCESSKEY: "some_access_key1"
      REGISTRY_STORAGE_S3_SECRETKEY: "some_secret_key1"
      REGISTRY_STORAGE_S3_V4AUTH: "true"
      REGISTRY_STORAGE_S3_SECURE: "false"
      REGISTRY_STORAGE_S3_SKIPVERIFY: "true"
      REGISTRY_STORAGE_S3_ROOTDIRECTORY: "/"
      REGISTRY_STORAGE_DELETE_ENABLED: "true"
      REGISTRY_STORAGE_REDIRECT_DISABLE: "true"
      REGISTRY_VALIDATION_DISABLED: "true"
    ports:
      - 5001:5001
    depends_on:
      - s3
      - minio
  registry2:
    image: registry:2
    environment:
      REGISTRY_HTTP_ADDR: "0.0.0.0:5002" # minio
      REGISTRY_LOG_LEVEL: "debug"
      REGISTRY_STORAGE: "s3"
      REGISTRY_STORAGE_S3_REGION: "us-east-1"
      REGISTRY_STORAGE_S3_REGIONENDPOINT: "http://minio:9000"
      REGISTRY_STORAGE_S3_BUCKET: "registry"
      REGISTRY_STORAGE_S3_ACCESSKEY: "some_access_key1"
      REGISTRY_STORAGE_S3_SECRETKEY: "some_secret_key1"
      REGISTRY_STORAGE_S3_V4AUTH: "true"
      REGISTRY_STORAGE_S3_SECURE: "false"
      REGISTRY_STORAGE_S3_SKIPVERIFY: "true"
      REGISTRY_STORAGE_S3_ROOTDIRECTORY: "/"
      REGISTRY_STORAGE_DELETE_ENABLED: "true"
      REGISTRY_STORAGE_REDIRECT_DISABLE: "true"
      REGISTRY_VALIDATION_DISABLED: "true"
    ports:
      - 5002:5002
    depends_on:
      - s3
      - minio
@@ -1,61 +0,0 @@
version: '3.9'

services:
  master:
    image: chrislusf/seaweedfs:local
    ports:
      - 9333:9333
      - 19333:19333
    command: "master -ip=master"
  volume:
    image: chrislusf/seaweedfs:local
    ports:
      - 8080:8080
      - 18080:18080
    command: "volume -mserver=master:9333 -port=8080 -ip=volume -preStopSeconds=1"
    depends_on:
      - master
  filer:
    image: chrislusf/seaweedfs:local
    ports:
      - 8888:8888
      - 18888:18888
    command: '-v=9 filer -master="master:9333"'
    restart: on-failure
    volumes:
      - ./notification.toml:/etc/seaweedfs/notification.toml
    depends_on:
      - master
      - volume
      - rabbitmq
      - replicate
    environment:
      RABBIT_SERVER_URL: "amqp://guest:guest@rabbitmq:5672/"
  replicate:
    image: chrislusf/seaweedfs:local
    command: '-v=9 filer.replicate'
    restart: on-failure
    volumes:
      - ./notification.toml:/etc/seaweedfs/notification.toml
      - ./replication.toml:/etc/seaweedfs/replication.toml
    depends_on:
      - rabbitmq
    environment:
      RABBIT_SERVER_URL: "amqp://guest:guest@rabbitmq:5672/"
  s3:
    image: chrislusf/seaweedfs:local
    ports:
      - 8333:8333
    command: 's3 -filer="filer:8888"'
    depends_on:
      - master
      - volume
      - filer
  rabbitmq:
    image: rabbitmq:3.8.10-management-alpine
    ports:
      - 5672:5672
      - 15671:15671
      - 15672:15672
    environment:
      RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS: "-rabbit log_levels [{connection,error},{queue,debug}]"
@@ -1,45 +0,0 @@
version: '3.9'

services:
  master:
    image: chrislusf/seaweedfs:local
    ports:
      - 9333:9333
      - 19333:19333
    command: "master -ip=master -volumeSizeLimitMB=16"
    environment:
      WEED_MASTER_VOLUME_GROWTH_COPY_1: 1
      WEED_MASTER_VOLUME_GROWTH_COPY_OTHER: 1
  volume:
    image: chrislusf/seaweedfs:local
    ports:
      - 8080:8080
      - 18080:18080
    command: "volume -mserver=master:9333 -port=8080 -ip=volume -preStopSeconds=1"
    depends_on:
      - master
  s3:
    image: chrislusf/seaweedfs:local
    ports:
      - 8888:8888
      - 18888:18888
      - 8000:8000
    command: 'filer -master="master:9333" -s3 -s3.config=/etc/seaweedfs/s3.json -s3.port=8000 -s3.allowEmptyFolder=false -s3.allowDeleteBucketNotEmpty=false'
    volumes:
      - ./s3.json:/etc/seaweedfs/s3.json
    depends_on:
      - master
      - volume
  s3tests:
    image: chrislusf/ceph-s3-tests:local
    volumes:
      - ./s3tests.conf:/opt/s3-tests/s3tests.conf
    environment:
      S3TEST_CONF: "s3tests.conf"
      NOSETESTS_OPTIONS: "--verbose --logging-level=ERROR --with-xunit --failure-detail s3tests_boto3.functional.test_s3"
      NOSETESTS_ATTR: "!fails_on_aws,!encryption,!bucket-policy,!versioning,!fails_on_rgw,!bucket-policy,!fails_with_subdomain,!policy_status,!object-lock,!lifecycle,!cors,!user-policy"
      NOSETESTS_EXCLUDE: "(post_object_tags_anonymous_request|get_obj_tagging|set_bucket_tagging|post_object_tags_authenticated_request|put_max_tags|put_modify_tags|test_put_obj_with_tags|get_bucket_encryption|delete_bucket_encryption|put_bucket_encryption|bucket_list_delimiter_basic|bucket_listv2_delimiter_basic|bucket_listv2_encoding_basic|bucket_list_encoding_basic|bucket_list_delimiter_prefix|bucket_listv2_delimiter_prefix_ends_with_delimiter|bucket_list_delimiter_prefix_ends_with_delimiter|bucket_list_delimiter_alt|bucket_listv2_delimiter_alt|bucket_list_delimiter_prefix_underscore|bucket_list_delimiter_percentage|bucket_listv2_delimiter_percentage|bucket_list_delimiter_whitespace|bucket_listv2_delimiter_whitespace|bucket_list_delimiter_dot|bucket_listv2_delimiter_dot|bucket_list_delimiter_unreadable|bucket_listv2_delimiter_unreadable|bucket_listv2_fetchowner_defaultempty|bucket_listv2_fetchowner_empty|bucket_list_prefix_delimiter_alt|bucket_listv2_prefix_delimiter_alt|bucket_list_prefix_delimiter_prefix_not_exist|bucket_listv2_prefix_delimiter_prefix_not_exist|bucket_list_prefix_delimiter_delimiter_not_exist|bucket_listv2_prefix_delimiter_delimiter_not_exist|bucket_list_prefix_delimiter_prefix_delimiter_not_exist|bucket_listv2_prefix_delimiter_prefix_delimiter_not_exist|bucket_list_maxkeys_none|bucket_listv2_maxkeys_none|bucket_list_maxkeys_invalid|bucket_listv2_continuationtoken_empty|bucket_list_return_data|bucket_list_objects_anonymous|bucket_listv2_objects_anonymous|bucket_concurrent_set_canned_acl|object_write_to_nonexist_bucket|object_requestid_matches_header_on_error|object_set_get_metadata_none_to_good|object_set_get_metadata_none_to_empty|object_set_get_metadata_overwrite_to_empty|post_object_anonymous_request|post_object_authenticated_request|post_object_authenticated_no_content_type|post_object_authenticated_request_bad_access_key|post_object_set_success_code|post_object_set_invalid_success_code|post_object_upload_larger_than_chunk|post_object_set_key_from_filename|post_object_ignored_header|post_object_case_insensitive_condition_fields|post_object_escaped_field_values|post_object_success_redirect_action|post_object_invalid_signature|post_object_invalid_access_key|post_object_missing_policy_condition|post_object_user_specified_header|post_object_request_missing_policy_specified_field|post_object_expired_policy|post_object_invalid_request_field_value|get_object_ifunmodifiedsince_good|put_object_ifmatch_failed|object_raw_get_bucket_gone|object_delete_key_bucket_gone|object_raw_get_bucket_acl|object_raw_get_object_acl|object_raw_response_headers|object_raw_authenticated_bucket_gone|object_raw_get_x_amz_expires_out_max_range|object_raw_get_x_amz_expires_out_positive_range|object_anon_put_write_access|object_raw_put_authenticated_expired|bucket_create_exists|bucket_create_naming_bad_short_one|bucket_create_naming_bad_short_two|bucket_get_location|bucket_acl_default|bucket_acl_canned|bucket_acl_canned_publicreadwrite|bucket_acl_canned_authenticatedread|object_acl_default|object_acl_canned_during_create|object_acl_canned|object_acl_canned_publicreadwrite|object_acl_canned_authenticatedread|object_acl_canned_bucketownerread|object_acl_canned_bucketownerfullcontrol|object_acl_full_control_verify_attributes|bucket_acl_canned_private_to_private|bucket_acl_grant_nonexist_user|bucket_acl_no_grants|bucket_acl_grant_email_not_exist|bucket_acl_revoke_all|bucket_recreate_not_overriding|object_copy_verify_contenttype|object_copy_to_itself_with_metadata|object_copy_not_owned_bucket|object_copy_not_owned_object_bucket|object_copy_retaining_metadata|object_copy_replacing_metadata|multipart_upload_empty|multipart_copy_invalid_range|multipart_copy_special_names|multipart_upload_resend_part|multipart_upload_size_too_small|abort_multipart_upload_not_found|multipart_upload_missing_part|100_continue|ranged_request_invalid_range|ranged_request_empty_object|access_bucket|list_multipart_upload_owner|multipart_upload_small)"
    depends_on:
      - master
      - volume
      - s3
@@ -1,56 +0,0 @@
version: '3.9'
services:
  node1:
    image: chrislusf/seaweedfs:local
    command: "server -master -volume -filer"
    ports:
      - 8888:8888
      - 18888:18888
    healthcheck:
      test: [ "CMD", "curl", "--fail", "-I", "http://localhost:9333/cluster/healthz" ]
      interval: 1s
      start_period: 10s
      timeout: 30s
  mount1:
    image: chrislusf/seaweedfs:local
    privileged: true
    command: "mount -filer=node1:8888 -dir=/mnt -dirAutoCreate"
    healthcheck:
      test: [ "CMD", "curl", "--fail", "-I", "http://node1:8888/" ]
      interval: 1s
      start_period: 10s
      timeout: 30s
    depends_on:
      node1:
        condition: service_healthy
  node2:
    image: chrislusf/seaweedfs:local
    ports:
      - 7888:8888
      - 17888:18888
    command: "server -master -volume -filer"
    healthcheck:
      test: [ "CMD", "curl", "--fail", "-I", "http://localhost:9333/cluster/healthz" ]
      interval: 1s
      start_period: 10s
      timeout: 30s
  mount2:
    image: chrislusf/seaweedfs:local
    privileged: true
    command: "mount -filer=node2:8888 -dir=/mnt -dirAutoCreate"
    healthcheck:
      test: [ "CMD", "curl", "--fail", "-I", "http://node2:8888/" ]
      interval: 1s
      start_period: 10s
      timeout: 30s
    depends_on:
      node2:
        condition: service_healthy
  sync:
    image: chrislusf/seaweedfs:local
    command: "-v=4 filer.sync -a=node1:8888 -b=node2:8888 -a.debug -b.debug"
    depends_on:
      mount1:
        condition: service_healthy
      mount2:
        condition: service_healthy
@@ -1,31 +0,0 @@
# Put this file in one of the following locations, with descending priority:
#   ./master.toml
#   $HOME/.seaweedfs/master.toml
#   /etc/seaweedfs/master.toml
# this file is read by the master

[master.maintenance]
# periodically running these scripts is the same as running them from 'weed shell'
scripts = """
  lock
  ec.encode -fullPercent=95 -quietFor=1h
  ec.rebuild -force
  ec.balance -force
  volume.balance -force
  volume.fix.replication
  unlock
"""
sleep_minutes = 17  # sleep minutes between each script execution

# configurations for tiered cloud storage
# old volumes are transparently moved to cloud for cost efficiency
[storage.backend]
  [storage.backend.s3.default]
    enabled = true
    aws_access_key_id = "any"     # if empty, loads from the shared credentials file (~/.aws/credentials).
    aws_secret_access_key = "any" # if empty, loads from the shared credentials file (~/.aws/credentials).
    region = "us-east-2"
    bucket = "volume_bucket"      # an existing bucket
    endpoint = "http://server2:8333"
    storage_class = "STANDARD_IA"
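For reference, the maintenance script above can also be run by hand. A minimal sketch, assuming a master reachable at localhost:9333 (the address used throughout these compose files):

    # pipe the same maintenance commands into weed shell non-interactively
    echo "lock; volume.fix.replication; unlock" | weed shell -master=localhost:9333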
@@ -1,17 +0,0 @@
[notification.log]
# this is only for debugging purpose and does not work with "weed filer.replicate"
enabled = false


[notification.gocdk_pub_sub]
# The Go Cloud Development Kit (https://gocloud.dev).
# PubSub API (https://godoc.org/gocloud.dev/pubsub).
# Supports AWS SNS/SQS, Azure Service Bus, Google PubSub, NATS and RabbitMQ.
enabled = true
# This URL will Dial the RabbitMQ server at the URL in the environment
# variable RABBIT_SERVER_URL and open the exchange "myexchange".
# The exchange must have already been created by some other means, like
# the RabbitMQ management plugin. Create myexchange of type fanout and myqueue, then
# create binding myexchange => myqueue
topic_url = "rabbit://swexchange"
sub_url = "rabbit://swqueue"
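As the comments note, the exchange and queue must already exist before SeaweedFS dials in. A minimal sketch using rabbitmqadmin from the RabbitMQ management plugin (running it inside the rabbitmq container from the compose file above is an assumption):

    # create the fanout exchange and queue named in topic_url/sub_url, then bind them
    rabbitmqadmin declare exchange name=swexchange type=fanout
    rabbitmqadmin declare queue name=swqueue
    rabbitmqadmin declare binding source=swexchange destination=swqueue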
@@ -1,11 +0,0 @@
[source.filer]
enabled = true
grpcAddress = "filer:18888"
# all files under this directory tree are replicated.
# this is not a directory on your hard drive, but on your filer.
# i.e., all files with this "prefix" are sent to the notification message queue.
directory = "/buckets"

[sink.local_incremental]
enabled = true
directory = "/data"
@@ -1,115 +0,0 @@
{
  "identities": [
    {
      "name": "anonymous",
      "actions": [
        "Read"
      ]
    },
    {
      "name": "some_admin_user",
      "credentials": [
        {
          "accessKey": "some_access_key1",
          "secretKey": "some_secret_key1"
        }
      ],
      "actions": [
        "Admin",
        "Read",
        "List",
        "Tagging",
        "Write"
      ]
    },
    {
      "name": "s3_tests",
      "credentials": [
        {
          "accessKey": "ABCDEFGHIJKLMNOPQRST",
          "secretKey": "abcdefghijklmnopqrstuvwxyzabcdefghijklmn"
        },
        {
          "accessKey": "0555b35654ad1656d804",
          "secretKey": "h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q=="
        }
      ],
      "actions": [
        "Admin",
        "Read",
        "List",
        "Tagging",
        "Write"
      ],
      "account": {
        "id": "testid"
      }
    },
    {
      "name": "s3_tests_alt",
      "credentials": [
        {
          "accessKey": "NOPQRSTUVWXYZABCDEFG",
          "secretKey": "nopqrstuvwxyzabcdefghijklmnabcdefghijklm"
        }
      ],
      "actions": [
        "Admin",
        "Read",
        "List",
        "Tagging",
        "Write"
      ]
    },
    {
      "name": "s3_tests_tenant",
      "credentials": [
        {
          "accessKey": "HIJKLMNOPQRSTUVWXYZA",
          "secretKey": "opqrstuvwxyzabcdefghijklmnopqrstuvwxyzab"
        }
      ],
      "actions": [
        "Admin",
        "Read",
        "List",
        "Tagging",
        "Write"
      ]
    },
    {
      "name": "some_read_only_user",
      "credentials": [
        {
          "accessKey": "some_access_key2",
          "secretKey": "some_secret_key2"
        }
      ],
      "actions": [
        "Read"
      ]
    },
    {
      "name": "some_normal_user",
      "credentials": [
        {
          "accessKey": "some_access_key3",
          "secretKey": "some_secret_key3"
        }
      ],
      "actions": [
        "Read",
        "List",
        "Tagging",
        "Write"
      ]
    }
  ],
  "accounts": [
    {
      "id": "testid",
      "displayName": "M. Tester",
      "emailAddress": "tester@ceph.com"
    }
  ]
}
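A quick way to exercise these identities is a smoke test with the stock AWS CLI; a minimal sketch, assuming the gateway is reachable on localhost:8333 (the port most of the compose files above expose; the bucket name is arbitrary):

    export AWS_ACCESS_KEY_ID=some_access_key1
    export AWS_SECRET_ACCESS_KEY=some_secret_key1
    # create and list a bucket through the SeaweedFS S3 gateway
    aws --endpoint-url http://localhost:8333 s3 mb s3://smoke-test
    aws --endpoint-url http://localhost:8333 s3 ls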
@@ -1,103 +0,0 @@
[DEFAULT]
## this section is just used for host, port and bucket_prefix

# host set for rgw in vstart.sh
host = 127.0.0.1

# port set for rgw in vstart.sh
port = 8000

## say "False" to disable TLS
is_secure = False

[fixtures]
## all the buckets created will start with this prefix;
## {random} will be filled with random characters to pad
## the prefix to 30 characters long, and avoid collisions
bucket prefix = yournamehere-{random}-

[s3 main]
# main display_name set in vstart.sh
display_name = M. Tester

# main user_id set in vstart.sh
user_id = testid

# main email set in vstart.sh
email = tester@ceph.com

# zonegroup api_name for bucket location
api_name = default

## main AWS access key
access_key = 0555b35654ad1656d804

## main AWS secret key
secret_key = h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q==

## replace with key id obtained when secret is created, or delete if KMS not tested
#kms_keyid = 01234567-89ab-cdef-0123-456789abcdef

[s3 alt]
# alt display_name set in vstart.sh
display_name = john.doe
## alt email set in vstart.sh
email = john.doe@example.com

# alt user_id set in vstart.sh
user_id = 56789abcdef0123456789abcdef0123456789abcdef0123456789abcdef01234

# alt AWS access key set in vstart.sh
access_key = NOPQRSTUVWXYZABCDEFG

# alt AWS secret key set in vstart.sh
secret_key = nopqrstuvwxyzabcdefghijklmnabcdefghijklm

[s3 tenant]
# tenant display_name set in vstart.sh
display_name = testx$tenanteduser

# tenant user_id set in vstart.sh
user_id = 9876543210abcdef0123456789abcdef0123456789abcdef0123456789abcdef

# tenant AWS access key set in vstart.sh
access_key = HIJKLMNOPQRSTUVWXYZA

# tenant AWS secret key set in vstart.sh
secret_key = opqrstuvwxyzabcdefghijklmnopqrstuvwxyzab

# tenant email set in vstart.sh
email = tenanteduser@example.com

# tenant name
tenant = testx

[iam]
# used for iam operations in sts-tests
# email from vstart.sh
email = s3@example.com

# user_id from vstart.sh
user_id = 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef

# access_key from vstart.sh
access_key = ABCDEFGHIJKLMNOPQRST

# secret_key from vstart.sh
secret_key = abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz

# display_name from vstart.sh
display_name = youruseridhere

[iam root]
access_key = AAAAAAAAAAAAAAAAAAaa
secret_key = aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
user_id = RGW11111111111111111
email = account1@ceph.com

# iam account root user in a different account than [iam root]
[iam alt root]
access_key = BBBBBBBBBBBBBBBBBBbb
secret_key = bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb
user_id = RGW22222222222222222
email = account2@ceph.com
@@ -1,84 +0,0 @@
# 2021-01-30 16:25:30
version: '3.8'

services:

  etcd:
    image: gasparekatapy/etcd
    networks:
      - net
    deploy:
      mode: replicated
      replicas: 3

  master:
    image: chrislusf/seaweedfs:local
    environment:
      WEED_MASTER_FILER_DEFAULT: "filer:8888"
      WEED_MASTER_SEQUENCER_TYPE: "raft"
    ports:
      - "9333:9333"
      - "19333:19333"
    networks:
      - net
    command:
      - 'master'
      - '-resumeState=true'
      - '-ip=master'
      - '-port=9333'
    deploy:
      mode: replicated
      replicas: 1

  filer:
    image: chrislusf/seaweedfs:local
    environment:
      WEED_LEVELDB2_ENABLED: "false"
      WEED_ETCD_ENABLED: "true"
      WEED_ETCD_SERVERS: "etcd:2379"
    ports:
      - target: 8888
        published: 8888
        protocol: tcp
        mode: host
      - target: 18888
        published: 18888
        protocol: tcp
        mode: host
    networks:
      - net
    command:
      - 'filer'
      - '-ip=filer'
      - '-port=8888'
      - '-port.readonly=28888'
      - '-master=master:9333'
      - '-disableDirListing=true'
    deploy:
      mode: replicated
      replicas: 1

  volume:
    image: chrislusf/seaweedfs:local
    ports:
      - target: 8080
        published: 8080
        protocol: tcp
        mode: host
      - target: 18080
        published: 18080
        protocol: tcp
        mode: host
    networks:
      - net
    command:
      - 'volume'
      - '-mserver=master:9333'
      - '-port=8080'
    deploy:
      mode: global

###########################################################################

networks:
  net:
@@ -1,62 +0,0 @@
version: '3.9'

services:
  etcd:
    image: quay.io/coreos/etcd:v3.5.4
    command: "etcd --advertise-client-urls http://etcd:2379 --listen-client-urls http://0.0.0.0:2379"
    ports:
      - 2379:2379
  master:
    image: chrislusf/seaweedfs:local
    ports:
      - 9333:9333
      - 19333:19333
    command: "master -ip=master -volumeSizeLimitMB=100"
  volume:
    image: chrislusf/seaweedfs:local
    ports:
      - 8080:8080
      - 18080:18080
    command: "volume -mserver=master:9333 -port=8080 -ip=volume -max=0 -preStopSeconds=1"
    depends_on:
      - master
  s3:
    image: chrislusf/seaweedfs:local
    ports:
      - 8888:8888
      - 18888:18888
      - 8333:8333
    command: '-v 9 filer -master="master:9333" -s3 -s3.config=/etc/seaweedfs/s3.json -s3.port=8333'
    environment:
      WEED_LEVELDB2_ENABLED: 'false'
      WEED_ETCD_ENABLED: 'true'
      WEED_ETCD_KEY_PREFIX: 'seaweedfs.'
      WEED_ETCD_SERVERS: "http://etcd:2379"
    volumes:
      - ./s3.json:/etc/seaweedfs/s3.json
    depends_on:
      - etcd
      - master
      - volume
  registry:
    image: registry:2
    environment:
      REGISTRY_HTTP_ADDR: "0.0.0.0:5001" # seaweedfs s3
      REGISTRY_LOG_LEVEL: "debug"
      REGISTRY_STORAGE: "s3"
      REGISTRY_STORAGE_S3_REGION: "us-east-1"
      REGISTRY_STORAGE_S3_REGIONENDPOINT: "http://s3:8333"
      REGISTRY_STORAGE_S3_BUCKET: "registry"
      REGISTRY_STORAGE_S3_ACCESSKEY: "some_access_key1"
      REGISTRY_STORAGE_S3_SECRETKEY: "some_secret_key1"
      REGISTRY_STORAGE_S3_V4AUTH: "true"
      REGISTRY_STORAGE_S3_SECURE: "false"
      REGISTRY_STORAGE_S3_SKIPVERIFY: "true"
      REGISTRY_STORAGE_S3_ROOTDIRECTORY: "/"
      REGISTRY_STORAGE_DELETE_ENABLED: "true"
      REGISTRY_STORAGE_REDIRECT_DISABLE: "true"
      REGISTRY_VALIDATION_DISABLED: "true"
    ports:
      - 5001:5001
    depends_on:
      - s3
@@ -1,30 +0,0 @@
version: '3.9'

services:
  tarantool:
    image: chrislusf/tarantool_dev_env
    entrypoint: "tt start app -i"
    environment:
      APP_USER_PASSWORD: "app"
      CLIENT_USER_PASSWORD: "client"
      REPLICATOR_USER_PASSWORD: "replicator"
      STORAGE_USER_PASSWORD: "storage"
    network_mode: "host"
    ports:
      - "3303:3303"

  s3:
    image: chrislusf/seaweedfs:local
    command: "server -ip=127.0.0.1 -filer -master.volumeSizeLimitMB=16 -volume.max=0 -volume -volume.preStopSeconds=1 -s3 -s3.config=/etc/seaweedfs/s3.json -s3.port=8000 -s3.allowEmptyFolder=false -s3.allowDeleteBucketNotEmpty=false"
    volumes:
      - ./s3.json:/etc/seaweedfs/s3.json
    environment:
      WEED_LEVELDB2_ENABLED: "false"
      WEED_TARANTOOL_ENABLED: "true"
      WEED_TARANTOOL_ADDRESS: "127.0.0.1:3303"
      WEED_TARANTOOL_USER: "client"
      WEED_TARANTOOL_PASSWORD: "client"
      WEED_MASTER_VOLUME_GROWTH_COPY_OTHER: 1
    network_mode: "host"
    depends_on:
      - tarantool
@@ -1,35 +0,0 @@
version: '3.9'

services:
  ydb:
    image: cr.yandex/yc/yandex-docker-local-ydb
    ports:
      - 2135:2135
      - 8765:8765
      - 2136:2136
    environment:
      - YDB_DEFAULT_LOG_LEVEL=DEBUG
      - GRPC_TLS_PORT=2135
      - GRPC_PORT=2136
      - MON_PORT=8765
  s3:
    image: chrislusf/seaweedfs:local
    ports:
      - 9333:9333
      - 19333:19333
      - 8888:8888
      - 8000:8000
      - 18888:18888
    command: "server -ip=s3 -filer -master.volumeSizeLimitMB=16 -volume.max=0 -volume -volume.preStopSeconds=1 -s3 -s3.config=/etc/seaweedfs/s3.json -s3.port=8000 -s3.allowEmptyFolder=false -s3.allowDeleteBucketNotEmpty=false"
    volumes:
      - ./s3.json:/etc/seaweedfs/s3.json
    environment:
      WEED_LEVELDB2_ENABLED: "false"
      WEED_YDB_ENABLED: "true"
      WEED_YDB_DSN: "grpc://ydb:2136/?database=local"
      WEED_YDB_PREFIX: "seaweedfs"
      YDB_ANONYMOUS_CREDENTIALS: 1
      WEED_MASTER_VOLUME_GROWTH_COPY_1: 1
      WEED_MASTER_VOLUME_GROWTH_COPY_OTHER: 1
    depends_on:
      - ydb
@@ -1,20 +0,0 @@
WEED_GRPC_CA=/etc/seaweedfs/tls/SeaweedFS_CA.crt
WEED_GRPC_ALLOWED_WILDCARD_DOMAIN=".dev"
WEED_GRPC_MASTER_CERT=/etc/seaweedfs/tls/master01.dev.crt
WEED_GRPC_MASTER_KEY=/etc/seaweedfs/tls/master01.dev.key
WEED_GRPC_VOLUME_CERT=/etc/seaweedfs/tls/volume01.dev.crt
WEED_GRPC_VOLUME_KEY=/etc/seaweedfs/tls/volume01.dev.key
WEED_GRPC_FILER_CERT=/etc/seaweedfs/tls/filer01.dev.crt
WEED_GRPC_FILER_KEY=/etc/seaweedfs/tls/filer01.dev.key
WEED_GRPC_CLIENT_CERT=/etc/seaweedfs/tls/client01.dev.crt
WEED_GRPC_CLIENT_KEY=/etc/seaweedfs/tls/client01.dev.key
WEED_GRPC_MASTER_ALLOWED_COMMONNAMES="volume01.dev,master01.dev,filer01.dev,client01.dev"
WEED_GRPC_VOLUME_ALLOWED_COMMONNAMES="volume01.dev,master01.dev,filer01.dev,client01.dev"
WEED_GRPC_FILER_ALLOWED_COMMONNAMES="volume01.dev,master01.dev,filer01.dev,client01.dev"
WEED_GRPC_CLIENT_ALLOWED_COMMONNAMES="volume01.dev,master01.dev,filer01.dev,client01.dev"
WEED_HTTPS_CLIENT_ENABLE=true
WEED_HTTPS_VOLUME_CERT=/etc/seaweedfs/tls/volume01.dev.crt
WEED_HTTPS_VOLUME_KEY=/etc/seaweedfs/tls/volume01.dev.key
WEED_HTTPS_VOLUME_CA=/etc/seaweedfs/tls/SeaweedFS_CA.crt
#GRPC_GO_LOG_SEVERITY_LEVEL=info
#GRPC_GO_LOG_VERBOSITY_LEVEL=2
@@ -1,37 +0,0 @@
[
  {
    "Username": "admin",
    "Password": "myadminpassword",
    "PublicKeys": [
    ],
    "HomeDir": "/",
    "Permissions": {
      "/": ["*"]
    },
    "Uid": 0,
    "Gid": 0
  },
  {
    "Username": "user1",
    "Password": "myuser1password",
    "PublicKeys": [""],
    "HomeDir": "/user1",
    "Permissions": {
      "/user1": ["*"],
      "/public": ["read", "list", "write"]
    },
    "Uid": 1111,
    "Gid": 1111
  },
  {
    "Username": "readonly",
    "Password": "myreadonlypassword",
    "PublicKeys": [],
    "HomeDir": "/public",
    "Permissions": {
      "/public": ["read", "list"]
    },
    "Uid": 1112,
    "Gid": 1112
  }
]
docker/docker-compose.yml (new file, 40 lines)

@@ -0,0 +1,40 @@
version: '2'

services:
  master:
    #image: chrislusf/seaweedfs  # use a remote image
    build: .  # build our container from the local Dockerfile
    ports:
      - 9333:9333
    command: "master"
    networks:
      default:
        aliases:
          - seaweed_master
  volume:
    #image: chrislusf/seaweedfs  # use a remote image
    build: .  # build our container from the local Dockerfile
    ports:
      - 8080:8080
      - 18080:18080
    command: 'volume -max=5 -mserver="master:9333" -port=8080'
    depends_on:
      - master
    networks:
      default:
        aliases:
          - seaweed_volume
  filer:
    #image: chrislusf/seaweedfs  # use a remote image
    build: .  # build our container from the local Dockerfile
    ports:
      - 8888:8888
      - 18888:18888
    command: 'filer -master="master:9333"'
    depends_on:
      - master
      - volume
    networks:
      default:
        aliases:
          - seaweed_filer
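A minimal usage sketch for this stack, assuming default ports and an arbitrary test file:

    docker-compose up -d
    # store a file through the filer's HTTP API, then read it back
    curl -F file=@/etc/hosts "http://localhost:8888/test/"
    curl "http://localhost:8888/test/hosts"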
@@ -1,66 +1,39 @@
 #!/bin/sh

-isArgPassed() {
-  arg="$1"
-  argWithEqualSign="$1="
-  shift
-  while [ $# -gt 0 ]; do
-    passedArg="$1"
-    shift
-    case $passedArg in
-      $arg)
-        return 0
-        ;;
-      $argWithEqualSign*)
-        return 0
-        ;;
-    esac
-  done
-  return 1
-}
-
 case "$1" in

   'master')
-    ARGS="-mdir=/data -volumePreallocate -volumeSizeLimitMB=1024"
-    shift
-    exec /usr/bin/weed -logtostderr=true master $ARGS $@
+    ARGS="-ip `hostname -i` -mdir /data"
+    # Is this instance linked with an other master? (Docker commandline "--link master1:master")
+    if [ -n "$MASTER_PORT_9333_TCP_ADDR" ] ; then
+      ARGS="$ARGS -peers=$MASTER_PORT_9333_TCP_ADDR:$MASTER_PORT_9333_TCP_PORT"
+    fi
+    exec /usr/bin/weed $@ $ARGS
     ;;

   'volume')
-    ARGS="-dir=/data -max=0"
-    if isArgPassed "-max" "$@"; then
-      ARGS="-dir=/data"
+    ARGS="-ip `hostname -i` -dir /data"
+    # Is this instance linked with a master? (Docker commandline "--link master1:master")
+    if [ -n "$MASTER_PORT_9333_TCP_ADDR" ] ; then
+      ARGS="$ARGS -mserver=$MASTER_PORT_9333_TCP_ADDR:$MASTER_PORT_9333_TCP_PORT"
     fi
-    shift
-    exec /usr/bin/weed -logtostderr=true volume $ARGS $@
+    exec /usr/bin/weed $@ $ARGS
     ;;

   'server')
-    ARGS="-dir=/data -volume.max=0 -master.volumePreallocate -master.volumeSizeLimitMB=1024"
-    if isArgPassed "-volume.max" "$@"; then
-      ARGS="-dir=/data -master.volumePreallocate -master.volumeSizeLimitMB=1024"
+    ARGS="-ip `hostname -i` -dir /data"
+    if [ -n "$MASTER_PORT_9333_TCP_ADDR" ] ; then
+      ARGS="$ARGS -master.peers=$MASTER_PORT_9333_TCP_ADDR:$MASTER_PORT_9333_TCP_PORT"
     fi
-    shift
-    exec /usr/bin/weed -logtostderr=true server $ARGS $@
+    exec /usr/bin/weed $@ $ARGS
     ;;

-  'filer')
-    ARGS=""
-    shift
-    exec /usr/bin/weed -logtostderr=true filer $ARGS $@
-    ;;
-
-  's3')
-    ARGS="-domainName=$S3_DOMAIN_NAME -key.file=$S3_KEY_FILE -cert.file=$S3_CERT_FILE"
-    shift
-    exec /usr/bin/weed -logtostderr=true s3 $ARGS $@
-    ;;
-
   'shell')
-    ARGS="-cluster=$SHELL_CLUSTER -filer=$SHELL_FILER -filerGroup=$SHELL_FILER_GROUP -master=$SHELL_MASTER -options=$SHELL_OPTIONS"
-    shift
-    exec echo "$@" | /usr/bin/weed -logtostderr=true shell $ARGS
+    ARGS="-ip `hostname -i`"
+    if [ -n "$MASTER_PORT_9333_TCP_ADDR" ] ; then
+      ARGS="$ARGS -master=$MASTER_PORT_9333_TCP_ADDR:$MASTER_PORT_9333_TCP_PORT"
+    fi
+    exec /usr/bin/weed $@ $ARGS
     ;;

   *)

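The isArgPassed helper on the master side exists so that the baked-in defaults yield to user-supplied flags. A sketch of the resulting behavior (image tag and flag value are illustrative assumptions):

    docker run chrislusf/seaweedfs volume          # default -max=0 is prepended
    docker run chrislusf/seaweedfs volume -max=16  # isArgPassed sees -max, so only -dir=/data is added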
@@ -1,3 +1,3 @@
-[leveldb2]
+[leveldb]
 enabled = true
-dir = "/data/filerldb2"
+dir = "."
@@ -1,3 +0,0 @@
[rocksdb]
enabled = true
dir = "/data/filer_rocksdb"
@@ -1,30 +0,0 @@
# HTTP 1.1 support
proxy_http_version 1.1;
#proxy_buffering off;
proxy_set_header Host $http_host;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $proxy_connection;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $proxy_x_forwarded_proto;
proxy_set_header X-Forwarded-Ssl $proxy_x_forwarded_ssl;
proxy_set_header X-Forwarded-Port $proxy_x_forwarded_port;

# Mitigate httpoxy attack (see README for details)
proxy_set_header Proxy "";

# aws default max_concurrent_requests 10
# aws default multipart_threshold 8MB
proxy_buffering on;          # GET buffering; "X-Accel-Buffering" enables or disables buffering of a response
proxy_buffers 64 1m;         # buffers used for reading a response from the proxied server, for a single connection
proxy_buffer_size 8k;        # maximum size of the data that nginx can receive from the server at a time
proxy_busy_buffers_size 2m;

proxy_request_buffering on;  # PUT buffering
client_body_buffer_size 64m; # buffer size for reading client request body
client_max_body_size 64m;

proxy_next_upstream error timeout non_idempotent http_500; # a PUT request should be passed to the next server
proxy_connect_timeout 200ms;
proxy_read_timeout 3s;       # timeout only between two successive read operations
proxy_send_timeout 3s;       # timeout only between two successive write operations
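These directives are meant to be include'd into a server/location block; the compose file earlier in this diff mounts them into the jwilder/nginx-proxy container at /etc/nginx/proxy.conf. A minimal sketch for verifying the combined configuration inside that container (the container name is an assumption):

    docker exec ingress nginx -t         # parse-check the merged configuration
    docker exec ingress nginx -s reload  # apply it without restarting the container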
@@ -1,14 +0,0 @@
global:
  scrape_interval: 30s
  scrape_timeout: 10s

scrape_configs:
  - job_name: services
    metrics_path: /metrics
    static_configs:
      - targets:
          - 'prometheus:9090'
          - 'master:9324'
          - 'volume:9325'
          - 'filer:9326'
          - 's3:9327'
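Each target is a metrics endpoint enabled by the -metricsPort flags in the accompanying compose file. A minimal sketch for spot-checking one from the host, assuming the master's metrics port is published as in that compose file:

    # SeaweedFS components export Prometheus text-format metrics on their metricsPort
    curl -s http://localhost:9324/metrics | head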
@@ -1,59 +0,0 @@
version: '3.9'

services:
  master:
    image: chrislusf/seaweedfs # use a remote image
    ports:
      - 9333:9333
      - 19333:19333
      - 9324:9324
    command: "master -ip=master -ip.bind=0.0.0.0 -metricsPort=9324"
  volume:
    image: chrislusf/seaweedfs # use a remote image
    ports:
      - 8080:8080
      - 18080:18080
      - 9325:9325
    command: 'volume -mserver="master:9333" -ip.bind=0.0.0.0 -port=8080 -metricsPort=9325'
    depends_on:
      - master
  filer:
    image: chrislusf/seaweedfs # use a remote image
    ports:
      - 8888:8888
      - 18888:18888
      - 9326:9326
    command: 'filer -master="master:9333" -ip.bind=0.0.0.0 -metricsPort=9326'
    tty: true
    stdin_open: true
    depends_on:
      - master
      - volume
  s3:
    image: chrislusf/seaweedfs # use a remote image
    ports:
      - 8333:8333
      - 9327:9327
    command: 's3 -filer="filer:8888" -ip.bind=0.0.0.0 -metricsPort=9327'
    depends_on:
      - master
      - volume
      - filer
  webdav:
    image: chrislusf/seaweedfs # use a remote image
    ports:
      - 7333:7333
    command: 'webdav -filer="filer:8888"'
    depends_on:
      - master
      - volume
      - filer
  prometheus:
    image: prom/prometheus:v2.21.0
    ports:
      - 9000:9090
    volumes:
      - ./prometheus:/etc/prometheus
    command: --web.enable-lifecycle --config.file=/etc/prometheus/prometheus.yml
    depends_on:
      - s3
@@ -1,44 +0,0 @@
version: '3.9'

services:
  master:
    image: chrislusf/seaweedfs:dev # use a remote dev image
    ports:
      - 9333:9333
      - 19333:19333
    command: "master -ip=master"
  volume:
    image: chrislusf/seaweedfs:dev # use a remote dev image
    ports:
      - 8080:8080
      - 18080:18080
    command: 'volume -mserver="master:9333" -port=8080 -ip=volume'
    depends_on:
      - master
  filer:
    image: chrislusf/seaweedfs:dev # use a remote dev image
    ports:
      - 8888:8888
      - 18888:18888
    command: 'filer -master="master:9333" -ip.bind=0.0.0.0'
    depends_on:
      - master
      - volume
  s3:
    image: chrislusf/seaweedfs:dev # use a remote dev image
    ports:
      - 8333:8333
    command: 's3 -filer="filer:8888" -ip.bind=0.0.0.0'
    depends_on:
      - master
      - volume
      - filer
  webdav:
    image: chrislusf/seaweedfs:dev # use a remote dev image
    ports:
      - 7333:7333
    command: 'webdav -filer="filer:8888"'
    depends_on:
      - master
      - volume
      - filer
@ -1,12 +0,0 @@
CREATE DATABASE IF NOT EXISTS seaweedfs;
CREATE USER IF NOT EXISTS 'seaweedfs'@'%' IDENTIFIED BY 'secret';
GRANT ALL PRIVILEGES ON seaweedfs.* TO 'seaweedfs'@'%';
FLUSH PRIVILEGES;
USE seaweedfs;
CREATE TABLE IF NOT EXISTS `filemeta` (
  `dirhash`   BIGINT       NOT NULL COMMENT 'first 64 bits of MD5 hash value of directory field',
  `name`      VARCHAR(766) NOT NULL COMMENT 'directory or file name',
  `directory` TEXT         NOT NULL COMMENT 'full path to parent directory',
  `meta`      LONGBLOB,
  PRIMARY KEY (`dirhash`, `name`)
) DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;
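For reference, the dirhash key is described as the first 64 bits of the MD5 of the directory path. A rough sketch of that derivation (the byte order and signedness here are assumptions for illustration, not the filer store's exact code):

import hashlib
import struct

def dir_hash(directory: str) -> int:
    # first 64 bits of the MD5 of the directory path, folded into a
    # signed 64-bit integer so it fits the BIGINT column
    # (big-endian byte order is an assumption here)
    digest = hashlib.md5(directory.encode("utf-8")).digest()
    return struct.unpack(">q", digest[:8])[0]

print(dir_hash("/buckets/demo"))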
@ -1,14 +0,0 @@
package = 'app'
version = 'scm-1'
source = {
    url = '/dev/null',
}
dependencies = {
    'crud == 1.5.2-1',
    'expirationd == 1.6.0-1',
    'metrics-export-role == 0.3.0-1',
    'vshard == 0.1.32-1'
}
build = {
    type = 'none';
}
@ -1,145 +0,0 @@
config:
  context:
    app_user_password:
      from: env
      env: APP_USER_PASSWORD
    client_user_password:
      from: env
      env: CLIENT_USER_PASSWORD
    replicator_user_password:
      from: env
      env: REPLICATOR_USER_PASSWORD
    storage_user_password:
      from: env
      env: STORAGE_USER_PASSWORD

credentials:
  roles:
    crud-role:
      privileges:
        - permissions: [ "execute" ]
          lua_call: [ "crud.delete", "crud.get", "crud.upsert" ]
  users:
    app:
      password: '{{ context.app_user_password }}'
      roles: [ public, crud-role ]
    client:
      password: '{{ context.client_user_password }}'
      roles: [ super ]
    replicator:
      password: '{{ context.replicator_user_password }}'
      roles: [ replication ]
    storage:
      password: '{{ context.storage_user_password }}'
      roles: [ sharding ]

iproto:
  advertise:
    peer:
      login: replicator
    sharding:
      login: storage

sharding:
  bucket_count: 10000

metrics:
  include: [ all ]
  exclude: [ vinyl ]
  labels:
    alias: '{{ instance_name }}'

groups:
  storages:
    roles:
      - roles.crud-storage
      - roles.expirationd
      - roles.metrics-export
    roles_cfg:
      roles.expirationd:
        cfg:
          metrics: true
        filer_metadata_task:
          space: filer_metadata
          is_expired: filer_metadata.is_expired
          options:
            atomic_iteration: true
            force: true
            index: 'expire_at_idx'
            iterator_type: GT
            start_key:
              - 0
            tuples_per_iteration: 10000
    app:
      module: storage
    sharding:
      roles: [ storage ]
    replication:
      failover: election
    database:
      use_mvcc_engine: true
    replicasets:
      storage-001:
        instances:
          storage-001-a:
            roles_cfg:
              roles.metrics-export:
                http:
                  - listen: '0.0.0.0:8081'
                    endpoints:
                      - path: /metrics/prometheus/
                        format: prometheus
                      - path: /metrics/json
                        format: json
            iproto:
              listen:
                - uri: 127.0.0.1:3301
              advertise:
                client: 127.0.0.1:3301
          storage-001-b:
            roles_cfg:
              roles.metrics-export:
                http:
                  - listen: '0.0.0.0:8082'
                    endpoints:
                      - path: /metrics/prometheus/
                        format: prometheus
                      - path: /metrics/json
                        format: json
            iproto:
              listen:
                - uri: 127.0.0.1:3302
              advertise:
                client: 127.0.0.1:3302
  routers:
    roles:
      - roles.crud-router
      - roles.metrics-export
    roles_cfg:
      roles.crud-router:
        stats: true
        stats_driver: metrics
        stats_quantiles: true
    app:
      module: router
    sharding:
      roles: [ router ]
    replicasets:
      router-001:
        instances:
          router-001-a:
            roles_cfg:
              roles.metrics-export:
                http:
                  - listen: '0.0.0.0:8083'
                    endpoints:
                      - path: /metrics/prometheus/
                        format: prometheus
                      - path: /metrics/json
                        format: json
            iproto:
              listen:
                - uri: 127.0.0.1:3303
              advertise:
                client: 127.0.0.1:3303
@ -1,7 +0,0 @@
---
storage-001-a:

storage-001-b:

router-001-a:
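With all three instances started, the granted crud calls can be exercised through the router. A minimal sketch using the Python tarantool connector (the connector choice and the demo key are assumptions; the key_value space itself is created by the storage module shown further below):

import os
import tarantool  # pip install tarantool

# router-001-a listens on 127.0.0.1:3303 (see the config above)
conn = tarantool.connect(
    "127.0.0.1", 3303,
    user="app", password=os.environ["APP_USER_PASSWORD"],
)
# crud.get is one of the lua_call grants of the app user's crud-role
print(conn.call("crud.get", "key_value", "demo-key"))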
@ -1,77 +0,0 @@
local vshard = require('vshard')
local log = require('log')

-- Bootstrap the vshard router.
while true do
    local ok, err = vshard.router.bootstrap({
        if_not_bootstrapped = true,
    })
    if ok then
        break
    end
    log.info(('Router bootstrap error: %s'):format(err))
end

-- functions for filer_metadata space
local filer_metadata = {
    delete_by_directory_idx = function(directory)
        -- find all storages
        local storages = require('vshard').router.routeall()
        -- on each storage
        for _, storage in pairs(storages) do
            -- call local function
            local result, err = storage:callrw('filer_metadata.delete_by_directory_idx', { directory })
            -- check for error
            if err then
                error("Failed to call function on storage: " .. tostring(err))
            end
        end
        -- return
        return true
    end,
    find_by_directory_idx_and_name = function(dirPath, startFileName, includeStartFile, limit)
        -- init results
        local results = {}
        -- find all storages
        local storages = require('vshard').router.routeall()
        -- on each storage
        for _, storage in pairs(storages) do
            -- call local function
            local result, err = storage:callro('filer_metadata.find_by_directory_idx_and_name', {
                dirPath,
                startFileName,
                includeStartFile,
                limit
            })
            -- check for error
            if err then
                error("Failed to call function on storage: " .. tostring(err))
            end
            -- add to results
            for _, tuple in ipairs(result) do
                table.insert(results, tuple)
            end
        end
        -- sort
        table.sort(results, function(a, b) return a[3] < b[3] end)
        -- apply limit
        if #results > limit then
            local limitedResults = {}
            for i = 1, limit do
                table.insert(limitedResults, results[i])
            end
            results = limitedResults
        end
        -- return
        return results
    end,
}

rawset(_G, 'filer_metadata', filer_metadata)

-- register functions for filer_metadata space, set grants
for name, _ in pairs(filer_metadata) do
    box.schema.func.create('filer_metadata.' .. name, { if_not_exists = true })
    box.schema.user.grant('app', 'execute', 'function', 'filer_metadata.' .. name, { if_not_exists = true })
    box.schema.user.grant('client', 'execute', 'function', 'filer_metadata.' .. name, { if_not_exists = true })
end
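The registered helpers are then callable over iproto by the granted users. A sketch (same hypothetical Python connector and directory path as above) that lists up to 100 entries of a directory through the router:

import os
import tarantool

conn = tarantool.connect(
    "127.0.0.1", 3303,
    user="client", password=os.environ["CLIENT_USER_PASSWORD"],
)
# (dirPath, startFileName, includeStartFile, limit), as defined above
resp = conn.call("filer_metadata.find_by_directory_idx_and_name",
                 "/buckets/demo", "", True, 100)
for row in resp:
    print(row)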
@ -1,97 +0,0 @@
box.watch('box.status', function()
    if box.info.ro then
        return
    end

    -- ====================================
    -- key_value space
    -- ====================================
    box.schema.create_space('key_value', {
        format = {
            { name = 'key', type = 'string' },
            { name = 'bucket_id', type = 'unsigned' },
            { name = 'value', type = 'string' }
        },
        if_not_exists = true
    })

    -- create key_value space indexes
    box.space.key_value:create_index('id', { type = 'tree', parts = { 'key' }, unique = true, if_not_exists = true })
    box.space.key_value:create_index('bucket_id', { type = 'tree', parts = { 'bucket_id' }, unique = false, if_not_exists = true })

    -- ====================================
    -- filer_metadata space
    -- ====================================
    box.schema.create_space('filer_metadata', {
        format = {
            { name = 'directory', type = 'string' },
            { name = 'bucket_id', type = 'unsigned' },
            { name = 'name', type = 'string' },
            { name = 'expire_at', type = 'unsigned' },
            { name = 'data', type = 'string' }
        },
        if_not_exists = true
    })

    -- create filer_metadata space indexes
    box.space.filer_metadata:create_index('id', { type = 'tree', parts = { 'directory', 'name' }, unique = true, if_not_exists = true })
    box.space.filer_metadata:create_index('bucket_id', { type = 'tree', parts = { 'bucket_id' }, unique = false, if_not_exists = true })
    box.space.filer_metadata:create_index('directory_idx', { type = 'tree', parts = { 'directory' }, unique = false, if_not_exists = true })
    box.space.filer_metadata:create_index('name_idx', { type = 'tree', parts = { 'name' }, unique = false, if_not_exists = true })
    box.space.filer_metadata:create_index('expire_at_idx', { type = 'tree', parts = { 'expire_at' }, unique = false, if_not_exists = true })
end)

-- functions for filer_metadata space
local filer_metadata = {
    delete_by_directory_idx = function(directory)
        local space = box.space.filer_metadata
        local index = space.index.directory_idx
        -- for each entry found under the directory
        for _, tuple in index:pairs({ directory }, { iterator = 'EQ' }) do
            space:delete({ tuple[1], tuple[3] })
        end
        return true
    end,
    find_by_directory_idx_and_name = function(dirPath, startFileName, includeStartFile, limit)
        local space = box.space.filer_metadata
        local directory_idx = space.index.directory_idx
        -- choose the file-name filter function
        local filter_filename_func
        if includeStartFile then
            filter_filename_func = function(value) return value >= startFileName end
        else
            filter_filename_func = function(value) return value > startFileName end
        end
        -- init results
        local results = {}
        -- for each entry found under the directory
        for _, tuple in directory_idx:pairs({ dirPath }, { iterator = 'EQ' }) do
            -- filter by name
            if filter_filename_func(tuple[3]) then
                table.insert(results, tuple)
            end
        end
        -- sort
        table.sort(results, function(a, b) return a[3] < b[3] end)
        -- apply limit
        if #results > limit then
            local limitedResults = {}
            for i = 1, limit do
                table.insert(limitedResults, results[i])
            end
            results = limitedResults
        end
        -- return
        return results
    end,
    is_expired = function(args, tuple)
        return (tuple[4] > 0) and (require('fiber').time() > tuple[4])
    end
}

-- register functions for filer_metadata space, set grants
rawset(_G, 'filer_metadata', filer_metadata)
for name, _ in pairs(filer_metadata) do
    box.schema.func.create('filer_metadata.' .. name, { setuid = true, if_not_exists = true })
    box.schema.user.grant('storage', 'execute', 'function', 'filer_metadata.' .. name, { if_not_exists = true })
end
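This is_expired callback is the one referenced by filer_metadata_task in the expirationd role configuration earlier: the task walks the expire_at_idx index (iterator GT, start key 0) and removes tuples whose expire_at is set and already in the past.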
274 docker/test.py
@ -1,274 +0,0 @@
#!/usr/bin/env python3
# /// script
# requires-python = ">=3.12"
# dependencies = [
#     "boto3",
# ]
# ///

import argparse
import json
import random
import string
import subprocess
from enum import Enum
from pathlib import Path

import boto3

REGION_NAME = "us-east-1"


class Actions(str, Enum):
    Get = "Get"
    Put = "Put"
    List = "List"


def get_user_dir(bucket_name, user, with_bucket=True):
    if with_bucket:
        return f"{bucket_name}/user-id-{user}"

    return f"user-id-{user}"


def create_power_user():
    power_user_key = "power_user_key"
    power_user_secret = "power_user_secret"
    command = f"s3.configure -apply -user poweruser -access_key {power_user_key} -secret_key {power_user_secret} -actions Admin"
    print("Creating Power User...")
    subprocess.run(
        ["docker", "exec", "-i", "seaweedfs-master-1", "weed", "shell"],
        input=command,
        text=True,
        stdout=subprocess.PIPE,
    )
    print(
        f"Power User created with key: {power_user_key} and secret: {power_user_secret}"
    )
    return power_user_key, power_user_secret


def create_bucket(s3_client, bucket_name):
    print(f"Creating Bucket {bucket_name}...")
    s3_client.create_bucket(Bucket=bucket_name)
    print(f"Bucket {bucket_name} created.")


def upload_file(s3_client, bucket_name, user, file_path, custom_remote_path=None):
    user_dir = get_user_dir(bucket_name, user, with_bucket=False)
    if custom_remote_path:
        remote_path = custom_remote_path
    else:
        remote_path = f"{user_dir}/{str(Path(file_path).name)}"

    print(f"Uploading {file_path} for {user}... on {user_dir}")

    s3_client.upload_file(file_path, bucket_name, remote_path)
    print(f"File {file_path} uploaded for {user}.")


def create_user(iam_client, user):
    print(f"Creating user {user}...")
    response = iam_client.create_access_key(UserName=user)
    print(
        f"User {user} created with access key: {response['AccessKey']['AccessKeyId']}"
    )
    return response


def list_files(s3_client, bucket_name, path=None):
    if path is None:
        path = ""
    print(f"Listing files of s3://{bucket_name}/{path}...")
    try:
        response = s3_client.list_objects_v2(Bucket=bucket_name, Prefix=path)
        if "Contents" in response:
            for obj in response["Contents"]:
                print(f"\t - {obj['Key']}")
        else:
            print("No files found.")
    except Exception as e:
        print(f"Error listing files: {e}")


def create_policy_for_user(
    iam_client, user, bucket_name, actions=[Actions.Get, Actions.List]
):
    print(f"Creating policy for {user} on {bucket_name}...")
    policy_document = {
        "Version": "2012-10-17",
        "Statement": [
            {
                "Effect": "Allow",
                "Action": [f"s3:{action.value}*" for action in actions],
                "Resource": [
                    f"arn:aws:s3:::{get_user_dir(bucket_name, user)}/*",
                ],
            }
        ],
    }
    policy_name = f"{user}-{bucket_name}-full-access"

    policy_json = json.dumps(policy_document)
    filepath = f"/tmp/{policy_name}.json"
    with open(filepath, "w") as f:
        f.write(json.dumps(policy_document, indent=2))

    iam_client.put_user_policy(
        PolicyName=policy_name, PolicyDocument=policy_json, UserName=user
    )
    print(f"Policy for {user} on {bucket_name} created.")


def main():
    parser = argparse.ArgumentParser(description="SeaweedFS S3 Test Script")
    parser.add_argument(
        "--s3-url", default="http://127.0.0.1:8333", help="S3 endpoint URL"
    )
    parser.add_argument(
        "--iam-url", default="http://127.0.0.1:8111", help="IAM endpoint URL"
    )
    args = parser.parse_args()

    bucket_name = (
        f"test-bucket-{''.join(random.choices(string.digits + 'abcdef', k=8))}"
    )
    sentinel_file = "/tmp/SENTINEL"
    with open(sentinel_file, "w") as f:
        f.write("Hello World")
    print(f"SENTINEL file created at {sentinel_file}")

    power_user_key, power_user_secret = create_power_user()

    admin_s3_client = get_s3_client(args, power_user_key, power_user_secret)
    iam_client = get_iam_client(args, power_user_key, power_user_secret)

    create_bucket(admin_s3_client, bucket_name)
    upload_file(admin_s3_client, bucket_name, "Alice", sentinel_file)
    upload_file(admin_s3_client, bucket_name, "Bob", sentinel_file)
    list_files(admin_s3_client, bucket_name)

    alice_user_info = create_user(iam_client, "Alice")
    bob_user_info = create_user(iam_client, "Bob")

    alice_key = alice_user_info["AccessKey"]["AccessKeyId"]
    alice_secret = alice_user_info["AccessKey"]["SecretAccessKey"]
    bob_key = bob_user_info["AccessKey"]["AccessKeyId"]
    bob_secret = bob_user_info["AccessKey"]["SecretAccessKey"]

    # Make sure Admin can read any files
    list_files(admin_s3_client, bucket_name)
    list_files(
        admin_s3_client,
        bucket_name,
        get_user_dir(bucket_name, "Alice", with_bucket=False),
    )
    list_files(
        admin_s3_client,
        bucket_name,
        get_user_dir(bucket_name, "Bob", with_bucket=False),
    )

    # Create read policy for Alice and Bob
    create_policy_for_user(iam_client, "Alice", bucket_name)
    create_policy_for_user(iam_client, "Bob", bucket_name)

    alice_s3_client = get_s3_client(args, alice_key, alice_secret)

    # Make sure Alice can read her files
    list_files(
        alice_s3_client,
        bucket_name,
        get_user_dir(bucket_name, "Alice", with_bucket=False) + "/",
    )

    # Make sure Bob can read his files
    bob_s3_client = get_s3_client(args, bob_key, bob_secret)
    list_files(
        bob_s3_client,
        bucket_name,
        get_user_dir(bucket_name, "Bob", with_bucket=False) + "/",
    )

    # Update policy to include write
    create_policy_for_user(iam_client, "Alice", bucket_name, actions=[Actions.Put, Actions.Get, Actions.List])  # fmt: off
    create_policy_for_user(iam_client, "Bob", bucket_name, actions=[Actions.Put, Actions.Get, Actions.List])  # fmt: off

    print("############################# Make sure Alice can write her files")
    upload_file(
        alice_s3_client,
        bucket_name,
        "Alice",
        sentinel_file,
        custom_remote_path=f"{get_user_dir(bucket_name, 'Alice', with_bucket=False)}/SENTINEL_by_Alice",
    )

    print("############################# Make sure Bob can write his files")
    upload_file(
        bob_s3_client,
        bucket_name,
        "Bob",
        sentinel_file,
        custom_remote_path=f"{get_user_dir(bucket_name, 'Bob', with_bucket=False)}/SENTINEL_by_Bob",
    )

    print("############################# Make sure Alice can read her new files")
    list_files(
        alice_s3_client,
        bucket_name,
        get_user_dir(bucket_name, "Alice", with_bucket=False) + "/",
    )

    print("############################# Make sure Bob can read his new files")
    list_files(
        bob_s3_client,
        bucket_name,
        get_user_dir(bucket_name, "Bob", with_bucket=False) + "/",
    )

    print("############################# Make sure Bob cannot read Alice's files")
    list_files(
        bob_s3_client,
        bucket_name,
        get_user_dir(bucket_name, "Alice", with_bucket=False) + "/",
    )

    print("############################# Make sure Alice cannot read Bob's files")
    list_files(
        alice_s3_client,
        bucket_name,
        get_user_dir(bucket_name, "Bob", with_bucket=False) + "/",
    )


def get_iam_client(args, access_key, secret_key):
    iam_client = boto3.client(
        "iam",
        endpoint_url=args.iam_url,
        region_name=REGION_NAME,
        aws_access_key_id=access_key,
        aws_secret_access_key=secret_key,
    )
    return iam_client


def get_s3_client(args, access_key, secret_key):
    s3_client = boto3.client(
        "s3",
        endpoint_url=args.s3_url,
        region_name=REGION_NAME,
        aws_access_key_id=access_key,
        aws_secret_access_key=secret_key,
    )
    return s3_client


if __name__ == "__main__":
    main()
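The "# /// script" header at the top is PEP 723 inline metadata, so a metadata-aware runner (for example "uv run docker/test.py", assuming uv is available) can resolve the boto3 dependency automatically before running the test.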
411 go.mod
@ -1,411 +0,0 @@
module github.com/seaweedfs/seaweedfs

go 1.24

toolchain go1.24.1

require (
	cloud.google.com/go v0.121.4 // indirect
	cloud.google.com/go/pubsub v1.49.0
	cloud.google.com/go/storage v1.55.0
	github.com/Azure/azure-pipeline-go v0.2.3
	github.com/Azure/azure-storage-blob-go v0.15.0
	github.com/Shopify/sarama v1.38.1
	github.com/aws/aws-sdk-go v1.55.7
	github.com/beorn7/perks v1.0.1 // indirect
	github.com/bwmarrin/snowflake v0.3.0
	github.com/cenkalti/backoff/v4 v4.3.0
	github.com/cespare/xxhash/v2 v2.3.0 // indirect
	github.com/coreos/go-semver v0.3.1 // indirect
	github.com/coreos/go-systemd/v22 v22.5.0 // indirect
	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
	github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
	github.com/dustin/go-humanize v1.0.1
	github.com/eapache/go-resiliency v1.3.0 // indirect
	github.com/eapache/go-xerial-snappy v0.0.0-20230111030713-bf00bc1b83b6 // indirect
	github.com/eapache/queue v1.1.0 // indirect
	github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a
	github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c // indirect
	github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 // indirect
	github.com/facebookgo/stats v0.0.0-20151006221625-1b76add642e4
	github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4 // indirect
	github.com/fsnotify/fsnotify v1.9.0 // indirect
	github.com/go-redsync/redsync/v4 v4.13.0
	github.com/go-sql-driver/mysql v1.9.3
	github.com/go-zookeeper/zk v1.0.3 // indirect
	github.com/gocql/gocql v1.7.0
	github.com/golang/protobuf v1.5.4
	github.com/golang/snappy v1.0.0 // indirect
	github.com/google/btree v1.1.3
	github.com/google/uuid v1.6.0
	github.com/google/wire v0.6.0 // indirect
	github.com/googleapis/gax-go/v2 v2.15.0 // indirect
	github.com/gorilla/mux v1.8.1
	github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed // indirect
	github.com/hashicorp/errwrap v1.1.0 // indirect
	github.com/hashicorp/go-multierror v1.1.1 // indirect
	github.com/hashicorp/go-uuid v1.0.3 // indirect
	github.com/jcmturner/gofork v1.7.6 // indirect
	github.com/jcmturner/gokrb5/v8 v8.4.4 // indirect
	github.com/jinzhu/copier v0.4.0
	github.com/jmespath/go-jmespath v0.4.0 // indirect
	github.com/json-iterator/go v1.1.12
	github.com/karlseguin/ccache/v2 v2.0.8
	github.com/klauspost/compress v1.18.0 // indirect
	github.com/klauspost/reedsolomon v1.12.5
	github.com/kurin/blazer v0.5.3
	github.com/lib/pq v1.10.9
	github.com/linxGnu/grocksdb v1.10.1
	github.com/mailru/easyjson v0.7.7 // indirect
	github.com/mattn/go-ieproxy v0.0.11 // indirect
	github.com/mattn/go-isatty v0.0.20 // indirect
	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
	github.com/modern-go/reflect2 v1.0.2 // indirect
	github.com/olivere/elastic/v7 v7.0.32
	github.com/peterh/liner v1.2.2
	github.com/pkg/errors v0.9.1 // indirect
	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
	github.com/posener/complete v1.2.3
	github.com/pquerna/cachecontrol v0.2.0
	github.com/prometheus/client_golang v1.22.0
	github.com/prometheus/client_model v0.6.2 // indirect
	github.com/prometheus/common v0.64.0 // indirect
	github.com/prometheus/procfs v0.17.0
	github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect
	github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
	github.com/seaweedfs/goexif v1.0.3
	github.com/seaweedfs/raft v1.1.3
	github.com/sirupsen/logrus v1.9.3 // indirect
	github.com/spf13/afero v1.12.0 // indirect
	github.com/spf13/cast v1.7.1 // indirect
	github.com/spf13/viper v1.20.1
	github.com/stretchr/testify v1.10.0
	github.com/stvp/tempredis v0.0.0-20181119212430-b82af8480203
	github.com/syndtr/goleveldb v1.0.1-0.20190318030020-c3a204f8e965
	github.com/tidwall/gjson v1.18.0
	github.com/tidwall/match v1.1.1
	github.com/tidwall/pretty v1.2.0 // indirect
	github.com/tsuna/gohbase v0.0.0-20201125011725-348991136365
	github.com/tylertreat/BoomFilters v0.0.0-20210315201527-1a82519a3e43
	github.com/valyala/bytebufferpool v1.0.0
	github.com/viant/ptrie v1.0.1
	github.com/xdg-go/pbkdf2 v1.0.0 // indirect
	github.com/xdg-go/scram v1.1.2 // indirect
	github.com/xdg-go/stringprep v1.0.4 // indirect
	github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 // indirect
	go.etcd.io/etcd/client/v3 v3.6.2
	go.mongodb.org/mongo-driver v1.17.4
	go.opencensus.io v0.24.0 // indirect
	gocloud.dev v0.43.0
	gocloud.dev/pubsub/natspubsub v0.42.0
	gocloud.dev/pubsub/rabbitpubsub v0.43.0
	golang.org/x/crypto v0.40.0
	golang.org/x/exp v0.0.0-20250606033433-dcc06ee1d476
	golang.org/x/image v0.29.0
	golang.org/x/net v0.42.0
	golang.org/x/oauth2 v0.30.0 // indirect
	golang.org/x/sys v0.34.0
	golang.org/x/text v0.27.0 // indirect
	golang.org/x/tools v0.35.0
	golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect
	google.golang.org/api v0.242.0
	google.golang.org/genproto v0.0.0-20250715232539-7130f93afb79 // indirect
	google.golang.org/grpc v1.73.0
	google.golang.org/protobuf v1.36.6
	gopkg.in/inf.v0 v0.9.1 // indirect
	modernc.org/b v1.0.0 // indirect
	modernc.org/mathutil v1.7.1
	modernc.org/memory v1.11.0 // indirect
	modernc.org/sqlite v1.38.0
	modernc.org/strutil v1.2.1
)

require (
	github.com/Jille/raft-grpc-transport v1.6.1
	github.com/ThreeDotsLabs/watermill v1.4.7
	github.com/a-h/templ v0.3.920
	github.com/arangodb/go-driver v1.6.6
	github.com/armon/go-metrics v0.4.1
	github.com/aws/aws-sdk-go-v2 v1.36.6
	github.com/aws/aws-sdk-go-v2/config v1.29.18
	github.com/aws/aws-sdk-go-v2/credentials v1.17.71
	github.com/aws/aws-sdk-go-v2/service/s3 v1.84.1
	github.com/cognusion/imaging v1.0.2
	github.com/fluent/fluent-logger-golang v1.10.0
	github.com/getsentry/sentry-go v0.34.1
	github.com/gin-contrib/sessions v1.0.4
	github.com/gin-gonic/gin v1.10.1
	github.com/golang-jwt/jwt/v5 v5.2.3
	github.com/google/flatbuffers/go v0.0.0-20230108230133-3b8644d32c50
	github.com/hanwen/go-fuse/v2 v2.8.0
	github.com/hashicorp/raft v1.7.3
	github.com/hashicorp/raft-boltdb/v2 v2.3.1
	github.com/minio/crc64nvme v1.0.2
	github.com/orcaman/concurrent-map/v2 v2.0.1
	github.com/parquet-go/parquet-go v0.25.1
	github.com/pkg/sftp v1.13.9
	github.com/rabbitmq/amqp091-go v1.10.0
	github.com/rclone/rclone v1.70.3
	github.com/rdleal/intervalst v1.5.0
	github.com/redis/go-redis/v9 v9.11.0
	github.com/schollz/progressbar/v3 v3.18.0
	github.com/shirou/gopsutil/v3 v3.24.5
	github.com/tarantool/go-tarantool/v2 v2.4.0
	github.com/tikv/client-go/v2 v2.0.7
	github.com/ydb-platform/ydb-go-sdk-auth-environ v0.5.0
	github.com/ydb-platform/ydb-go-sdk/v3 v3.113.1
	go.etcd.io/etcd/client/pkg/v3 v3.6.2
	go.uber.org/atomic v1.11.0
	golang.org/x/sync v0.16.0
	google.golang.org/grpc/security/advancedtls v1.0.0
)

require github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88 // indirect

require (
	github.com/cenkalti/backoff/v3 v3.2.2 // indirect
	github.com/lithammer/shortuuid/v3 v3.0.7 // indirect
)

require (
	cel.dev/expr v0.24.0 // indirect
	cloud.google.com/go/auth v0.16.3 // indirect
	cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect
	cloud.google.com/go/compute/metadata v0.7.0 // indirect
	cloud.google.com/go/iam v1.5.2 // indirect
	cloud.google.com/go/monitoring v1.24.2 // indirect
	filippo.io/edwards25519 v1.1.0 // indirect
	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.1 // indirect
	github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.10.1 // indirect
	github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1 // indirect
	github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.1 // indirect
	github.com/Azure/azure-sdk-for-go/sdk/storage/azfile v1.5.1 // indirect
	github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 // indirect
	github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 // indirect
	github.com/Files-com/files-sdk-go/v3 v3.2.173 // indirect
	github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.29.0 // indirect
	github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.53.0 // indirect
	github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.53.0 // indirect
	github.com/IBM/go-sdk-core/v5 v5.20.0 // indirect
	github.com/Max-Sum/base32768 v0.0.0-20230304063302-18e6ce5945fd // indirect
	github.com/Microsoft/go-winio v0.6.2 // indirect
	github.com/ProtonMail/bcrypt v0.0.0-20211005172633-e235017c1baf // indirect
	github.com/ProtonMail/gluon v0.17.1-0.20230724134000-308be39be96e // indirect
	github.com/ProtonMail/go-crypto v1.3.0 // indirect
	github.com/ProtonMail/go-mime v0.0.0-20230322103455-7d82a3887f2f // indirect
	github.com/ProtonMail/go-srp v0.0.7 // indirect
	github.com/ProtonMail/gopenpgp/v2 v2.9.0 // indirect
	github.com/PuerkitoBio/goquery v1.10.3 // indirect
	github.com/abbot/go-http-auth v0.4.0 // indirect
	github.com/andybalholm/brotli v1.1.0 // indirect
	github.com/andybalholm/cascadia v1.3.3 // indirect
	github.com/appscode/go-querystring v0.0.0-20170504095604-0126cfb3f1dc // indirect
	github.com/arangodb/go-velocypack v0.0.0-20200318135517-5af53c29c67e // indirect
	github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
	github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.11 // indirect
	github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.33 // indirect
	github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.84 // indirect
	github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.37 // indirect
	github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.37 // indirect
	github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect
	github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.37 // indirect
	github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.4 // indirect
	github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.5 // indirect
	github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.18 // indirect
	github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.18 // indirect
	github.com/aws/aws-sdk-go-v2/service/sns v1.34.7 // indirect
	github.com/aws/aws-sdk-go-v2/service/sqs v1.38.8 // indirect
	github.com/aws/aws-sdk-go-v2/service/sso v1.25.6 // indirect
	github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.4 // indirect
	github.com/aws/aws-sdk-go-v2/service/sts v1.34.1 // indirect
	github.com/aws/smithy-go v1.22.4 // indirect
	github.com/boltdb/bolt v1.3.1 // indirect
	github.com/bradenaw/juniper v0.15.3 // indirect
	github.com/bradfitz/iter v0.0.0-20191230175014-e8f45d346db8 // indirect
	github.com/buengese/sgzip v0.1.1 // indirect
	github.com/bytedance/sonic v1.13.2 // indirect
	github.com/bytedance/sonic/loader v0.2.4 // indirect
	github.com/calebcase/tmpfile v1.0.3 // indirect
	github.com/chilts/sid v0.0.0-20190607042430-660e94789ec9 // indirect
	github.com/cloudflare/circl v1.6.1 // indirect
	github.com/cloudinary/cloudinary-go/v2 v2.10.0 // indirect
	github.com/cloudsoda/go-smb2 v0.0.0-20250228001242-d4c70e6251cc // indirect
	github.com/cloudsoda/sddl v0.0.0-20250224235906-926454e91efc // indirect
	github.com/cloudwego/base64x v0.1.5 // indirect
	github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443 // indirect
	github.com/colinmarc/hdfs/v2 v2.4.0 // indirect
	github.com/creasty/defaults v1.8.0 // indirect
	github.com/cronokirby/saferith v0.33.0 // indirect
	github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548 // indirect
	github.com/d4l3k/messagediff v1.2.1 // indirect
	github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 // indirect
	github.com/dropbox/dropbox-sdk-go-unofficial/v6 v6.0.5 // indirect
	github.com/ebitengine/purego v0.8.4 // indirect
	github.com/elastic/gosigar v0.14.2 // indirect
	github.com/emersion/go-message v0.18.2 // indirect
	github.com/emersion/go-vcard v0.0.0-20241024213814-c9703dde27ff // indirect
	github.com/envoyproxy/go-control-plane/envoy v1.32.4 // indirect
	github.com/envoyproxy/protoc-gen-validate v1.2.1 // indirect
	github.com/fatih/color v1.16.0 // indirect
	github.com/felixge/httpsnoop v1.0.4 // indirect
	github.com/flynn/noise v1.1.0 // indirect
	github.com/gabriel-vasile/mimetype v1.4.9 // indirect
	github.com/geoffgarside/ber v1.2.0 // indirect
	github.com/gin-contrib/sse v1.0.0 // indirect
	github.com/go-chi/chi/v5 v5.2.2 // indirect
	github.com/go-darwin/apfs v0.0.0-20211011131704-f84b94dbf348 // indirect
	github.com/go-jose/go-jose/v4 v4.1.1 // indirect
	github.com/go-logr/logr v1.4.3 // indirect
	github.com/go-logr/stdr v1.2.2 // indirect
	github.com/go-ole/go-ole v1.3.0 // indirect
	github.com/go-openapi/errors v0.22.1 // indirect
	github.com/go-openapi/strfmt v0.23.0 // indirect
	github.com/go-playground/locales v0.14.1 // indirect
	github.com/go-playground/universal-translator v0.18.1 // indirect
	github.com/go-playground/validator/v10 v10.26.0 // indirect
	github.com/go-resty/resty/v2 v2.16.5 // indirect
	github.com/go-viper/mapstructure/v2 v2.3.0 // indirect
	github.com/goccy/go-json v0.10.5 // indirect
	github.com/gofrs/flock v0.12.1 // indirect
	github.com/gogo/protobuf v1.3.2 // indirect
	github.com/golang-jwt/jwt/v4 v4.5.2 // indirect
	github.com/google/s2a-go v0.1.9 // indirect
	github.com/googleapis/enterprise-certificate-proxy v0.3.6 // indirect
	github.com/gorilla/context v1.1.2 // indirect
	github.com/gorilla/schema v1.4.1 // indirect
	github.com/gorilla/securecookie v1.1.2 // indirect
	github.com/gorilla/sessions v1.4.0 // indirect
	github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect
	github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1 // indirect
	github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
	github.com/hashicorp/go-hclog v1.6.3 // indirect
	github.com/hashicorp/go-immutable-radix v1.3.1 // indirect
	github.com/hashicorp/go-metrics v0.5.4 // indirect
	github.com/hashicorp/go-msgpack/v2 v2.1.2 // indirect
	github.com/hashicorp/go-retryablehttp v0.7.7 // indirect
	github.com/hashicorp/golang-lru v0.6.0 // indirect
	github.com/henrybear327/Proton-API-Bridge v1.0.0 // indirect
	github.com/henrybear327/go-proton-api v1.0.0 // indirect
	github.com/jcmturner/aescts/v2 v2.0.0 // indirect
	github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect
	github.com/jcmturner/goidentity/v6 v6.0.1 // indirect
	github.com/jcmturner/rpc/v2 v2.0.3 // indirect
	github.com/jlaffaye/ftp v0.2.1-0.20240918233326-1b970516f5d3 // indirect
	github.com/jonboulle/clockwork v0.5.0 // indirect
	github.com/josharian/intern v1.0.0 // indirect
	github.com/jtolio/noiseconn v0.0.0-20231127013910-f6d9ecbf1de7 // indirect
	github.com/jzelinskie/whirlpool v0.0.0-20201016144138-0675e54bb004 // indirect
	github.com/k0kubun/pp v3.0.1+incompatible
	github.com/klauspost/cpuid/v2 v2.2.10 // indirect
	github.com/koofr/go-httpclient v0.0.0-20240520111329-e20f8f203988 // indirect
	github.com/koofr/go-koofrclient v0.0.0-20221207135200-cbd7fc9ad6a6 // indirect
	github.com/kr/fs v0.1.0 // indirect
	github.com/kylelemons/godebug v1.1.0 // indirect
	github.com/lanrat/extsort v1.0.2 // indirect
	github.com/leodido/go-urn v1.4.0 // indirect
	github.com/lpar/date v1.0.0 // indirect
	github.com/lufia/plan9stats v0.0.0-20250317134145-8bc96cf8fc35 // indirect
	github.com/mattn/go-colorable v0.1.14 // indirect
	github.com/mattn/go-runewidth v0.0.16 // indirect
	github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db // indirect
	github.com/mitchellh/go-homedir v1.1.0 // indirect
	github.com/mitchellh/mapstructure v1.5.0 // indirect
	github.com/montanaflynn/stats v0.7.1 // indirect
	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
	github.com/nats-io/nats.go v1.40.1 // indirect
	github.com/nats-io/nkeys v0.4.10 // indirect
	github.com/nats-io/nuid v1.0.1 // indirect
	github.com/ncruces/go-strftime v0.1.9 // indirect
	github.com/ncw/swift/v2 v2.0.4 // indirect
	github.com/nxadm/tail v1.4.11 // indirect
	github.com/oklog/ulid v1.3.1 // indirect
	github.com/onsi/ginkgo/v2 v2.23.3 // indirect
	github.com/opentracing/opentracing-go v1.2.0 // indirect
	github.com/oracle/oci-go-sdk/v65 v65.93.0 // indirect
	github.com/panjf2000/ants/v2 v2.11.3 // indirect
	github.com/patrickmn/go-cache v2.1.0+incompatible // indirect
	github.com/pelletier/go-toml/v2 v2.2.4 // indirect
	github.com/pengsrc/go-shared v0.2.1-0.20190131101655-1999055a4a14 // indirect
	github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c // indirect
	github.com/pierrec/lz4/v4 v4.1.21 // indirect
	github.com/pingcap/errors v0.11.5-0.20211224045212-9687c2b0f87c // indirect
	github.com/pingcap/failpoint v0.0.0-20220801062533-2eaa32854a6c // indirect
	github.com/pingcap/kvproto v0.0.0-20230403051650-e166ae588106 // indirect
	github.com/pingcap/log v1.1.1-0.20221110025148-ca232912c9f3 // indirect
	github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
	github.com/pkg/xattr v0.4.10 // indirect
	github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect
	github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect
	github.com/putdotio/go-putio/putio v0.0.0-20200123120452-16d982cac2b8 // indirect
	github.com/relvacode/iso8601 v1.6.0 // indirect
	github.com/rfjakob/eme v1.1.2 // indirect
	github.com/rivo/uniseg v0.4.7 // indirect
	github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06 // indirect
	github.com/sagikazarmark/locafero v0.7.0 // indirect
	github.com/samber/lo v1.50.0 // indirect
	github.com/shirou/gopsutil/v4 v4.25.5 // indirect
	github.com/shoenig/go-m1cpu v0.1.6 // indirect
	github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 // indirect
	github.com/smartystreets/goconvey v1.8.1 // indirect
	github.com/sony/gobreaker v1.0.0 // indirect
	github.com/sourcegraph/conc v0.3.0 // indirect
	github.com/spacemonkeygo/monkit/v3 v3.0.24 // indirect
	github.com/spf13/pflag v1.0.6 // indirect
	github.com/spiffe/go-spiffe/v2 v2.5.0 // indirect
	github.com/subosito/gotenv v1.6.0 // indirect
	github.com/t3rm1n4l/go-mega v0.0.0-20241213151442-a19cff0ec7b5 // indirect
	github.com/tarantool/go-iproto v1.1.0 // indirect
	github.com/tiancaiamao/gp v0.0.0-20221230034425-4025bc8a4d4a // indirect
	github.com/tikv/pd/client v0.0.0-20230329114254-1948c247c2b1 // indirect
	github.com/tinylib/msgp v1.3.0 // indirect
	github.com/tklauser/go-sysconf v0.3.15 // indirect
	github.com/tklauser/numcpus v0.10.0 // indirect
	github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
	github.com/twmb/murmur3 v1.1.3 // indirect
	github.com/ugorji/go/codec v1.2.12 // indirect
	github.com/unknwon/goconfig v1.0.0 // indirect
	github.com/vmihailenco/msgpack/v5 v5.4.1 // indirect
	github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect
	github.com/xanzy/ssh-agent v0.3.3 // indirect
	github.com/yandex-cloud/go-genproto v0.0.0-20211115083454-9ca41db5ed9e // indirect
	github.com/ydb-platform/ydb-go-genproto v0.0.0-20241112172322-ea1f63298f77 // indirect
	github.com/ydb-platform/ydb-go-yc v0.12.1 // indirect
	github.com/ydb-platform/ydb-go-yc-metadata v0.6.1 // indirect
	github.com/yunify/qingstor-sdk-go/v3 v3.2.0 // indirect
	github.com/yusufpapurcu/wmi v1.2.4 // indirect
	github.com/zeebo/blake3 v0.2.4 // indirect
	github.com/zeebo/errs v1.4.0 // indirect
	go.etcd.io/bbolt v1.4.0 // indirect
	go.etcd.io/etcd/api/v3 v3.6.2 // indirect
	go.opentelemetry.io/auto/sdk v1.1.0 // indirect
	go.opentelemetry.io/contrib/detectors/gcp v1.37.0 // indirect
	go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.62.0 // indirect
	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.62.0 // indirect
	go.opentelemetry.io/otel v1.37.0 // indirect
	go.opentelemetry.io/otel/metric v1.37.0 // indirect
	go.opentelemetry.io/otel/sdk v1.37.0 // indirect
	go.opentelemetry.io/otel/sdk/metric v1.37.0 // indirect
	go.opentelemetry.io/otel/trace v1.37.0 // indirect
	go.uber.org/multierr v1.11.0 // indirect
	go.uber.org/zap v1.27.0 // indirect
	golang.org/x/arch v0.16.0 // indirect
	golang.org/x/term v0.33.0 // indirect
	golang.org/x/time v0.12.0 // indirect
	google.golang.org/genproto/googleapis/api v0.0.0-20250715232539-7130f93afb79 // indirect
	google.golang.org/genproto/googleapis/rpc v0.0.0-20250715232539-7130f93afb79 // indirect
	gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
	gopkg.in/validator.v2 v2.0.1 // indirect
	gopkg.in/yaml.v2 v2.4.0 // indirect
	gopkg.in/yaml.v3 v3.0.1 // indirect
	modernc.org/libc v1.65.10 // indirect
	moul.io/http2curl/v2 v2.3.0 // indirect
	sigs.k8s.io/yaml v1.4.0 // indirect
	storj.io/common v0.0.0-20250605163628-70ca83b6228e // indirect
	storj.io/drpc v0.0.35-0.20250513201419-f7819ea69b55 // indirect
	storj.io/eventkit v0.0.0-20250410172343-61f26d3de156 // indirect
	storj.io/infectious v0.0.2 // indirect
	storj.io/picobuf v0.0.4 // indirect
	storj.io/uplink v1.13.1 // indirect
)

// replace github.com/seaweedfs/raft => /Users/chrislu/go/src/github.com/seaweedfs/raft
@ -1,16 +0,0 @@
# Artifact Hub repository metadata file
#
# Some settings like the verified publisher flag or the ignored packages won't
# be applied until the next time the repository is processed. Please keep in
# mind that the repository won't be processed if it has not changed since the
# last time it was processed. Depending on the repository kind, this is checked
# in a different way. For Helm http based repositories, we consider it has
# changed if the `index.yaml` file changes. For git based repositories, it does
# when the hash of the last commit in the branch you set up changes. This does
# NOT apply to ownership claim operations, which are processed immediately.
#

repositoryID: 5b2f1fe2-20e5-486e-9746-183484642aa2
# owners: # (optional, used to claim repository ownership)
#   - name: username
#     email: email
Some files were not shown because too many files have changed in this diff.