name: "Ceph S3 tests" on: push: branches: [ master ] pull_request: branches: [ master ] concurrency: group: ${{ github.head_ref }}/s3tests cancel-in-progress: true permissions: contents: read jobs: basic-s3-tests: name: Basic S3 tests (KV store) runs-on: ubuntu-22.04 timeout-minutes: 15 steps: - name: Check out code into the Go module directory uses: actions/checkout@v4 - name: Set up Go 1.x uses: actions/setup-go@v5.5.0 with: go-version-file: 'go.mod' id: go - name: Set up Python uses: actions/setup-python@v4 with: python-version: '3.9' - name: Clone s3-tests run: | git clone https://github.com/ceph/s3-tests.git cd s3-tests pip install -r requirements.txt pip install tox pip install -e . - name: Run Basic S3 tests timeout-minutes: 15 env: S3TEST_CONF: ../docker/compose/s3tests.conf shell: bash run: | cd weed go install -buildvcs=false set -x # Create clean data directory for this test run export WEED_DATA_DIR="/tmp/seaweedfs-s3tests-$(date +%s)" mkdir -p "$WEED_DATA_DIR" weed -v 0 server -filer -filer.maxMB=64 -s3 -ip.bind 0.0.0.0 \ -dir="$WEED_DATA_DIR" \ -master.raftHashicorp -master.electionTimeout 1s -master.volumeSizeLimitMB=100 \ -volume.max=100 -volume.preStopSeconds=1 \ -master.port=9333 -volume.port=8080 -filer.port=8888 -s3.port=8000 -metricsPort=9324 \ -s3.allowEmptyFolder=false -s3.allowDeleteBucketNotEmpty=true -s3.config=../docker/compose/s3.json & pid=$! # Wait for all SeaweedFS components to be ready echo "Waiting for SeaweedFS components to start..." for i in {1..30}; do if curl -s http://localhost:9333/cluster/status > /dev/null 2>&1; then echo "Master server is ready" break fi echo "Waiting for master server... ($i/30)" sleep 2 done for i in {1..30}; do if curl -s http://localhost:8080/status > /dev/null 2>&1; then echo "Volume server is ready" break fi echo "Waiting for volume server... ($i/30)" sleep 2 done for i in {1..30}; do if curl -s http://localhost:8888/ > /dev/null 2>&1; then echo "Filer is ready" break fi echo "Waiting for filer... ($i/30)" sleep 2 done for i in {1..30}; do if curl -s http://localhost:8000/ > /dev/null 2>&1; then echo "S3 server is ready" break fi echo "Waiting for S3 server... ($i/30)" sleep 2 done echo "All SeaweedFS components are ready!" cd ../s3-tests sed -i "s/assert prefixes == \['foo%2B1\/', 'foo\/', 'quux%20ab\/'\]/assert prefixes == \['foo\/', 'foo%2B1\/', 'quux%20ab\/'\]/" s3tests_boto3/functional/test_s3.py # Debug: Show the config file contents echo "=== S3 Config File Contents ===" cat ../docker/compose/s3tests.conf echo "=== End Config ===" # Additional wait for S3-Filer integration to be fully ready echo "Waiting additional 10 seconds for S3-Filer integration..." sleep 10 # Test S3 connection before running tests echo "Testing S3 connection..." for i in {1..10}; do if curl -s -f http://localhost:8000/ > /dev/null 2>&1; then echo "S3 connection test successful" break fi echo "S3 connection test failed, retrying... ($i/10)" sleep 2 done echo "✅ S3 server is responding, starting tests..." tox -- \ s3tests_boto3/functional/test_s3.py::test_bucket_list_empty \ s3tests_boto3/functional/test_s3.py::test_bucket_list_distinct \ s3tests_boto3/functional/test_s3.py::test_bucket_list_many \ s3tests_boto3/functional/test_s3.py::test_bucket_listv2_many \ s3tests_boto3/functional/test_s3.py::test_bucket_listv2_delimiter_basic \ s3tests_boto3/functional/test_s3.py::test_bucket_listv2_encoding_basic \ s3tests_boto3/functional/test_s3.py::test_bucket_listv2_delimiter_prefix \ s3tests_boto3/functional/test_s3.py::test_bucket_listv2_delimiter_prefix_ends_with_delimiter \ s3tests_boto3/functional/test_s3.py::test_bucket_listv2_delimiter_alt \ s3tests_boto3/functional/test_s3.py::test_bucket_listv2_delimiter_prefix_underscore \ s3tests_boto3/functional/test_s3.py::test_bucket_listv2_delimiter_percentage \ s3tests_boto3/functional/test_s3.py::test_bucket_listv2_delimiter_whitespace \ s3tests_boto3/functional/test_s3.py::test_bucket_listv2_delimiter_dot \ s3tests_boto3/functional/test_s3.py::test_bucket_listv2_delimiter_unreadable \ s3tests_boto3/functional/test_s3.py::test_bucket_listv2_delimiter_empty \ s3tests_boto3/functional/test_s3.py::test_bucket_listv2_delimiter_none \ s3tests_boto3/functional/test_s3.py::test_bucket_listv2_delimiter_not_exist \ s3tests_boto3/functional/test_s3.py::test_bucket_list_prefix_delimiter_basic \ s3tests_boto3/functional/test_s3.py::test_bucket_listv2_prefix_delimiter_basic \ s3tests_boto3/functional/test_s3.py::test_bucket_list_prefix_delimiter_alt \ s3tests_boto3/functional/test_s3.py::test_bucket_listv2_prefix_delimiter_alt \ s3tests_boto3/functional/test_s3.py::test_bucket_list_prefix_delimiter_prefix_not_exist \ s3tests_boto3/functional/test_s3.py::test_bucket_listv2_prefix_delimiter_prefix_not_exist \ s3tests_boto3/functional/test_s3.py::test_bucket_list_prefix_delimiter_delimiter_not_exist \ s3tests_boto3/functional/test_s3.py::test_bucket_listv2_prefix_delimiter_delimiter_not_exist \ s3tests_boto3/functional/test_s3.py::test_bucket_list_prefix_delimiter_prefix_delimiter_not_exist \ s3tests_boto3/functional/test_s3.py::test_bucket_listv2_prefix_delimiter_prefix_delimiter_not_exist \ s3tests_boto3/functional/test_s3.py::test_bucket_listv2_fetchowner_notempty \ s3tests_boto3/functional/test_s3.py::test_bucket_list_prefix_basic \ s3tests_boto3/functional/test_s3.py::test_bucket_listv2_prefix_basic \ s3tests_boto3/functional/test_s3.py::test_bucket_list_prefix_alt \ s3tests_boto3/functional/test_s3.py::test_bucket_listv2_prefix_alt \ s3tests_boto3/functional/test_s3.py::test_bucket_list_prefix_empty \ s3tests_boto3/functional/test_s3.py::test_bucket_listv2_prefix_empty \ s3tests_boto3/functional/test_s3.py::test_bucket_list_prefix_none \ s3tests_boto3/functional/test_s3.py::test_bucket_listv2_prefix_none \ s3tests_boto3/functional/test_s3.py::test_bucket_list_prefix_not_exist \ s3tests_boto3/functional/test_s3.py::test_bucket_listv2_prefix_not_exist \ s3tests_boto3/functional/test_s3.py::test_bucket_list_prefix_unreadable \ s3tests_boto3/functional/test_s3.py::test_bucket_listv2_prefix_unreadable \ s3tests_boto3/functional/test_s3.py::test_bucket_list_maxkeys_one \ s3tests_boto3/functional/test_s3.py::test_bucket_listv2_maxkeys_one \ s3tests_boto3/functional/test_s3.py::test_bucket_list_maxkeys_zero \ s3tests_boto3/functional/test_s3.py::test_bucket_listv2_maxkeys_zero \ s3tests_boto3/functional/test_s3.py::test_bucket_list_marker_none \ s3tests_boto3/functional/test_s3.py::test_bucket_list_marker_empty \ s3tests_boto3/functional/test_s3.py::test_bucket_listv2_continuationtoken_empty \ s3tests_boto3/functional/test_s3.py::test_bucket_listv2_continuationtoken \ s3tests_boto3/functional/test_s3.py::test_bucket_listv2_both_continuationtoken_startafter \ s3tests_boto3/functional/test_s3.py::test_bucket_list_marker_unreadable \ s3tests_boto3/functional/test_s3.py::test_bucket_listv2_startafter_unreadable \ s3tests_boto3/functional/test_s3.py::test_bucket_list_marker_not_in_list \ s3tests_boto3/functional/test_s3.py::test_bucket_listv2_startafter_not_in_list \ s3tests_boto3/functional/test_s3.py::test_bucket_list_marker_after_list \ s3tests_boto3/functional/test_s3.py::test_bucket_listv2_startafter_after_list \ s3tests_boto3/functional/test_s3.py::test_bucket_list_objects_anonymous_fail \ s3tests_boto3/functional/test_s3.py::test_bucket_listv2_objects_anonymous_fail \ s3tests_boto3/functional/test_s3.py::test_bucket_list_long_name \ s3tests_boto3/functional/test_s3.py::test_bucket_list_special_prefix \ s3tests_boto3/functional/test_s3.py::test_bucket_delete_notexist \ s3tests_boto3/functional/test_s3.py::test_bucket_create_delete \ s3tests_boto3/functional/test_s3.py::test_object_read_not_exist \ s3tests_boto3/functional/test_s3.py::test_multi_object_delete \ s3tests_boto3/functional/test_s3.py::test_multi_objectv2_delete \ s3tests_boto3/functional/test_s3.py::test_object_head_zero_bytes \ s3tests_boto3/functional/test_s3.py::test_object_write_check_etag \ s3tests_boto3/functional/test_s3.py::test_object_write_cache_control \ s3tests_boto3/functional/test_s3.py::test_object_write_expires \ s3tests_boto3/functional/test_s3.py::test_object_write_read_update_read_delete \ s3tests_boto3/functional/test_s3.py::test_object_metadata_replaced_on_put \ s3tests_boto3/functional/test_s3.py::test_object_write_file \ s3tests_boto3/functional/test_s3.py::test_post_object_invalid_date_format \ s3tests_boto3/functional/test_s3.py::test_post_object_no_key_specified \ s3tests_boto3/functional/test_s3.py::test_post_object_missing_signature \ s3tests_boto3/functional/test_s3.py::test_post_object_condition_is_case_sensitive \ s3tests_boto3/functional/test_s3.py::test_post_object_expires_is_case_sensitive \ s3tests_boto3/functional/test_s3.py::test_post_object_missing_expires_condition \ s3tests_boto3/functional/test_s3.py::test_post_object_missing_conditions_list \ s3tests_boto3/functional/test_s3.py::test_post_object_upload_size_limit_exceeded \ s3tests_boto3/functional/test_s3.py::test_post_object_missing_content_length_argument \ s3tests_boto3/functional/test_s3.py::test_post_object_invalid_content_length_argument \ s3tests_boto3/functional/test_s3.py::test_post_object_upload_size_below_minimum \ s3tests_boto3/functional/test_s3.py::test_post_object_empty_conditions \ s3tests_boto3/functional/test_s3.py::test_get_object_ifmatch_good \ s3tests_boto3/functional/test_s3.py::test_get_object_ifnonematch_good \ s3tests_boto3/functional/test_s3.py::test_get_object_ifmatch_failed \ s3tests_boto3/functional/test_s3.py::test_get_object_ifnonematch_failed \ s3tests_boto3/functional/test_s3.py::test_get_object_ifmodifiedsince_good \ s3tests_boto3/functional/test_s3.py::test_get_object_ifmodifiedsince_failed \ s3tests_boto3/functional/test_s3.py::test_get_object_ifunmodifiedsince_failed \ s3tests_boto3/functional/test_s3.py::test_bucket_head \ s3tests_boto3/functional/test_s3.py::test_bucket_head_notexist \ s3tests_boto3/functional/test_s3.py::test_object_raw_authenticated \ s3tests_boto3/functional/test_s3.py::test_object_raw_authenticated_bucket_acl \ s3tests_boto3/functional/test_s3.py::test_object_raw_authenticated_object_acl \ s3tests_boto3/functional/test_s3.py::test_object_raw_authenticated_object_gone \ s3tests_boto3/functional/test_s3.py::test_object_raw_get_x_amz_expires_out_range_zero \ s3tests_boto3/functional/test_s3.py::test_object_anon_put \ s3tests_boto3/functional/test_s3.py::test_object_put_authenticated \ s3tests_boto3/functional/test_s3.py::test_bucket_recreate_overwrite_acl \ s3tests_boto3/functional/test_s3.py::test_bucket_recreate_new_acl \ s3tests_boto3/functional/test_s3.py::test_buckets_create_then_list \ s3tests_boto3/functional/test_s3.py::test_buckets_list_ctime \ s3tests_boto3/functional/test_s3.py::test_list_buckets_invalid_auth \ s3tests_boto3/functional/test_s3.py::test_list_buckets_bad_auth \ s3tests_boto3/functional/test_s3.py::test_bucket_create_naming_good_contains_period \ s3tests_boto3/functional/test_s3.py::test_bucket_create_naming_good_contains_hyphen \ s3tests_boto3/functional/test_s3.py::test_bucket_list_special_prefix \ s3tests_boto3/functional/test_s3.py::test_object_copy_zero_size \ s3tests_boto3/functional/test_s3.py::test_object_copy_same_bucket \ s3tests_boto3/functional/test_s3.py::test_object_copy_to_itself \ s3tests_boto3/functional/test_s3.py::test_object_copy_diff_bucket \ s3tests_boto3/functional/test_s3.py::test_object_copy_canned_acl \ s3tests_boto3/functional/test_s3.py::test_object_copy_bucket_not_found \ s3tests_boto3/functional/test_s3.py::test_object_copy_key_not_found \ s3tests_boto3/functional/test_s3.py::test_multipart_copy_small \ s3tests_boto3/functional/test_s3.py::test_multipart_copy_without_range \ s3tests_boto3/functional/test_s3.py::test_multipart_copy_special_names \ s3tests_boto3/functional/test_s3.py::test_multipart_copy_multiple_sizes \ s3tests_boto3/functional/test_s3.py::test_multipart_get_part \ s3tests_boto3/functional/test_s3.py::test_multipart_upload \ s3tests_boto3/functional/test_s3.py::test_multipart_upload_empty \ s3tests_boto3/functional/test_s3.py::test_multipart_upload_multiple_sizes \ s3tests_boto3/functional/test_s3.py::test_multipart_upload_contents \ s3tests_boto3/functional/test_s3.py::test_multipart_upload_overwrite_existing_object \ s3tests_boto3/functional/test_s3.py::test_multipart_upload_size_too_small \ s3tests_boto3/functional/test_s3.py::test_multipart_resend_first_finishes_last \ s3tests_boto3/functional/test_s3.py::test_multipart_upload_resend_part \ s3tests_boto3/functional/test_s3.py::test_multipart_upload_missing_part \ s3tests_boto3/functional/test_s3.py::test_multipart_upload_incorrect_etag \ s3tests_boto3/functional/test_s3.py::test_abort_multipart_upload \ s3tests_boto3/functional/test_s3.py::test_list_multipart_upload \ s3tests_boto3/functional/test_s3.py::test_atomic_read_1mb \ s3tests_boto3/functional/test_s3.py::test_atomic_read_4mb \ s3tests_boto3/functional/test_s3.py::test_atomic_read_8mb \ s3tests_boto3/functional/test_s3.py::test_atomic_write_1mb \ s3tests_boto3/functional/test_s3.py::test_atomic_write_4mb \ s3tests_boto3/functional/test_s3.py::test_atomic_write_8mb \ s3tests_boto3/functional/test_s3.py::test_atomic_dual_write_1mb \ s3tests_boto3/functional/test_s3.py::test_atomic_dual_write_4mb \ s3tests_boto3/functional/test_s3.py::test_atomic_dual_write_8mb \ s3tests_boto3/functional/test_s3.py::test_atomic_multipart_upload_write \ s3tests_boto3/functional/test_s3.py::test_ranged_request_response_code \ s3tests_boto3/functional/test_s3.py::test_ranged_big_request_response_code \ s3tests_boto3/functional/test_s3.py::test_ranged_request_skip_leading_bytes_response_code \ s3tests_boto3/functional/test_s3.py::test_ranged_request_return_trailing_bytes_response_code \ s3tests_boto3/functional/test_s3.py::test_copy_object_ifmatch_good \ s3tests_boto3/functional/test_s3.py::test_copy_object_ifnonematch_failed \ s3tests_boto3/functional/test_s3.py::test_copy_object_ifmatch_failed \ s3tests_boto3/functional/test_s3.py::test_copy_object_ifnonematch_good \ s3tests_boto3/functional/test_s3.py::test_lifecycle_set \ s3tests_boto3/functional/test_s3.py::test_lifecycle_get \ s3tests_boto3/functional/test_s3.py::test_lifecycle_set_filter kill -9 $pid || true # Clean up data directory rm -rf "$WEED_DATA_DIR" || true versioning-tests: name: S3 Versioning & Object Lock tests runs-on: ubuntu-22.04 timeout-minutes: 15 steps: - name: Check out code into the Go module directory uses: actions/checkout@v4 - name: Set up Go 1.x uses: actions/setup-go@v5.5.0 with: go-version-file: 'go.mod' id: go - name: Set up Python uses: actions/setup-python@v4 with: python-version: '3.9' - name: Clone s3-tests run: | git clone https://github.com/ceph/s3-tests.git cd s3-tests pip install -r requirements.txt pip install tox pip install -e . - name: Run S3 Object Lock, Retention, and Versioning tests timeout-minutes: 15 shell: bash run: | cd weed go install -buildvcs=false set -x # Create clean data directory for this test run export WEED_DATA_DIR="/tmp/seaweedfs-objectlock-versioning-$(date +%s)" mkdir -p "$WEED_DATA_DIR" weed -v 0 server -filer -filer.maxMB=64 -s3 -ip.bind 0.0.0.0 \ -dir="$WEED_DATA_DIR" \ -master.raftHashicorp -master.electionTimeout 1s -master.volumeSizeLimitMB=100 \ -volume.max=100 -volume.preStopSeconds=1 \ -master.port=9334 -volume.port=8081 -filer.port=8889 -s3.port=8001 -metricsPort=9325 \ -s3.allowEmptyFolder=false -s3.allowDeleteBucketNotEmpty=true -s3.config=../docker/compose/s3.json & pid=$! # Wait for all SeaweedFS components to be ready echo "Waiting for SeaweedFS components to start..." for i in {1..30}; do if curl -s http://localhost:9334/cluster/status > /dev/null 2>&1; then echo "Master server is ready" break fi echo "Waiting for master server... ($i/30)" sleep 2 done for i in {1..30}; do if curl -s http://localhost:8081/status > /dev/null 2>&1; then echo "Volume server is ready" break fi echo "Waiting for volume server... ($i/30)" sleep 2 done for i in {1..30}; do if curl -s http://localhost:8889/ > /dev/null 2>&1; then echo "Filer is ready" break fi echo "Waiting for filer... ($i/30)" sleep 2 done for i in {1..30}; do if curl -s http://localhost:8001/ > /dev/null 2>&1; then echo "S3 server is ready" break fi echo "Waiting for S3 server... ($i/30)" sleep 2 done echo "All SeaweedFS components are ready!" cd ../s3-tests sed -i "s/assert prefixes == \['foo%2B1\/', 'foo\/', 'quux%20ab\/'\]/assert prefixes == \['foo\/', 'foo%2B1\/', 'quux%20ab\/'\]/" s3tests_boto3/functional/test_s3.py # Fix bucket creation conflicts in versioning tests by replacing _create_objects calls sed -i 's/bucket_name = _create_objects(bucket_name=bucket_name,keys=key_names)/# Use the existing bucket for object creation\n client = get_client()\n for key in key_names:\n client.put_object(Bucket=bucket_name, Body=key, Key=key)/' s3tests_boto3/functional/test_s3.py sed -i 's/bucket = _create_objects(bucket_name=bucket_name, keys=key_names)/# Use the existing bucket for object creation\n client = get_client()\n for key in key_names:\n client.put_object(Bucket=bucket_name, Body=key, Key=key)/' s3tests_boto3/functional/test_s3.py # Create and update s3tests.conf to use port 8001 cp ../docker/compose/s3tests.conf ../docker/compose/s3tests-versioning.conf sed -i 's/port = 8000/port = 8001/g' ../docker/compose/s3tests-versioning.conf sed -i 's/:8000/:8001/g' ../docker/compose/s3tests-versioning.conf sed -i 's/localhost:8000/localhost:8001/g' ../docker/compose/s3tests-versioning.conf sed -i 's/127\.0\.0\.1:8000/127.0.0.1:8001/g' ../docker/compose/s3tests-versioning.conf export S3TEST_CONF=../docker/compose/s3tests-versioning.conf # Debug: Show the config file contents echo "=== S3 Config File Contents ===" cat ../docker/compose/s3tests-versioning.conf echo "=== End Config ===" # Additional wait for S3-Filer integration to be fully ready echo "Waiting additional 10 seconds for S3-Filer integration..." sleep 10 # Test S3 connection before running tests echo "Testing S3 connection..." for i in {1..10}; do if curl -s -f http://localhost:8001/ > /dev/null 2>&1; then echo "S3 connection test successful" break fi echo "S3 connection test failed, retrying... ($i/10)" sleep 2 done # tox -- s3tests_boto3/functional/test_s3.py -k "object_lock or (versioning and not test_versioning_obj_suspend_versions and not test_bucket_list_return_data_versioning and not test_versioning_concurrent_multi_object_delete)" --tb=short tox -- s3tests_boto3/functional/test_s3.py -k "object_lock or versioning" --tb=short kill -9 $pid || true # Clean up data directory rm -rf "$WEED_DATA_DIR" || true cors-tests: name: S3 CORS tests runs-on: ubuntu-22.04 timeout-minutes: 10 steps: - name: Check out code into the Go module directory uses: actions/checkout@v4 - name: Set up Go 1.x uses: actions/setup-go@v5.5.0 with: go-version-file: 'go.mod' id: go - name: Set up Python uses: actions/setup-python@v4 with: python-version: '3.9' - name: Clone s3-tests run: | git clone https://github.com/ceph/s3-tests.git cd s3-tests pip install -r requirements.txt pip install tox pip install -e . - name: Run S3 CORS tests timeout-minutes: 10 shell: bash run: | cd weed go install -buildvcs=false set -x # Create clean data directory for this test run export WEED_DATA_DIR="/tmp/seaweedfs-cors-test-$(date +%s)" mkdir -p "$WEED_DATA_DIR" weed -v 0 server -filer -filer.maxMB=64 -s3 -ip.bind 0.0.0.0 \ -dir="$WEED_DATA_DIR" \ -master.raftHashicorp -master.electionTimeout 1s -master.volumeSizeLimitMB=100 \ -volume.max=100 -volume.preStopSeconds=1 \ -master.port=9335 -volume.port=8082 -filer.port=8890 -s3.port=8002 -metricsPort=9326 \ -s3.allowEmptyFolder=false -s3.allowDeleteBucketNotEmpty=true -s3.config=../docker/compose/s3.json & pid=$! # Wait for all SeaweedFS components to be ready echo "Waiting for SeaweedFS components to start..." for i in {1..30}; do if curl -s http://localhost:9335/cluster/status > /dev/null 2>&1; then echo "Master server is ready" break fi echo "Waiting for master server... ($i/30)" sleep 2 done for i in {1..30}; do if curl -s http://localhost:8082/status > /dev/null 2>&1; then echo "Volume server is ready" break fi echo "Waiting for volume server... ($i/30)" sleep 2 done for i in {1..30}; do if curl -s http://localhost:8890/ > /dev/null 2>&1; then echo "Filer is ready" break fi echo "Waiting for filer... ($i/30)" sleep 2 done for i in {1..30}; do if curl -s http://localhost:8002/ > /dev/null 2>&1; then echo "S3 server is ready" break fi echo "Waiting for S3 server... ($i/30)" sleep 2 done echo "All SeaweedFS components are ready!" cd ../s3-tests sed -i "s/assert prefixes == \['foo%2B1\/', 'foo\/', 'quux%20ab\/'\]/assert prefixes == \['foo\/', 'foo%2B1\/', 'quux%20ab\/'\]/" s3tests_boto3/functional/test_s3.py # Create and update s3tests.conf to use port 8002 cp ../docker/compose/s3tests.conf ../docker/compose/s3tests-cors.conf sed -i 's/port = 8000/port = 8002/g' ../docker/compose/s3tests-cors.conf sed -i 's/:8000/:8002/g' ../docker/compose/s3tests-cors.conf sed -i 's/localhost:8000/localhost:8002/g' ../docker/compose/s3tests-cors.conf sed -i 's/127\.0\.0\.1:8000/127.0.0.1:8002/g' ../docker/compose/s3tests-cors.conf export S3TEST_CONF=../docker/compose/s3tests-cors.conf # Debug: Show the config file contents echo "=== S3 Config File Contents ===" cat ../docker/compose/s3tests-cors.conf echo "=== End Config ===" # Additional wait for S3-Filer integration to be fully ready echo "Waiting additional 10 seconds for S3-Filer integration..." sleep 10 # Test S3 connection before running tests echo "Testing S3 connection..." for i in {1..10}; do if curl -s -f http://localhost:8002/ > /dev/null 2>&1; then echo "S3 connection test successful" break fi echo "S3 connection test failed, retrying... ($i/10)" sleep 2 done # Run CORS-specific tests from s3-tests suite tox -- s3tests_boto3/functional/test_s3.py -k "cors" --tb=short || echo "No CORS tests found in s3-tests suite" # If no specific CORS tests exist, run bucket configuration tests that include CORS tox -- s3tests_boto3/functional/test_s3.py::test_put_bucket_cors || echo "No put_bucket_cors test found" tox -- s3tests_boto3/functional/test_s3.py::test_get_bucket_cors || echo "No get_bucket_cors test found" tox -- s3tests_boto3/functional/test_s3.py::test_delete_bucket_cors || echo "No delete_bucket_cors test found" kill -9 $pid || true # Clean up data directory rm -rf "$WEED_DATA_DIR" || true copy-tests: name: SeaweedFS Custom S3 Copy tests runs-on: ubuntu-22.04 timeout-minutes: 10 steps: - name: Check out code into the Go module directory uses: actions/checkout@v4 - name: Set up Go 1.x uses: actions/setup-go@v5.5.0 with: go-version-file: 'go.mod' id: go - name: Run SeaweedFS Custom S3 Copy tests timeout-minutes: 10 shell: bash run: | cd weed go install -buildvcs=false # Create clean data directory for this test run export WEED_DATA_DIR="/tmp/seaweedfs-copy-test-$(date +%s)" mkdir -p "$WEED_DATA_DIR" set -x weed -v 0 server -filer -filer.maxMB=64 -s3 -ip.bind 0.0.0.0 \ -dir="$WEED_DATA_DIR" \ -master.raftHashicorp -master.electionTimeout 1s -master.volumeSizeLimitMB=100 \ -volume.max=100 -volume.preStopSeconds=1 \ -master.port=9336 -volume.port=8083 -filer.port=8891 -s3.port=8003 -metricsPort=9327 \ -s3.allowEmptyFolder=false -s3.allowDeleteBucketNotEmpty=true -s3.config=../docker/compose/s3.json & pid=$! # Wait for all SeaweedFS components to be ready echo "Waiting for SeaweedFS components to start..." for i in {1..30}; do if curl -s http://localhost:9336/cluster/status > /dev/null 2>&1; then echo "Master server is ready" break fi echo "Waiting for master server... ($i/30)" sleep 2 done for i in {1..30}; do if curl -s http://localhost:8083/status > /dev/null 2>&1; then echo "Volume server is ready" break fi echo "Waiting for volume server... ($i/30)" sleep 2 done for i in {1..30}; do if curl -s http://localhost:8891/ > /dev/null 2>&1; then echo "Filer is ready" break fi echo "Waiting for filer... ($i/30)" sleep 2 done for i in {1..30}; do if curl -s http://localhost:8003/ > /dev/null 2>&1; then echo "S3 server is ready" break fi echo "Waiting for S3 server... ($i/30)" sleep 2 done echo "All SeaweedFS components are ready!" cd ../test/s3/copying # Patch Go tests to use the correct S3 endpoint (port 8003) sed -i 's/http:\/\/127\.0\.0\.1:8000/http:\/\/127.0.0.1:8003/g' s3_copying_test.go # Debug: Show what endpoint the Go tests will use echo "=== Go Test Configuration ===" grep -n "127.0.0.1" s3_copying_test.go || echo "No IP configuration found" echo "=== End Configuration ===" # Additional wait for S3-Filer integration to be fully ready echo "Waiting additional 10 seconds for S3-Filer integration..." sleep 10 # Test S3 connection before running tests echo "Testing S3 connection..." for i in {1..10}; do if curl -s -f http://localhost:8003/ > /dev/null 2>&1; then echo "S3 connection test successful" break fi echo "S3 connection test failed, retrying... ($i/10)" sleep 2 done go test -v kill -9 $pid || true # Clean up data directory rm -rf "$WEED_DATA_DIR" || true sql-store-tests: name: Basic S3 tests (SQL store) runs-on: ubuntu-22.04 timeout-minutes: 15 steps: - name: Check out code into the Go module directory uses: actions/checkout@v4 - name: Set up Go 1.x uses: actions/setup-go@v5.5.0 with: go-version-file: 'go.mod' id: go - name: Set up Python uses: actions/setup-python@v4 with: python-version: '3.9' - name: Clone s3-tests run: | git clone https://github.com/ceph/s3-tests.git cd s3-tests pip install -r requirements.txt pip install tox pip install -e . - name: Run Ceph S3 tests with SQL store timeout-minutes: 15 shell: bash run: | cd weed # Debug: Check for port conflicts before starting echo "=== Pre-start Port Check ===" netstat -tulpn | grep -E "(9337|8085|8892|8004|9328)" || echo "Ports are free" # Kill any existing weed processes that might interfere echo "=== Cleanup existing processes ===" pkill -f weed || echo "No weed processes found" # More aggressive port cleanup using multiple methods for port in 9337 8085 8892 8004 9328; do echo "Cleaning port $port..." # Method 1: lsof pid=$(lsof -ti :$port 2>/dev/null || echo "") if [ -n "$pid" ]; then echo "Found process $pid using port $port (via lsof)" kill -9 $pid 2>/dev/null || echo "Failed to kill $pid" fi # Method 2: netstat + ps (for cases where lsof fails) netstat_pids=$(netstat -tlnp 2>/dev/null | grep ":$port " | awk '{print $7}' | cut -d'/' -f1 | grep -v '^-$' || echo "") for npid in $netstat_pids; do if [ -n "$npid" ] && [ "$npid" != "-" ]; then echo "Found process $npid using port $port (via netstat)" kill -9 $npid 2>/dev/null || echo "Failed to kill $npid" fi done # Method 3: fuser (if available) if command -v fuser >/dev/null 2>&1; then fuser -k ${port}/tcp 2>/dev/null || echo "No process found via fuser for port $port" fi sleep 1 done # Wait for ports to be released sleep 5 echo "=== Post-cleanup Port Check ===" netstat -tulpn | grep -E "(9337|8085|8892|8004|9328)" || echo "All ports are now free" # If any ports are still in use, fail fast if netstat -tulpn | grep -E "(9337|8085|8892|8004|9328)" >/dev/null 2>&1; then echo "❌ ERROR: Some ports are still in use after aggressive cleanup!" echo "=== Detailed Port Analysis ===" for port in 9337 8085 8892 8004 9328; do echo "Port $port:" netstat -tlnp 2>/dev/null | grep ":$port " || echo " Not in use" lsof -i :$port 2>/dev/null || echo " No lsof info" done exit 1 fi go install -tags "sqlite" -buildvcs=false # Create clean data directory for this test run with unique timestamp and process ID export WEED_DATA_DIR="/tmp/seaweedfs-sql-test-$(date +%s)-$$" mkdir -p "$WEED_DATA_DIR" chmod 777 "$WEED_DATA_DIR" # SQLite-specific configuration export WEED_LEVELDB2_ENABLED="false" export WEED_SQLITE_ENABLED="true" export WEED_SQLITE_DBFILE="$WEED_DATA_DIR/filer.db" echo "=== SQL Store Configuration ===" echo "Data Dir: $WEED_DATA_DIR" echo "SQLite DB: $WEED_SQLITE_DBFILE" echo "LEVELDB2_ENABLED: $WEED_LEVELDB2_ENABLED" echo "SQLITE_ENABLED: $WEED_SQLITE_ENABLED" set -x weed -v 1 server -filer -filer.maxMB=64 -s3 -ip.bind 0.0.0.0 \ -dir="$WEED_DATA_DIR" \ -master.raftHashicorp -master.electionTimeout 1s -master.volumeSizeLimitMB=100 \ -volume.max=100 -volume.preStopSeconds=1 \ -master.port=9337 -volume.port=8085 -filer.port=8892 -s3.port=8004 -metricsPort=9328 \ -s3.allowEmptyFolder=false -s3.allowDeleteBucketNotEmpty=true -s3.config=../docker/compose/s3.json \ > /tmp/seaweedfs-sql-server.log 2>&1 & pid=$! echo "=== Server started with PID: $pid ===" # Wait for all SeaweedFS components to be ready echo "Waiting for SeaweedFS components to start..." # Check if server process is still alive before waiting if ! kill -0 $pid 2>/dev/null; then echo "❌ Server process died immediately after start" echo "=== Immediate Log Check ===" tail -20 /tmp/seaweedfs-sql-server.log 2>/dev/null || echo "No log available" exit 1 fi sleep 5 # Give SQLite more time to initialize for i in {1..30}; do if curl -s http://localhost:9337/cluster/status > /dev/null 2>&1; then echo "Master server is ready" break fi echo "Waiting for master server... ($i/30)" # Check if server process is still alive if ! kill -0 $pid 2>/dev/null; then echo "❌ Server process died while waiting for master" tail -20 /tmp/seaweedfs-sql-server.log 2>/dev/null exit 1 fi sleep 2 done for i in {1..30}; do if curl -s http://localhost:8085/status > /dev/null 2>&1; then echo "Volume server is ready" break fi echo "Waiting for volume server... ($i/30)" if ! kill -0 $pid 2>/dev/null; then echo "❌ Server process died while waiting for volume" tail -20 /tmp/seaweedfs-sql-server.log 2>/dev/null exit 1 fi sleep 2 done for i in {1..30}; do if curl -s http://localhost:8892/ > /dev/null 2>&1; then echo "Filer (SQLite) is ready" break fi echo "Waiting for filer (SQLite)... ($i/30)" if ! kill -0 $pid 2>/dev/null; then echo "❌ Server process died while waiting for filer" tail -20 /tmp/seaweedfs-sql-server.log 2>/dev/null exit 1 fi sleep 2 done # Extra wait for SQLite filer to fully initialize echo "Giving SQLite filer extra time to initialize..." sleep 5 for i in {1..30}; do if curl -s http://localhost:8004/ > /dev/null 2>&1; then echo "S3 server is ready" break fi echo "Waiting for S3 server... ($i/30)" if ! kill -0 $pid 2>/dev/null; then echo "❌ Server process died while waiting for S3" tail -20 /tmp/seaweedfs-sql-server.log 2>/dev/null exit 1 fi sleep 2 done echo "All SeaweedFS components are ready!" cd ../s3-tests sed -i "s/assert prefixes == \['foo%2B1\/', 'foo\/', 'quux%20ab\/'\]/assert prefixes == \['foo\/', 'foo%2B1\/', 'quux%20ab\/'\]/" s3tests_boto3/functional/test_s3.py # Create and update s3tests.conf to use port 8004 cp ../docker/compose/s3tests.conf ../docker/compose/s3tests-sql.conf sed -i 's/port = 8000/port = 8004/g' ../docker/compose/s3tests-sql.conf sed -i 's/:8000/:8004/g' ../docker/compose/s3tests-sql.conf sed -i 's/localhost:8000/localhost:8004/g' ../docker/compose/s3tests-sql.conf sed -i 's/127\.0\.0\.1:8000/127.0.0.1:8004/g' ../docker/compose/s3tests-sql.conf export S3TEST_CONF=../docker/compose/s3tests-sql.conf # Debug: Show the config file contents echo "=== S3 Config File Contents ===" cat ../docker/compose/s3tests-sql.conf echo "=== End Config ===" # Additional wait for S3-Filer integration to be fully ready echo "Waiting additional 10 seconds for S3-Filer integration..." sleep 10 # Test S3 connection before running tests echo "Testing S3 connection..." # Debug: Check if SeaweedFS processes are running echo "=== Process Status ===" ps aux | grep -E "(weed|seaweedfs)" | grep -v grep || echo "No SeaweedFS processes found" # Debug: Check port status echo "=== Port Status ===" netstat -tulpn | grep -E "(8004|9337|8085|8892)" || echo "Ports not found" # Debug: Check server logs echo "=== Recent Server Logs ===" echo "--- SQL Server Log ---" tail -20 /tmp/seaweedfs-sql-server.log 2>/dev/null || echo "No SQL server log found" echo "--- Other Logs ---" ls -la /tmp/seaweedfs-*.log 2>/dev/null || echo "No other log files found" for i in {1..10}; do if curl -s -f http://localhost:8004/ > /dev/null 2>&1; then echo "S3 connection test successful" break fi echo "S3 connection test failed, retrying... ($i/10)" # Debug: Try different HTTP methods echo "Debug: Testing different endpoints..." curl -s -I http://localhost:8004/ || echo "HEAD request failed" curl -s http://localhost:8004/status || echo "Status endpoint failed" sleep 2 done tox -- \ s3tests_boto3/functional/test_s3.py::test_bucket_list_empty \ s3tests_boto3/functional/test_s3.py::test_bucket_list_distinct \ s3tests_boto3/functional/test_s3.py::test_bucket_list_many \ s3tests_boto3/functional/test_s3.py::test_bucket_listv2_many \ s3tests_boto3/functional/test_s3.py::test_bucket_listv2_delimiter_basic \ s3tests_boto3/functional/test_s3.py::test_bucket_listv2_encoding_basic \ s3tests_boto3/functional/test_s3.py::test_bucket_listv2_delimiter_prefix \ s3tests_boto3/functional/test_s3.py::test_bucket_listv2_delimiter_prefix_ends_with_delimiter \ s3tests_boto3/functional/test_s3.py::test_bucket_listv2_delimiter_alt \ s3tests_boto3/functional/test_s3.py::test_bucket_listv2_delimiter_prefix_underscore \ s3tests_boto3/functional/test_s3.py::test_bucket_listv2_delimiter_percentage \ s3tests_boto3/functional/test_s3.py::test_bucket_listv2_delimiter_whitespace \ s3tests_boto3/functional/test_s3.py::test_bucket_listv2_delimiter_dot \ s3tests_boto3/functional/test_s3.py::test_bucket_listv2_delimiter_unreadable \ s3tests_boto3/functional/test_s3.py::test_bucket_listv2_delimiter_empty \ s3tests_boto3/functional/test_s3.py::test_bucket_listv2_delimiter_none \ s3tests_boto3/functional/test_s3.py::test_bucket_listv2_delimiter_not_exist \ s3tests_boto3/functional/test_s3.py::test_bucket_list_prefix_delimiter_basic \ s3tests_boto3/functional/test_s3.py::test_bucket_listv2_prefix_delimiter_basic \ s3tests_boto3/functional/test_s3.py::test_bucket_list_prefix_delimiter_alt \ s3tests_boto3/functional/test_s3.py::test_bucket_listv2_prefix_delimiter_alt \ s3tests_boto3/functional/test_s3.py::test_bucket_list_prefix_delimiter_prefix_not_exist \ s3tests_boto3/functional/test_s3.py::test_bucket_listv2_prefix_delimiter_prefix_not_exist \ s3tests_boto3/functional/test_s3.py::test_bucket_list_prefix_delimiter_delimiter_not_exist \ s3tests_boto3/functional/test_s3.py::test_bucket_listv2_prefix_delimiter_delimiter_not_exist \ s3tests_boto3/functional/test_s3.py::test_bucket_list_prefix_delimiter_prefix_delimiter_not_exist \ s3tests_boto3/functional/test_s3.py::test_bucket_listv2_prefix_delimiter_prefix_delimiter_not_exist \ s3tests_boto3/functional/test_s3.py::test_bucket_listv2_fetchowner_notempty \ s3tests_boto3/functional/test_s3.py::test_bucket_list_prefix_basic \ s3tests_boto3/functional/test_s3.py::test_bucket_listv2_prefix_basic \ s3tests_boto3/functional/test_s3.py::test_bucket_list_prefix_alt \ s3tests_boto3/functional/test_s3.py::test_bucket_listv2_prefix_alt \ s3tests_boto3/functional/test_s3.py::test_bucket_list_prefix_empty \ s3tests_boto3/functional/test_s3.py::test_bucket_listv2_prefix_empty \ s3tests_boto3/functional/test_s3.py::test_bucket_list_prefix_none \ s3tests_boto3/functional/test_s3.py::test_bucket_listv2_prefix_none \ s3tests_boto3/functional/test_s3.py::test_bucket_list_prefix_not_exist \ s3tests_boto3/functional/test_s3.py::test_bucket_listv2_prefix_not_exist \ s3tests_boto3/functional/test_s3.py::test_bucket_list_prefix_unreadable \ s3tests_boto3/functional/test_s3.py::test_bucket_listv2_prefix_unreadable \ s3tests_boto3/functional/test_s3.py::test_bucket_list_maxkeys_one \ s3tests_boto3/functional/test_s3.py::test_bucket_listv2_maxkeys_one \ s3tests_boto3/functional/test_s3.py::test_bucket_list_maxkeys_zero \ s3tests_boto3/functional/test_s3.py::test_bucket_listv2_maxkeys_zero \ s3tests_boto3/functional/test_s3.py::test_bucket_list_marker_none \ s3tests_boto3/functional/test_s3.py::test_bucket_list_marker_empty \ s3tests_boto3/functional/test_s3.py::test_bucket_listv2_continuationtoken_empty \ s3tests_boto3/functional/test_s3.py::test_bucket_listv2_continuationtoken \ s3tests_boto3/functional/test_s3.py::test_bucket_listv2_both_continuationtoken_startafter \ s3tests_boto3/functional/test_s3.py::test_bucket_list_marker_unreadable \ s3tests_boto3/functional/test_s3.py::test_bucket_listv2_startafter_unreadable \ s3tests_boto3/functional/test_s3.py::test_bucket_list_marker_not_in_list \ s3tests_boto3/functional/test_s3.py::test_bucket_listv2_startafter_not_in_list \ s3tests_boto3/functional/test_s3.py::test_bucket_list_marker_after_list \ s3tests_boto3/functional/test_s3.py::test_bucket_listv2_startafter_after_list \ s3tests_boto3/functional/test_s3.py::test_bucket_list_objects_anonymous_fail \ s3tests_boto3/functional/test_s3.py::test_bucket_listv2_objects_anonymous_fail \ s3tests_boto3/functional/test_s3.py::test_bucket_list_long_name \ s3tests_boto3/functional/test_s3.py::test_bucket_list_special_prefix \ s3tests_boto3/functional/test_s3.py::test_object_copy_zero_size \ s3tests_boto3/functional/test_s3.py::test_object_copy_same_bucket \ s3tests_boto3/functional/test_s3.py::test_object_copy_to_itself \ s3tests_boto3/functional/test_s3.py::test_object_copy_diff_bucket \ s3tests_boto3/functional/test_s3.py::test_object_copy_canned_acl \ s3tests_boto3/functional/test_s3.py::test_multipart_copy_small \ s3tests_boto3/functional/test_s3.py::test_multipart_copy_without_range \ s3tests_boto3/functional/test_s3.py::test_multipart_copy_special_names \ s3tests_boto3/functional/test_s3.py::test_multipart_copy_multiple_sizes \ s3tests_boto3/functional/test_s3.py::test_copy_object_ifmatch_good \ s3tests_boto3/functional/test_s3.py::test_copy_object_ifnonematch_failed \ s3tests_boto3/functional/test_s3.py::test_copy_object_ifmatch_failed \ s3tests_boto3/functional/test_s3.py::test_copy_object_ifnonematch_good kill -9 $pid || true # Clean up data directory rm -rf "$WEED_DATA_DIR" || true