
refactor use const CountEntryChunksForGzip

Konstantin Lebedev 2022-05-01 22:28:55 +05:00
parent ec0ed41e37
commit 21033ff4c3
17 changed files with 55 additions and 36 deletions
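
The change itself is mechanical: every filer store that previously compared the chunk count against a literal 50 before gzipping serialized entry metadata now uses the shared filer.CountEntryChunksForGzip constant. A minimal, self-contained sketch of the pattern (maybeGzipData below is a stand-in for util.MaybeGzipData, not the real implementation):

```go
package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
)

// Mirrors the new constant in the filer package: entries with more chunks
// than this get their serialized metadata gzipped before being stored.
const CountEntryChunksForGzip = 50

// maybeGzipData compresses the input and keeps whichever representation is
// smaller, loosely following the behaviour the diff relies on.
func maybeGzipData(input []byte) []byte {
	var buf bytes.Buffer
	w := gzip.NewWriter(&buf)
	if _, err := w.Write(input); err != nil {
		return input
	}
	if err := w.Close(); err != nil {
		return input
	}
	if buf.Len() >= len(input) {
		return input
	}
	return buf.Bytes()
}

func main() {
	meta := bytes.Repeat([]byte("chunk-metadata "), 200) // serialized entry, for illustration
	chunkCount := 120
	if chunkCount > CountEntryChunksForGzip {
		meta = maybeGzipData(meta)
	}
	fmt.Println("stored bytes:", len(meta))
}
```

Keeping the threshold in one constant means the gzip cut-off can be tuned in a single place instead of in every store backend.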


@@ -156,7 +156,7 @@ func (store *AbstractSqlStore) InsertEntry(ctx context.Context, entry *filer.Ent
return fmt.Errorf("encode %s: %s", entry.FullPath, err)
}
if len(entry.Chunks) > 50 {
if len(entry.Chunks) > filer.CountEntryChunksForGzip {
meta = util.MaybeGzipData(meta)
}


@@ -157,7 +157,7 @@ func (store *ArangodbStore) InsertEntry(ctx context.Context, entry *filer.Entry)
return fmt.Errorf("encode %s: %s", entry.FullPath, err)
}
if len(entry.Chunks) > 50 {
if len(entry.Chunks) > filer.CountEntryChunksForGzip {
meta = util.MaybeGzipData(meta)
}
model := &Model{
@@ -196,7 +196,7 @@ func (store *ArangodbStore) UpdateEntry(ctx context.Context, entry *filer.Entry)
return fmt.Errorf("encode %s: %s", entry.FullPath, err)
}
if len(entry.Chunks) > 50 {
if len(entry.Chunks) > filer.CountEntryChunksForGzip {
meta = util.MaybeGzipData(meta)
}
model := &Model{


@@ -100,7 +100,7 @@ func (store *CassandraStore) InsertEntry(ctx context.Context, entry *filer.Entry
return fmt.Errorf("encode %s: %s", entry.FullPath, err)
}
if len(entry.Chunks) > 50 {
if len(entry.Chunks) > filer.CountEntryChunksForGzip {
meta = util.MaybeGzipData(meta)
}


@@ -82,7 +82,7 @@ func (store *EtcdStore) InsertEntry(ctx context.Context, entry *filer.Entry) (er
return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err)
}
if len(entry.Chunks) > 50 {
if len(entry.Chunks) > filer.CountEntryChunksForGzip {
meta = weed_util.MaybeGzipData(meta)
}


@@ -7,6 +7,8 @@ import (
"io"
)
const CountEntryChunksForGzip = 50
var (
ErrUnsupportedListDirectoryPrefixed = errors.New("unsupported directory prefix listing")
ErrUnsupportedSuperLargeDirectoryListing = errors.New("unsupported super large directory listing")


@@ -75,7 +75,7 @@ func (store *HbaseStore) InsertEntry(ctx context.Context, entry *filer.Entry) er
if err != nil {
return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err)
}
if len(entry.Chunks) > 50 {
if len(entry.Chunks) > filer.CountEntryChunksForGzip {
value = util.MaybeGzipData(value)
}


@@ -86,7 +86,7 @@ func (store *LevelDBStore) InsertEntry(ctx context.Context, entry *filer.Entry)
return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err)
}
if len(entry.Chunks) > 50 {
if len(entry.Chunks) > filer.CountEntryChunksForGzip {
value = weed_util.MaybeGzipData(value)
}


@@ -88,7 +88,7 @@ func (store *LevelDB2Store) InsertEntry(ctx context.Context, entry *filer.Entry)
return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err)
}
if len(entry.Chunks) > 50 {
if len(entry.Chunks) > filer.CountEntryChunksForGzip {
value = weed_util.MaybeGzipData(value)
}


@@ -177,7 +177,7 @@ func (store *LevelDB3Store) InsertEntry(ctx context.Context, entry *filer.Entry)
return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err)
}
if len(entry.Chunks) > 50 {
if len(entry.Chunks) > filer.CountEntryChunksForGzip {
value = weed_util.MaybeGzipData(value)
}


@@ -107,7 +107,7 @@ func (store *MongodbStore) UpdateEntry(ctx context.Context, entry *filer.Entry)
return fmt.Errorf("encode %s: %s", entry.FullPath, err)
}
if len(entry.Chunks) > 50 {
if len(entry.Chunks) > filer.CountEntryChunksForGzip {
meta = util.MaybeGzipData(meta)
}


@@ -40,7 +40,7 @@ func (store *UniversalRedisStore) InsertEntry(ctx context.Context, entry *filer.
return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err)
}
if len(entry.Chunks) > 50 {
if len(entry.Chunks) > filer.CountEntryChunksForGzip {
value = util.MaybeGzipData(value)
}


@@ -52,7 +52,7 @@ func (store *UniversalRedis2Store) InsertEntry(ctx context.Context, entry *filer
return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err)
}
if len(entry.Chunks) > 50 {
if len(entry.Chunks) > filer.CountEntryChunksForGzip {
value = util.MaybeGzipData(value)
}


@@ -40,7 +40,7 @@ func (store *UniversalRedis3Store) InsertEntry(ctx context.Context, entry *filer
return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err)
}
if len(entry.Chunks) > 50 {
if len(entry.Chunks) > filer.CountEntryChunksForGzip {
value = util.MaybeGzipData(value)
}


@@ -53,7 +53,7 @@ func (store *UniversalRedisLuaStore) InsertEntry(ctx context.Context, entry *fil
return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err)
}
if len(entry.Chunks) > 50 {
if len(entry.Chunks) > filer.CountEntryChunksForGzip {
value = util.MaybeGzipData(value)
}


@@ -8,14 +8,9 @@ options:
```
[ydb]
enabled=true
db_name="seaweedfs"
servers=["http://localhost:8529"]
#basic auth
user="root"
pass="test"
# tls settings
insecure_skip_verify=true
prefix="seaweedfs"
useBucketPrefix=true
coonectionUrl=grpcs://ydb-ru.yandex.net:2135/?database=/ru/home/username/db
```
get ydb types


@@ -67,6 +67,6 @@ const (
SELECT name, meta
FROM file_meta
WHERE dir_hash == $dir_hash AND directory == $directory and name %v $start_name and name LIKE '$prefix%'
WHERE dir_hash == $dir_hash AND directory == $directory and name %s $start_name and name LIKE '$prefix%%'
ORDER BY name ASC LIMIT $limit;`
)
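
The query constant change goes hand in hand with Printf-style expansion: the comparison operator slot becomes %s and the literal LIKE wildcard has to be escaped as %%, otherwise the formatter would treat it as a verb. A small illustration (the Sprintf-style expansion and the ">" operator are assumptions about how the template is used at list time):

```go
package main

import "fmt"

// Shape of the template after the change: %s receives the comparison
// operator, %% renders as a literal % in the LIKE pattern.
const listDirectoryQuery = `
SELECT name, meta
FROM file_meta
WHERE dir_hash == $dir_hash AND directory == $directory and name %s $start_name and name LIKE '$prefix%%'
ORDER BY name ASC LIMIT $limit;`

func main() {
	// ">" to list strictly after the start name, ">=" to include it
	// (assumed usage, matching the other SQL-backed stores).
	fmt.Printf(listDirectoryQuery, ">")
}
```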


@@ -15,6 +15,10 @@ import (
"time"
)
const (
defaultConnectionTimeOut = 10
)
var (
roTX = table.TxControl(
table.BeginTx(table.WithOnlineReadOnly()),
@@ -29,8 +33,6 @@ var (
type YdbStore struct {
SupportBucketTable bool
DB *connect.Connection
connParams connect.ConnectParams
connCtx context.Context
dirBuckets string
tablePathPrefix string
}
@@ -44,16 +46,27 @@ func (store *YdbStore) GetName() string {
}
func (store *YdbStore) Initialize(configuration util.Configuration, prefix string) (err error) {
return store.initialize(configuration.GetString(prefix + "coonectionUrl"))
return store.initialize(
configuration.GetString("filer.options.buckets_folder"),
configuration.GetString(prefix+"coonectionUrl"),
configuration.GetString(prefix+"tablePathPrefix"),
configuration.GetBool(prefix+"useBucketPrefix"),
configuration.GetInt(prefix+"connectionTimeOut"),
)
}
func (store *YdbStore) initialize(sqlUrl string) (err error) {
store.SupportBucketTable = false
func (store *YdbStore) initialize(dirBuckets string, sqlUrl string, tablePathPrefix string, useBucketPrefix bool, connectionTimeOut int) (err error) {
store.dirBuckets = dirBuckets
store.tablePathPrefix = tablePathPrefix
store.SupportBucketTable = useBucketPrefix
if connectionTimeOut == 0 {
connectionTimeOut = defaultConnectionTimeOut
}
var cancel context.CancelFunc
store.connCtx, cancel = context.WithTimeout(context.Background(), 10*time.Second)
connCtx, cancel := context.WithTimeout(context.Background(), time.Duration(connectionTimeOut)*time.Second)
defer cancel()
store.connParams = connect.MustConnectionString(sqlUrl)
store.DB, err = connect.New(store.connCtx, store.connParams)
connParams := connect.MustConnectionString(sqlUrl)
store.DB, err = connect.New(connCtx, connParams)
if err != nil {
store.DB.Close()
store.DB = nil
@@ -61,7 +74,7 @@ func (store *YdbStore) initialize(sqlUrl string) (err error) {
}
defer store.DB.Close()
if err = store.DB.EnsurePathExists(store.connCtx, store.connParams.Database()); err != nil {
if err = store.DB.EnsurePathExists(connCtx, connParams.Database()); err != nil {
return fmt.Errorf("connect to %s error:%v", sqlUrl, err)
}
return nil
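
The rewritten initialize also replaces the hard-coded 10-second dial timeout with a configurable connectionTimeOut, falling back to defaultConnectionTimeOut when the option is unset. Roughly (the helper name below is illustrative, not from the diff):

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// dialContext sketches the timeout handling: a zero/unset config value
// falls back to the 10-second default before the dial context is created.
func dialContext(connectionTimeOut int) (context.Context, context.CancelFunc) {
	if connectionTimeOut == 0 {
		connectionTimeOut = 10 // defaultConnectionTimeOut in the diff
	}
	return context.WithTimeout(context.Background(), time.Duration(connectionTimeOut)*time.Second)
}

func main() {
	ctx, cancel := dialContext(0)
	defer cancel()
	deadline, _ := ctx.Deadline()
	fmt.Println("dial deadline in:", time.Until(deadline).Round(time.Second))
}
```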
@@ -73,6 +86,11 @@ func (store *YdbStore) insertOrUpdateEntry(ctx context.Context, entry *filer.Ent
if err != nil {
return fmt.Errorf("encode %s: %s", entry.FullPath, err)
}
if len(entry.Chunks) > filer.CountEntryChunksForGzip {
meta = util.MaybeGzipData(meta)
}
fileMeta := FileMeta{util.HashStringToLong(dir), name, dir, meta}
return table.Retry(ctx, store.DB.Table().Pool(),
table.OperationFunc(func(ctx context.Context, s *table.Session) (err error) {
@@ -114,7 +132,7 @@ func (store *YdbStore) FindEntry(ctx context.Context, fullpath util.FullPath) (e
}
defer res.Close()
for res.NextSet() {
for res.NextResultSet(ctx) {
for res.NextRow() {
res.SeekItem("meta")
entry.FullPath = fullpath
@@ -251,17 +269,21 @@ func (store *YdbStore) Shutdown() {
}
func (store *YdbStore) getPrefix(dir string) string {
if !store.SupportBucketTable {
return store.tablePathPrefix
}
prefixBuckets := store.dirBuckets + "/"
if strings.HasPrefix(dir, prefixBuckets) {
// detect bucket
bucketAndDir := dir[len(prefixBuckets):]
if t := strings.Index(bucketAndDir, "/"); t > 0 {
return bucketAndDir[:t]
return path.Join(bucketAndDir[:t], store.tablePathPrefix)
}
}
return ""
return store.tablePathPrefix
}
func (store *YdbStore) withPragma(prefix, query string) string {
return `PRAGMA TablePathPrefix("` + path.Join(store.tablePathPrefix, prefix) + `");` + query
return `PRAGMA TablePathPrefix("` + prefix + `");` + query
}
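
For reference, the revised bucket-prefix resolution can be exercised in isolation. The logic mirrors the getPrefix hunk above; the sample paths and the /buckets folder are assumptions for illustration:

```go
package main

import (
	"fmt"
	"path"
	"strings"
)

// getPrefix resolves the table path prefix for a directory: directories under
// the buckets folder get a per-bucket prefix joined with tablePathPrefix,
// everything else (and stores without bucket-table support) uses the
// configured tablePathPrefix as-is.
func getPrefix(dir, dirBuckets, tablePathPrefix string, supportBucketTable bool) string {
	if !supportBucketTable {
		return tablePathPrefix
	}
	prefixBuckets := dirBuckets + "/"
	if strings.HasPrefix(dir, prefixBuckets) {
		bucketAndDir := dir[len(prefixBuckets):]
		if t := strings.Index(bucketAndDir, "/"); t > 0 {
			return path.Join(bucketAndDir[:t], tablePathPrefix)
		}
	}
	return tablePathPrefix
}

func main() {
	fmt.Println(getPrefix("/buckets/photos/2022/a.jpg", "/buckets", "seaweedfs", true)) // photos/seaweedfs
	fmt.Println(getPrefix("/home/user/notes.txt", "/buckets", "seaweedfs", true))       // seaweedfs
}
```

Each query is then wrapped with withPragma, which now takes the already-resolved prefix directly instead of joining it with tablePathPrefix a second time.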