From 70a4c98b00d811c01289f26d3602992a0d3f45e1 Mon Sep 17 00:00:00 2001
From: chrislu
Date: Tue, 15 Nov 2022 06:33:36 -0800
Subject: [PATCH] refactor filer_pb.Entry and filer.Entry to use GetChunks()
 for later locking on reading chunks

---
 unmaintained/see_meta/see_meta.go                    |  2 +-
 weed/command/filer_cat.go                            |  2 +-
 weed/filer/abstract_sql/abstract_sql_store.go        |  2 +-
 weed/filer/arangodb/arangodb_store.go                |  4 ++--
 weed/filer/cassandra/cassandra_store.go              |  2 +-
 weed/filer/entry.go                                  |  8 ++++++--
 weed/filer/etcd/etcd_store.go                        |  2 +-
 weed/filer/filechunks.go                             |  6 +++---
 weed/filer/filer_conf.go                             |  2 +-
 weed/filer/filer_delete_entry.go                     |  4 ++--
 weed/filer/filer_deletion.go                         | 10 +++++-----
 weed/filer/filer_notify.go                           |  2 +-
 weed/filer/filer_notify_append.go                    |  4 ++--
 weed/filer/filer_notify_test.go                      |  2 +-
 weed/filer/filer_on_meta_event.go                    |  2 +-
 weed/filer/filerstore_wrapper.go                     | 10 +++++-----
 weed/filer/hbase/hbase_store.go                      |  2 +-
 weed/filer/leveldb/leveldb_store.go                  |  6 +++---
 weed/filer/leveldb2/leveldb2_store.go                |  8 ++++----
 weed/filer/leveldb3/leveldb3_store.go                |  8 ++++----
 weed/filer/mongodb/mongodb_store.go                  |  2 +-
 weed/filer/read_remote.go                            |  2 +-
 weed/filer/read_write.go                             |  2 +-
 weed/filer/redis/universal_redis_store.go            |  2 +-
 weed/filer/redis2/universal_redis_store.go           |  2 +-
 weed/filer/redis3/universal_redis_store.go           |  2 +-
 weed/filer/redis_lua/universal_redis_store.go        |  2 +-
 weed/filer/rocksdb/rocksdb_store.go                  |  6 +++---
 weed/filer/stream.go                                 |  4 ++--
 weed/filer/ydb/ydb_store.go                          |  2 +-
 weed/mount/filehandle.go                             |  6 +++---
 weed/mount/filehandle_read.go                        |  2 +-
 weed/mount/weedfs_attr.go                            |  4 ++--
 weed/mount/weedfs_file_lseek.go                      |  2 +-
 weed/mount/weedfs_file_sync.go                       |  6 +++---
 weed/mount/weedfs_link.go                            |  2 +-
 weed/pb/filer_pb/filer.pb.go                         |  2 +-
 weed/pb/filer_pb/filer_pb_helper.go                  |  2 +-
 weed/replication/sink/azuresink/azure_sink.go        |  2 +-
 weed/replication/sink/b2sink/b2_sink.go              |  2 +-
 weed/replication/sink/filersink/filer_sink.go        | 12 ++++++------
 weed/replication/sink/gcssink/gcs_sink.go            |  2 +-
 weed/replication/sink/localsink/local_sink.go        |  2 +-
 weed/s3api/filer_multipart.go                        |  4 ++--
 weed/server/filer_grpc_server.go                     | 12 ++++++------
 weed/server/filer_grpc_server_remote.go              |  2 +-
 weed/server/filer_grpc_server_rename.go              |  2 +-
 weed/server/filer_server_handlers_read.go            |  8 ++++----
 weed/server/filer_server_handlers_write_autochunk.go |  2 +-
 weed/server/filer_server_handlers_write_cipher.go    |  2 +-
 weed/server/webdav_server.go                         |  6 +++---
 weed/shell/command_fs_cat.go                         |  2 +-
 weed/shell/command_fs_du.go                          |  2 +-
 weed/shell/command_fs_ls.go                          |  2 +-
 weed/shell/command_fs_meta_cat.go                    |  4 ++--
 weed/shell/command_volume_fsck.go                    |  2 +-
 56 files changed, 107 insertions(+), 103 deletions(-)

diff --git a/unmaintained/see_meta/see_meta.go b/unmaintained/see_meta/see_meta.go
index 405aed0ba..6fc88358c 100644
--- a/unmaintained/see_meta/see_meta.go
+++ b/unmaintained/see_meta/see_meta.go
@@ -59,7 +59,7 @@ func walkMetaFile(dst *os.File) error {
 		}
 
 		fmt.Fprintf(os.Stdout, "file %s %v\n", util.FullPath(fullEntry.Dir).Child(fullEntry.Entry.Name), fullEntry.Entry.Attributes.String())
-		for i, chunk := range fullEntry.Entry.Chunks {
+		for i, chunk := range fullEntry.Entry.GetChunks() {
 			fmt.Fprintf(os.Stdout, " chunk: %d %v %d,%x%08x\n", i+1, chunk, chunk.Fid.VolumeId, chunk.Fid.FileKey, chunk.Fid.Cookie)
 		}
 
diff --git a/weed/command/filer_cat.go b/weed/command/filer_cat.go
index c5ae7672e..c310b2b43 100644
--- a/weed/command/filer_cat.go
+++ b/weed/command/filer_cat.go
@@ -114,7 +114,7 @@ func runFilerCat(cmd *Command, args []string) bool {
 
 		filerCat.filerClient = client
 
-		return filer.StreamContent(&filerCat, writer, respLookupEntry.Entry.Chunks, 0, int64(filer.FileSize(respLookupEntry.Entry)))
+		return filer.StreamContent(&filerCat, writer, respLookupEntry.Entry.GetChunks(), 0, int64(filer.FileSize(respLookupEntry.Entry)))
 
 	})
diff --git a/weed/filer/abstract_sql/abstract_sql_store.go b/weed/filer/abstract_sql/abstract_sql_store.go
index 40b719f27..fdfe13d20 100644
--- a/weed/filer/abstract_sql/abstract_sql_store.go
+++ b/weed/filer/abstract_sql/abstract_sql_store.go
@@ -158,7 +158,7 @@ func (store *AbstractSqlStore) InsertEntry(ctx context.Context, entry *filer.Ent
 		return fmt.Errorf("encode %s: %s", entry.FullPath, err)
 	}
 
-	if len(entry.Chunks) > filer.CountEntryChunksForGzip {
+	if len(entry.GetChunks()) > filer.CountEntryChunksForGzip {
 		meta = util.MaybeGzipData(meta)
 	}
 
diff --git a/weed/filer/arangodb/arangodb_store.go b/weed/filer/arangodb/arangodb_store.go
index ab5f8db4f..457b5f28b 100644
--- a/weed/filer/arangodb/arangodb_store.go
+++ b/weed/filer/arangodb/arangodb_store.go
@@ -157,7 +157,7 @@ func (store *ArangodbStore) InsertEntry(ctx context.Context, entry *filer.Entry)
 		return fmt.Errorf("encode %s: %s", entry.FullPath, err)
 	}
 
-	if len(entry.Chunks) > filer.CountEntryChunksForGzip {
+	if len(entry.GetChunks()) > filer.CountEntryChunksForGzip {
 		meta = util.MaybeGzipData(meta)
 	}
 	model := &Model{
@@ -196,7 +196,7 @@ func (store *ArangodbStore) UpdateEntry(ctx context.Context, entry *filer.Entry)
 		return fmt.Errorf("encode %s: %s", entry.FullPath, err)
 	}
 
-	if len(entry.Chunks) > filer.CountEntryChunksForGzip {
+	if len(entry.GetChunks()) > filer.CountEntryChunksForGzip {
 		meta = util.MaybeGzipData(meta)
 	}
 	model := &Model{
diff --git a/weed/filer/cassandra/cassandra_store.go b/weed/filer/cassandra/cassandra_store.go
index 49ff29c44..b13a50fd3 100644
--- a/weed/filer/cassandra/cassandra_store.go
+++ b/weed/filer/cassandra/cassandra_store.go
@@ -100,7 +100,7 @@ func (store *CassandraStore) InsertEntry(ctx context.Context, entry *filer.Entry
 		return fmt.Errorf("encode %s: %s", entry.FullPath, err)
 	}
 
-	if len(entry.Chunks) > filer.CountEntryChunksForGzip {
+	if len(entry.GetChunks()) > filer.CountEntryChunksForGzip {
 		meta = util.MaybeGzipData(meta)
 	}
 
diff --git a/weed/filer/entry.go b/weed/filer/entry.go
index 8c28c8513..2062401f7 100644
--- a/weed/filer/entry.go
+++ b/weed/filer/entry.go
@@ -46,7 +46,7 @@ type Entry struct {
 }
 
 func (entry *Entry) Size() uint64 {
-	return maxUint64(maxUint64(TotalSize(entry.Chunks), entry.FileSize), uint64(len(entry.Content)))
+	return maxUint64(maxUint64(TotalSize(entry.GetChunks()), entry.FileSize), uint64(len(entry.Content)))
 }
 
 func (entry *Entry) Timestamp() time.Time {
@@ -91,7 +91,7 @@ func (entry *Entry) ToExistingProtoEntry(message *filer_pb.Entry) {
 	}
 	message.IsDirectory = entry.IsDirectory()
 	message.Attributes = EntryAttributeToPb(entry)
-	message.Chunks = entry.Chunks
+	message.Chunks = entry.GetChunks()
 	message.Extended = entry.Extended
 	message.HardLinkId = entry.HardLinkId
 	message.HardLinkCounter = entry.HardLinkCounter
@@ -123,6 +123,10 @@ func (entry *Entry) ToProtoFullEntry() *filer_pb.FullEntry {
 	}
 }
 
+func (entry *Entry) GetChunks() []*filer_pb.FileChunk {
+	return entry.Chunks
+}
+
 func FromPbEntry(dir string, entry *filer_pb.Entry) *Entry {
 	t := &Entry{}
 	t.FullPath = util.NewFullPath(dir, entry.Name)
diff --git a/weed/filer/etcd/etcd_store.go b/weed/filer/etcd/etcd_store.go
index 26b9ae1f6..b2e0fedda 100644
--- a/weed/filer/etcd/etcd_store.go
+++ b/weed/filer/etcd/etcd_store.go
@@ -82,7 +82,7 @@ func (store *EtcdStore) InsertEntry(ctx context.Context, entry *filer.Entry) (er
 		return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err)
 	}
 
-	if len(entry.Chunks) > filer.CountEntryChunksForGzip {
+	if len(entry.GetChunks()) > filer.CountEntryChunksForGzip {
 		meta = weed_util.MaybeGzipData(meta)
 	}
 
diff --git a/weed/filer/filechunks.go b/weed/filer/filechunks.go
index 965c73a77..de57a0532 100644
--- a/weed/filer/filechunks.go
+++ b/weed/filer/filechunks.go
@@ -31,19 +31,19 @@ func FileSize(entry *filer_pb.Entry) (size uint64) {
 			fileSize = maxUint64(fileSize, uint64(entry.RemoteEntry.RemoteSize))
 		}
 	}
-	return maxUint64(TotalSize(entry.Chunks), fileSize)
+	return maxUint64(TotalSize(entry.GetChunks()), fileSize)
 }
 
 func ETag(entry *filer_pb.Entry) (etag string) {
 	if entry.Attributes == nil || entry.Attributes.Md5 == nil {
-		return ETagChunks(entry.Chunks)
+		return ETagChunks(entry.GetChunks())
 	}
 	return fmt.Sprintf("%x", entry.Attributes.Md5)
 }
 
 func ETagEntry(entry *Entry) (etag string) {
 	if entry.Attr.Md5 == nil {
-		return ETagChunks(entry.Chunks)
+		return ETagChunks(entry.GetChunks())
 	}
 	return fmt.Sprintf("%x", entry.Attr.Md5)
 }
diff --git a/weed/filer/filer_conf.go b/weed/filer/filer_conf.go
index d3eb32988..ccb1acb3c 100644
--- a/weed/filer/filer_conf.go
+++ b/weed/filer/filer_conf.go
@@ -75,7 +75,7 @@ func (fc *FilerConf) loadFromFiler(filer *Filer) (err error) {
 		return fc.LoadFromBytes(entry.Content)
 	}
 
-	return fc.loadFromChunks(filer, entry.Content, entry.Chunks, entry.Size())
+	return fc.loadFromChunks(filer, entry.Content, entry.GetChunks(), entry.Size())
 }
 
 func (fc *FilerConf) loadFromChunks(filer *Filer, content []byte, chunks []*filer_pb.FileChunk, size uint64) (err error) {
diff --git a/weed/filer/filer_delete_entry.go b/weed/filer/filer_delete_entry.go
index 2ea20ea64..87a58fd86 100644
--- a/weed/filer/filer_delete_entry.go
+++ b/weed/filer/filer_delete_entry.go
@@ -48,7 +48,7 @@ func (f *Filer) DeleteEntryMetaAndData(ctx context.Context, p util.FullPath, isR
 	}
 
 	if shouldDeleteChunks && !isDeleteCollection {
-		f.DirectDeleteChunks(entry.Chunks)
+		f.DirectDeleteChunks(entry.GetChunks())
 	}
 
 	// delete the file or folder
@@ -93,7 +93,7 @@ func (f *Filer) doBatchDeleteFolderMetaAndData(ctx context.Context, entry *Entry
 				// hard link chunk data are deleted separately
 				err = onHardLinkIdsFn([]HardLinkId{sub.HardLinkId})
 			} else {
-				err = onChunksFn(sub.Chunks)
+				err = onChunksFn(sub.GetChunks())
 			}
 		}
 		if err != nil && !ignoreRecursiveError {
diff --git a/weed/filer/filer_deletion.go b/weed/filer/filer_deletion.go
index 504ee2986..439a5296f 100644
--- a/weed/filer/filer_deletion.go
+++ b/weed/filer/filer_deletion.go
@@ -143,17 +143,17 @@ func (f *Filer) deleteChunksIfNotNew(oldEntry, newEntry *Entry) {
 		return
 	}
 	if newEntry == nil {
-		f.DeleteChunks(oldEntry.Chunks)
+		f.DeleteChunks(oldEntry.GetChunks())
 		return
 	}
 
 	var toDelete []*filer_pb.FileChunk
 	newChunkIds := make(map[string]bool)
 	newDataChunks, newManifestChunks, err := ResolveChunkManifest(f.MasterClient.GetLookupFileIdFunction(),
-		newEntry.Chunks, 0, math.MaxInt64)
+		newEntry.GetChunks(), 0, math.MaxInt64)
 	if err != nil {
 		glog.Errorf("Failed to resolve new entry chunks when delete old entry chunks. new: %s, old: %s",
-			newEntry.Chunks, oldEntry.Chunks)
+			newEntry.GetChunks(), oldEntry.Chunks)
 		return
 	}
 	for _, newChunk := range newDataChunks {
@@ -164,10 +164,10 @@ func (f *Filer) deleteChunksIfNotNew(oldEntry, newEntry *Entry) {
 	}
 
 	oldDataChunks, oldManifestChunks, err := ResolveChunkManifest(f.MasterClient.GetLookupFileIdFunction(),
-		oldEntry.Chunks, 0, math.MaxInt64)
+		oldEntry.GetChunks(), 0, math.MaxInt64)
 	if err != nil {
 		glog.Errorf("Failed to resolve old entry chunks when delete old entry chunks. new: %s, old: %s",
-			newEntry.Chunks, oldEntry.Chunks)
+			newEntry.GetChunks(), oldEntry.GetChunks())
 		return
 	}
 	for _, oldChunk := range oldDataChunks {
diff --git a/weed/filer/filer_notify.go b/weed/filer/filer_notify.go
index 7033a70c4..77b659288 100644
--- a/weed/filer/filer_notify.go
+++ b/weed/filer/filer_notify.go
@@ -153,7 +153,7 @@ func (f *Filer) ReadPersistedLogBuffer(startTime time.Time, stopTsNs int64, each
 			}
 		}
 		// println("processing", hourMinuteEntry.FullPath)
-		chunkedFileReader := NewChunkStreamReaderFromFiler(f.MasterClient, hourMinuteEntry.Chunks)
+		chunkedFileReader := NewChunkStreamReaderFromFiler(f.MasterClient, hourMinuteEntry.GetChunks())
 		if lastTsNs, err = ReadEachLogEntry(chunkedFileReader, sizeBuf, startTsNs, stopTsNs, eachLogEntryFn); err != nil {
 			chunkedFileReader.Close()
 			if err == io.EOF {
diff --git a/weed/filer/filer_notify_append.go b/weed/filer/filer_notify_append.go
index 94d976f1e..5c03d4f16 100644
--- a/weed/filer/filer_notify_append.go
+++ b/weed/filer/filer_notify_append.go
@@ -36,11 +36,11 @@ func (f *Filer) appendToFile(targetFile string, data []byte) error {
 	} else if err != nil {
 		return fmt.Errorf("find %s: %v", fullpath, err)
 	} else {
-		offset = int64(TotalSize(entry.Chunks))
+		offset = int64(TotalSize(entry.GetChunks()))
 	}
 
 	// append to existing chunks
-	entry.Chunks = append(entry.Chunks, uploadResult.ToPbFileChunk(assignResult.Fid, offset))
+	entry.Chunks = append(entry.GetChunks(), uploadResult.ToPbFileChunk(assignResult.Fid, offset))
 
 	// update the entry
 	err = f.CreateEntry(context.Background(), entry, false, false, nil, false)
diff --git a/weed/filer/filer_notify_test.go b/weed/filer/filer_notify_test.go
index 9ad58629a..af99d7015 100644
--- a/weed/filer/filer_notify_test.go
+++ b/weed/filer/filer_notify_test.go
@@ -44,7 +44,7 @@ func TestProtoMarshal(t *testing.T) {
 	notification2 := &filer_pb.EventNotification{}
 	proto.Unmarshal(text, notification2)
 
-	if notification2.OldEntry.Chunks[0].SourceFileId != notification.OldEntry.Chunks[0].SourceFileId {
+	if notification2.OldEntry.GetChunks()[0].SourceFileId != notification.OldEntry.GetChunks()[0].SourceFileId {
 		t.Fatalf("marshal/unmarshal error: %s", text)
 	}
 
diff --git a/weed/filer/filer_on_meta_event.go b/weed/filer/filer_on_meta_event.go
index c36bce577..6cec80148 100644
--- a/weed/filer/filer_on_meta_event.go
+++ b/weed/filer/filer_on_meta_event.go
@@ -60,7 +60,7 @@ func (f *Filer) readEntry(chunks []*filer_pb.FileChunk, size uint64) ([]byte, er
 
 func (f *Filer) reloadFilerConfiguration(entry *filer_pb.Entry) {
 	fc := NewFilerConf()
-	err := fc.loadFromChunks(f, entry.Content, entry.Chunks, FileSize(entry))
+	err := fc.loadFromChunks(f, entry.Content, entry.GetChunks(), FileSize(entry))
 	if err != nil {
 		glog.Errorf("read filer conf chunks: %v", err)
 		return
diff --git a/weed/filer/filerstore_wrapper.go b/weed/filer/filerstore_wrapper.go
index 493ba845a..19dfeac96 100644
--- a/weed/filer/filerstore_wrapper.go
+++ b/weed/filer/filerstore_wrapper.go
@@ -118,7 +118,7 @@ func (fsw *FilerStoreWrapper) InsertEntry(ctx context.Context, entry *Entry) err
 		stats.FilerStoreHistogram.WithLabelValues(actualStore.GetName(), "insert").Observe(time.Since(start).Seconds())
 	}()
 
-	filer_pb.BeforeEntrySerialization(entry.Chunks)
+	filer_pb.BeforeEntrySerialization(entry.GetChunks())
 	if entry.Mime == "application/octet-stream" {
 		entry.Mime = ""
 	}
@@ -139,7 +139,7 @@ func (fsw *FilerStoreWrapper) UpdateEntry(ctx context.Context, entry *Entry) err
 		stats.FilerStoreHistogram.WithLabelValues(actualStore.GetName(), "update").Observe(time.Since(start).Seconds())
 	}()
 
-	filer_pb.BeforeEntrySerialization(entry.Chunks)
+	filer_pb.BeforeEntrySerialization(entry.GetChunks())
 	if entry.Mime == "application/octet-stream" {
 		entry.Mime = ""
 	}
@@ -168,7 +168,7 @@ func (fsw *FilerStoreWrapper) FindEntry(ctx context.Context, fp util.FullPath) (
 
 	fsw.maybeReadHardLink(ctx, entry)
 
-	filer_pb.AfterEntryDeserialization(entry.Chunks)
+	filer_pb.AfterEntryDeserialization(entry.GetChunks())
 	return
 }
 
@@ -239,7 +239,7 @@ func (fsw *FilerStoreWrapper) ListDirectoryEntries(ctx context.Context, dirPath
 	// glog.V(4).Infof("ListDirectoryEntries %s from %s limit %d", dirPath, startFileName, limit)
 	return actualStore.ListDirectoryEntries(ctx, dirPath, startFileName, includeStartFile, limit, func(entry *Entry) bool {
 		fsw.maybeReadHardLink(ctx, entry)
-		filer_pb.AfterEntryDeserialization(entry.Chunks)
+		filer_pb.AfterEntryDeserialization(entry.GetChunks())
 		return eachEntryFunc(entry)
 	})
 }
@@ -257,7 +257,7 @@ func (fsw *FilerStoreWrapper) ListDirectoryPrefixedEntries(ctx context.Context,
 	// glog.V(4).Infof("ListDirectoryPrefixedEntries %s from %s prefix %s limit %d", dirPath, startFileName, prefix, limit)
 	adjustedEntryFunc := func(entry *Entry) bool {
 		fsw.maybeReadHardLink(ctx, entry)
-		filer_pb.AfterEntryDeserialization(entry.Chunks)
+		filer_pb.AfterEntryDeserialization(entry.GetChunks())
 		return eachEntryFunc(entry)
 	}
 	lastFileName, err = actualStore.ListDirectoryPrefixedEntries(ctx, dirPath, startFileName, includeStartFile, limit, prefix, adjustedEntryFunc)
diff --git a/weed/filer/hbase/hbase_store.go b/weed/filer/hbase/hbase_store.go
index 1bd5b519f..1a0e3c893 100644
--- a/weed/filer/hbase/hbase_store.go
+++ b/weed/filer/hbase/hbase_store.go
@@ -75,7 +75,7 @@ func (store *HbaseStore) InsertEntry(ctx context.Context, entry *filer.Entry) er
 	if err != nil {
 		return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err)
 	}
-	if len(entry.Chunks) > filer.CountEntryChunksForGzip {
+	if len(entry.GetChunks()) > filer.CountEntryChunksForGzip {
 		value = util.MaybeGzipData(value)
 	}
 
diff --git a/weed/filer/leveldb/leveldb_store.go b/weed/filer/leveldb/leveldb_store.go
index ebf63e5d3..747d1104d 100644
--- a/weed/filer/leveldb/leveldb_store.go
+++ b/weed/filer/leveldb/leveldb_store.go
@@ -86,7 +86,7 @@ func (store *LevelDBStore) InsertEntry(ctx context.Context, entry *filer.Entry)
 		return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err)
 	}
 
-	if len(entry.Chunks) > filer.CountEntryChunksForGzip {
+	if len(entry.GetChunks()) > filer.CountEntryChunksForGzip {
 		value = weed_util.MaybeGzipData(value)
 	}
 
@@ -96,7 +96,7 @@ func (store *LevelDBStore) InsertEntry(ctx context.Context, entry *filer.Entry)
 		return fmt.Errorf("persisting %s : %v", entry.FullPath, err)
 	}
 
-	// println("saved", entry.FullPath, "chunks", len(entry.Chunks))
+	// println("saved", entry.FullPath, "chunks", len(entry.GetChunks()))
 
 	return nil
 }
@@ -126,7 +126,7 @@ func (store *LevelDBStore) FindEntry(ctx context.Context, fullpath weed_util.Ful
 		return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err)
 	}
 
-	// println("read", entry.FullPath, "chunks", len(entry.Chunks), "data", len(data), string(data))
+	// println("read", entry.FullPath, "chunks", len(entry.GetChunks()), "data", len(data), string(data))
 
 	return entry, nil
 }
diff --git a/weed/filer/leveldb2/leveldb2_store.go b/weed/filer/leveldb2/leveldb2_store.go
index 9befbdc1c..78d15382f 100644
--- a/weed/filer/leveldb2/leveldb2_store.go
+++ b/weed/filer/leveldb2/leveldb2_store.go
@@ -88,7 +88,7 @@ func (store *LevelDB2Store) InsertEntry(ctx context.Context, entry *filer.Entry)
 		return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err)
 	}
 
-	if len(entry.Chunks) > filer.CountEntryChunksForGzip {
+	if len(entry.GetChunks()) > filer.CountEntryChunksForGzip {
 		value = weed_util.MaybeGzipData(value)
 	}
 
@@ -98,7 +98,7 @@ func (store *LevelDB2Store) InsertEntry(ctx context.Context, entry *filer.Entry)
 		return fmt.Errorf("persisting %s : %v", entry.FullPath, err)
 	}
 
-	// println("saved", entry.FullPath, "chunks", len(entry.Chunks))
+	// println("saved", entry.FullPath, "chunks", len(entry.GetChunks()))
 
 	return nil
 }
@@ -129,7 +129,7 @@ func (store *LevelDB2Store) FindEntry(ctx context.Context, fullpath weed_util.Fu
 		return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err)
 	}
 
-	// println("read", entry.FullPath, "chunks", len(entry.Chunks), "data", len(data), string(data))
+	// println("read", entry.FullPath, "chunks", len(entry.GetChunks()), "data", len(data), string(data))
 
 	return entry, nil
 }
@@ -208,7 +208,7 @@ func (store *LevelDB2Store) ListDirectoryPrefixedEntries(ctx context.Context, di
 			FullPath: weed_util.NewFullPath(string(dirPath), fileName),
 		}
 
-		// println("list", entry.FullPath, "chunks", len(entry.Chunks))
+		// println("list", entry.FullPath, "chunks", len(entry.GetChunks()))
 		if decodeErr := entry.DecodeAttributesAndChunks(weed_util.MaybeDecompressData(iter.Value())); decodeErr != nil {
 			err = decodeErr
 			glog.V(0).Infof("list %s : %v", entry.FullPath, err)
diff --git a/weed/filer/leveldb3/leveldb3_store.go b/weed/filer/leveldb3/leveldb3_store.go
index 4ceeb0bbb..406dc80be 100644
--- a/weed/filer/leveldb3/leveldb3_store.go
+++ b/weed/filer/leveldb3/leveldb3_store.go
@@ -185,7 +185,7 @@ func (store *LevelDB3Store) InsertEntry(ctx context.Context, entry *filer.Entry)
 		return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err)
 	}
 
-	if len(entry.Chunks) > filer.CountEntryChunksForGzip {
+	if len(entry.GetChunks()) > filer.CountEntryChunksForGzip {
 		value = weed_util.MaybeGzipData(value)
 	}
 
@@ -195,7 +195,7 @@ func (store *LevelDB3Store) InsertEntry(ctx context.Context, entry *filer.Entry)
 		return fmt.Errorf("persisting %s : %v", entry.FullPath, err)
 	}
 
-	// println("saved", entry.FullPath, "chunks", len(entry.Chunks))
+	// println("saved", entry.FullPath, "chunks", len(entry.GetChunks()))
 
 	return nil
 }
@@ -232,7 +232,7 @@ func (store *LevelDB3Store) FindEntry(ctx context.Context, fullpath weed_util.Fu
 		return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err)
 	}
 
-	// println("read", entry.FullPath, "chunks", len(entry.Chunks), "data", len(data), string(data))
+	// println("read", entry.FullPath, "chunks", len(entry.GetChunks()), "data", len(data), string(data))
 
 	return entry, nil
 }
@@ -336,7 +336,7 @@ func (store *LevelDB3Store) ListDirectoryPrefixedEntries(ctx context.Context, di
 			FullPath: weed_util.NewFullPath(string(dirPath), fileName),
 		}
 
-		// println("list", entry.FullPath, "chunks", len(entry.Chunks))
+		// println("list", entry.FullPath, "chunks", len(entry.GetChunks()))
 		if decodeErr := entry.DecodeAttributesAndChunks(weed_util.MaybeDecompressData(iter.Value())); decodeErr != nil {
 			err = decodeErr
 			glog.V(0).Infof("list %s : %v", entry.FullPath, err)
diff --git a/weed/filer/mongodb/mongodb_store.go b/weed/filer/mongodb/mongodb_store.go
index f4ab56603..49190058b 100644
--- a/weed/filer/mongodb/mongodb_store.go
+++ b/weed/filer/mongodb/mongodb_store.go
@@ -107,7 +107,7 @@ func (store *MongodbStore) UpdateEntry(ctx context.Context, entry *filer.Entry)
 		return fmt.Errorf("encode %s: %s", entry.FullPath, err)
 	}
 
-	if len(entry.Chunks) > filer.CountEntryChunksForGzip {
+	if len(entry.GetChunks()) > filer.CountEntryChunksForGzip {
 		meta = util.MaybeGzipData(meta)
 	}
 
diff --git a/weed/filer/read_remote.go b/weed/filer/read_remote.go
index cf0d8cbee..992d1e95a 100644
--- a/weed/filer/read_remote.go
+++ b/weed/filer/read_remote.go
@@ -8,7 +8,7 @@ import (
 )
 
 func (entry *Entry) IsInRemoteOnly() bool {
-	return len(entry.Chunks) == 0 && entry.Remote != nil && entry.Remote.RemoteSize > 0
+	return len(entry.GetChunks()) == 0 && entry.Remote != nil && entry.Remote.RemoteSize > 0
 }
 
 func MapFullPathToRemoteStorageLocation(localMountedDir util.FullPath, remoteMountedLocation *remote_pb.RemoteStorageLocation, fp util.FullPath) *remote_pb.RemoteStorageLocation {
diff --git a/weed/filer/read_write.go b/weed/filer/read_write.go
index 3aef33a03..dc4f3d0d8 100644
--- a/weed/filer/read_write.go
+++ b/weed/filer/read_write.go
@@ -23,7 +23,7 @@ func ReadEntry(masterClient *wdclient.MasterClient, filerClient filer_pb.Seaweed
 		return err
 	}
 
-	return StreamContent(masterClient, byteBuffer, respLookupEntry.Entry.Chunks, 0, int64(FileSize(respLookupEntry.Entry)))
+	return StreamContent(masterClient, byteBuffer, respLookupEntry.Entry.GetChunks(), 0, int64(FileSize(respLookupEntry.Entry)))
 }
 
diff --git a/weed/filer/redis/universal_redis_store.go b/weed/filer/redis/universal_redis_store.go
index 8e1fa326b..e56a6bf3c 100644
--- a/weed/filer/redis/universal_redis_store.go
+++ b/weed/filer/redis/universal_redis_store.go
@@ -56,7 +56,7 @@ func (store *UniversalRedisStore) doInsertEntry(ctx context.Context, entry *file
 		return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err)
 	}
 
-	if len(entry.Chunks) > filer.CountEntryChunksForGzip {
+	if len(entry.GetChunks()) > filer.CountEntryChunksForGzip {
 		value = util.MaybeGzipData(value)
 	}
 
diff --git a/weed/filer/redis2/universal_redis_store.go b/weed/filer/redis2/universal_redis_store.go
index 0c79c5255..6b0e65c3d 100644
--- a/weed/filer/redis2/universal_redis_store.go
+++ b/weed/filer/redis2/universal_redis_store.go
@@ -71,7 +71,7 @@ func (store *UniversalRedis2Store) doInsertEntry(ctx context.Context, entry *fil
 		return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err)
 	}
 
-	if len(entry.Chunks) > filer.CountEntryChunksForGzip {
+	if len(entry.GetChunks()) > filer.CountEntryChunksForGzip {
 		value = util.MaybeGzipData(value)
 	}
 
diff --git a/weed/filer/redis3/universal_redis_store.go b/weed/filer/redis3/universal_redis_store.go
index 88d4ed1e3..2fb9a5b3f 100644
--- a/weed/filer/redis3/universal_redis_store.go
+++ b/weed/filer/redis3/universal_redis_store.go
@@ -56,7 +56,7 @@ func (store *UniversalRedis3Store) doInsertEntry(ctx context.Context, entry *fil
 		return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err)
 	}
 
-	if len(entry.Chunks) > filer.CountEntryChunksForGzip {
+	if len(entry.GetChunks()) > filer.CountEntryChunksForGzip {
 		value = util.MaybeGzipData(value)
 	}
 
diff --git a/weed/filer/redis_lua/universal_redis_store.go b/weed/filer/redis_lua/universal_redis_store.go
index 7da279fb6..59c128030 100644
--- a/weed/filer/redis_lua/universal_redis_store.go
+++ b/weed/filer/redis_lua/universal_redis_store.go
@@ -53,7 +53,7 @@ func (store *UniversalRedisLuaStore) InsertEntry(ctx context.Context, entry *fil
 		return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err)
 	}
 
-	if len(entry.Chunks) > filer.CountEntryChunksForGzip {
+	if len(entry.GetChunks()) > filer.CountEntryChunksForGzip {
 		value = util.MaybeGzipData(value)
 	}
 
diff --git a/weed/filer/rocksdb/rocksdb_store.go b/weed/filer/rocksdb/rocksdb_store.go
index 45bee62f7..f860f528a 100644
--- a/weed/filer/rocksdb/rocksdb_store.go
+++ b/weed/filer/rocksdb/rocksdb_store.go
@@ -108,7 +108,7 @@ func (store *RocksDBStore) InsertEntry(ctx context.Context, entry *filer.Entry)
 		return fmt.Errorf("persisting %s : %v", entry.FullPath, err)
 	}
 
-	// println("saved", entry.FullPath, "chunks", len(entry.Chunks))
+	// println("saved", entry.FullPath, "chunks", len(entry.GetChunks()))
 
 	return nil
 }
@@ -140,7 +140,7 @@ func (store *RocksDBStore) FindEntry(ctx context.Context, fullpath weed_util.Ful
 		return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err)
 	}
 
-	// println("read", entry.FullPath, "chunks", len(entry.Chunks), "data", len(data), string(data))
+	// println("read", entry.FullPath, "chunks", len(entry.GetChunks()), "data", len(data), string(data))
 
 	return entry, nil
 }
@@ -259,7 +259,7 @@ func (store *RocksDBStore) ListDirectoryPrefixedEntries(ctx context.Context, dir
 		}
 		lastFileName = fileName
-		// println("list", entry.FullPath, "chunks", len(entry.Chunks))
+		// println("list", entry.FullPath, "chunks", len(entry.GetChunks()))
 		if decodeErr := entry.DecodeAttributesAndChunks(value); decodeErr != nil {
 			err = decodeErr
 			glog.V(0).Infof("list %s : %v", entry.FullPath, err)
diff --git a/weed/filer/stream.go b/weed/filer/stream.go
index fdbcfc4ec..f28341be4 100644
--- a/weed/filer/stream.go
+++ b/weed/filer/stream.go
@@ -30,7 +30,7 @@ func HasData(entry *filer_pb.Entry) bool {
 		return true
 	}
 
-	return len(entry.Chunks) > 0
+	return len(entry.GetChunks()) > 0
 }
 
 func IsSameData(a, b *filer_pb.Entry) bool {
@@ -64,7 +64,7 @@ func NewFileReader(filerClient filer_pb.FilerClient, entry *filer_pb.Entry) io.R
 	if len(entry.Content) > 0 {
 		return bytes.NewReader(entry.Content)
 	}
-	return NewChunkStreamReader(filerClient, entry.Chunks)
+	return NewChunkStreamReader(filerClient, entry.GetChunks())
 }
 
 func StreamContent(masterClient wdclient.HasLookupFileIdFunction, writer io.Writer, chunks []*filer_pb.FileChunk, offset int64, size int64) error {
diff --git a/weed/filer/ydb/ydb_store.go b/weed/filer/ydb/ydb_store.go
index 7fa4b2289..7b26d6182 100644
--- a/weed/filer/ydb/ydb_store.go
+++ b/weed/filer/ydb/ydb_store.go
@@ -144,7 +144,7 @@ func (store *YdbStore) insertOrUpdateEntry(ctx context.Context, entry *filer.Ent
 		return fmt.Errorf("encode %s: %s", entry.FullPath, err)
 	}
 
-	if len(entry.Chunks) > filer.CountEntryChunksForGzip {
+	if len(entry.GetChunks()) > filer.CountEntryChunksForGzip {
 		meta = util.MaybeGzipData(meta)
 	}
 	tablePathPrefix, shortDir := store.getPrefix(ctx, &dir)
diff --git a/weed/mount/filehandle.go b/weed/mount/filehandle.go
index b2e6730c0..c2a197da7 100644
--- a/weed/mount/filehandle.go
+++ b/weed/mount/filehandle.go
@@ -94,7 +94,7 @@ func (fh *FileHandle) AddChunks(chunks []*filer_pb.FileChunk) {
 	}
 
 	// pick out-of-order chunks from existing chunks
-	for _, chunk := range fh.entry.Chunks {
+	for _, chunk := range fh.entry.GetChunks() {
 		if lessThan(earliestChunk, chunk) {
 			chunks = append(chunks, chunk)
 		}
@@ -105,9 +105,9 @@ func (fh *FileHandle) AddChunks(chunks []*filer_pb.FileChunk) {
 		return lessThan(a, b)
 	})
 
-	glog.V(4).Infof("%s existing %d chunks adds %d more", fh.FullPath(), len(fh.entry.Chunks), len(chunks))
+	glog.V(4).Infof("%s existing %d chunks adds %d more", fh.FullPath(), len(fh.entry.GetChunks()), len(chunks))
 
-	fh.entry.Chunks = append(fh.entry.Chunks, newChunks...)
+	fh.entry.Chunks = append(fh.entry.GetChunks(), newChunks...)
 	fh.entryViewCache = nil
 }
 
diff --git a/weed/mount/filehandle_read.go b/weed/mount/filehandle_read.go
index d0192e73e..08f678e69 100644
--- a/weed/mount/filehandle_read.go
+++ b/weed/mount/filehandle_read.go
@@ -56,7 +56,7 @@ func (fh *FileHandle) readFromChunks(buff []byte, offset int64) (int64, error) {
 
 	var chunkResolveErr error
 	if fh.entryViewCache == nil {
-		fh.entryViewCache, chunkResolveErr = filer.NonOverlappingVisibleIntervals(fh.wfs.LookupFn(), entry.Chunks, 0, fileSize)
+		fh.entryViewCache, chunkResolveErr = filer.NonOverlappingVisibleIntervals(fh.wfs.LookupFn(), entry.GetChunks(), 0, fileSize)
 		if chunkResolveErr != nil {
 			return 0, fmt.Errorf("fail to resolve chunk manifest: %v", chunkResolveErr)
 		}
diff --git a/weed/mount/weedfs_attr.go b/weed/mount/weedfs_attr.go
index 3ef36c492..7691d4e59 100644
--- a/weed/mount/weedfs_attr.go
+++ b/weed/mount/weedfs_attr.go
@@ -50,12 +50,12 @@ func (wfs *WFS) SetAttr(cancel <-chan struct{}, input *fuse.SetAttrIn, out *fuse
 	}
 
 	if size, ok := input.GetSize(); ok && entry != nil {
-		glog.V(4).Infof("%v setattr set size=%v chunks=%d", path, size, len(entry.Chunks))
+		glog.V(4).Infof("%v setattr set size=%v chunks=%d", path, size, len(entry.GetChunks()))
 		if size < filer.FileSize(entry) {
 			// fmt.Printf("truncate %v \n", fullPath)
 			var chunks []*filer_pb.FileChunk
 			var truncatedChunks []*filer_pb.FileChunk
-			for _, chunk := range entry.Chunks {
+			for _, chunk := range entry.GetChunks() {
 				int64Size := int64(chunk.Size)
 				if chunk.Offset+int64Size > int64(size) {
 					// this chunk is truncated
diff --git a/weed/mount/weedfs_file_lseek.go b/weed/mount/weedfs_file_lseek.go
index 0564ac0ee..43970983b 100644
--- a/weed/mount/weedfs_file_lseek.go
+++ b/weed/mount/weedfs_file_lseek.go
@@ -59,7 +59,7 @@ func (wfs *WFS) Lseek(cancel <-chan struct{}, in *fuse.LseekIn, out *fuse.LseekO
 	// refresh view cache if necessary
 	if fh.entryViewCache == nil {
 		var err error
-		fh.entryViewCache, err = filer.NonOverlappingVisibleIntervals(fh.wfs.LookupFn(), fh.entry.Chunks, 0, fileSize)
+		fh.entryViewCache, err = filer.NonOverlappingVisibleIntervals(fh.wfs.LookupFn(), fh.entry.GetChunks(), 0, fileSize)
 		if err != nil {
 			return fuse.EIO
 		}
diff --git a/weed/mount/weedfs_file_sync.go b/weed/mount/weedfs_file_sync.go
index 7b80ddc73..585ca0b47 100644
--- a/weed/mount/weedfs_file_sync.go
+++ b/weed/mount/weedfs_file_sync.go
@@ -148,12 +148,12 @@ func (wfs *WFS) doFlush(fh *FileHandle, uid, gid uint32) fuse.Status {
 			SkipCheckParentDirectory: true,
 		}
 
-		glog.V(4).Infof("%s set chunks: %v", fileFullPath, len(entry.Chunks))
-		for i, chunk := range entry.Chunks {
+		glog.V(4).Infof("%s set chunks: %v", fileFullPath, len(entry.GetChunks()))
+		for i, chunk := range entry.GetChunks() {
 			glog.V(4).Infof("%s chunks %d: %v [%d,%d)", fileFullPath, i, chunk.GetFileIdString(), chunk.Offset, chunk.Offset+int64(chunk.Size))
 		}
 
-		manifestChunks, nonManifestChunks := filer.SeparateManifestChunks(entry.Chunks)
+		manifestChunks, nonManifestChunks := filer.SeparateManifestChunks(entry.GetChunks())
 
 		chunks, _ := filer.CompactFileChunks(wfs.LookupFn(), nonManifestChunks)
 		chunks, manifestErr := filer.MaybeManifestize(wfs.saveDataAsChunk(fileFullPath), chunks)
diff --git a/weed/mount/weedfs_link.go b/weed/mount/weedfs_link.go
index d4ba63fc3..cfff69dd9 100644
--- a/weed/mount/weedfs_link.go
+++ b/weed/mount/weedfs_link.go
@@ -67,7 +67,7 @@ func (wfs *WFS) Link(cancel <-chan struct{}, in *fuse.LinkIn, name string, out *
 			Name:            name,
 			IsDirectory:     false,
 			Attributes:      oldEntry.Attributes,
-			Chunks:          oldEntry.Chunks,
+			Chunks:          oldEntry.GetChunks(),
 			Extended:        oldEntry.Extended,
 			HardLinkId:      oldEntry.HardLinkId,
 			HardLinkCounter: oldEntry.HardLinkCounter,
diff --git a/weed/pb/filer_pb/filer.pb.go b/weed/pb/filer_pb/filer.pb.go
index 1e7f5bf38..fa21531af 100644
--- a/weed/pb/filer_pb/filer.pb.go
+++ b/weed/pb/filer_pb/filer.pb.go
@@ -4480,7 +4480,7 @@ var file_filer_proto_goTypes = []interface{}{
 var file_filer_proto_depIdxs = []int32{
 	5,  // 0: filer_pb.LookupDirectoryEntryResponse.entry:type_name -> filer_pb.Entry
 	5,  // 1: filer_pb.ListEntriesResponse.entry:type_name -> filer_pb.Entry
-	8,  // 2: filer_pb.Entry.chunks:type_name -> filer_pb.FileChunk
+	8,  // 2: filer_pb.Entry.GetChunks():type_name -> filer_pb.FileChunk
 	11, // 3: filer_pb.Entry.attributes:type_name -> filer_pb.FuseAttributes
 	55, // 4: filer_pb.Entry.extended:type_name -> filer_pb.Entry.ExtendedEntry
 	4,  // 5: filer_pb.Entry.remote_entry:type_name -> filer_pb.RemoteEntry
diff --git a/weed/pb/filer_pb/filer_pb_helper.go b/weed/pb/filer_pb/filer_pb_helper.go
index 2278d9a7c..5e5d1d1ae 100644
--- a/weed/pb/filer_pb/filer_pb_helper.go
+++ b/weed/pb/filer_pb/filer_pb_helper.go
@@ -14,7 +14,7 @@ import (
 )
 
 func (entry *Entry) IsInRemoteOnly() bool {
-	return len(entry.Chunks) == 0 && entry.RemoteEntry != nil && entry.RemoteEntry.RemoteSize > 0
+	return len(entry.GetChunks()) == 0 && entry.RemoteEntry != nil && entry.RemoteEntry.RemoteSize > 0
 }
 
 func (entry *Entry) IsDirectoryKeyObject() bool {
diff --git a/weed/replication/sink/azuresink/azure_sink.go b/weed/replication/sink/azuresink/azure_sink.go
index 87540383d..9bbd7b8eb 100644
--- a/weed/replication/sink/azuresink/azure_sink.go
+++ b/weed/replication/sink/azuresink/azure_sink.go
@@ -103,7 +103,7 @@ func (g *AzureSink) CreateEntry(key string, entry *filer_pb.Entry, signatures []
 	}
 
 	totalSize := filer.FileSize(entry)
-	chunkViews := filer.ViewFromChunks(g.filerSource.LookupFileId, entry.Chunks, 0, int64(totalSize))
+	chunkViews := filer.ViewFromChunks(g.filerSource.LookupFileId, entry.GetChunks(), 0, int64(totalSize))
 
 	// Create a URL that references a to-be-created blob in your
 	// Azure Storage account's container.
diff --git a/weed/replication/sink/b2sink/b2_sink.go b/weed/replication/sink/b2sink/b2_sink.go
index 52883644b..de7899c60 100644
--- a/weed/replication/sink/b2sink/b2_sink.go
+++ b/weed/replication/sink/b2sink/b2_sink.go
@@ -92,7 +92,7 @@ func (g *B2Sink) CreateEntry(key string, entry *filer_pb.Entry, signatures []int
 	}
 
 	totalSize := filer.FileSize(entry)
-	chunkViews := filer.ViewFromChunks(g.filerSource.LookupFileId, entry.Chunks, 0, int64(totalSize))
+	chunkViews := filer.ViewFromChunks(g.filerSource.LookupFileId, entry.GetChunks(), 0, int64(totalSize))
 
 	bucket, err := g.client.Bucket(context.Background(), g.bucket)
 	if err != nil {
diff --git a/weed/replication/sink/filersink/filer_sink.go b/weed/replication/sink/filersink/filer_sink.go
index b922be568..de5ff55cc 100644
--- a/weed/replication/sink/filersink/filer_sink.go
+++ b/weed/replication/sink/filersink/filer_sink.go
@@ -120,14 +120,14 @@ func (fs *FilerSink) CreateEntry(key string, entry *filer_pb.Entry, signatures [
 			}
 		}
 
-		replicatedChunks, err := fs.replicateChunks(entry.Chunks, key)
+		replicatedChunks, err := fs.replicateChunks(entry.GetChunks(), key)
 
 		if err != nil {
 			// only warning here since the source chunk may have been deleted already
 			glog.Warningf("replicate entry chunks %s: %v", key, err)
 		}
 
-		glog.V(4).Infof("replicated %s %+v ===> %+v", key, entry.Chunks, replicatedChunks)
+		glog.V(4).Infof("replicated %s %+v ===> %+v", key, entry.GetChunks(), replicatedChunks)
 
 		request := &filer_pb.CreateEntryRequest{
 			Directory: dir,
@@ -199,7 +199,7 @@ func (fs *FilerSink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParent
 		// delete the chunks that are deleted from the source
 		if deleteIncludeChunks {
 			// remove the deleted chunks. Actual data deletion happens in filer UpdateEntry FindUnusedFileChunks
-			existingEntry.Chunks = filer.DoMinusChunksBySourceFileId(existingEntry.Chunks, deletedChunks)
+			existingEntry.Chunks = filer.DoMinusChunksBySourceFileId(existingEntry.GetChunks(), deletedChunks)
 		}
 
 		// replicate the chunks that are new in the source
@@ -207,7 +207,7 @@
 		if err != nil {
 			return true, fmt.Errorf("replicate %s chunks error: %v", key, err)
 		}
-		existingEntry.Chunks = append(existingEntry.Chunks, replicatedChunks...)
+		existingEntry.Chunks = append(existingEntry.GetChunks(), replicatedChunks...)
 		existingEntry.Attributes = newEntry.Attributes
 		existingEntry.Extended = newEntry.Extended
 		existingEntry.HardLinkId = newEntry.HardLinkId
@@ -235,11 +235,11 @@ func (fs *FilerSink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParent
 }
 
 func compareChunks(lookupFileIdFn wdclient.LookupFileIdFunctionType, oldEntry, newEntry *filer_pb.Entry) (deletedChunks, newChunks []*filer_pb.FileChunk, err error) {
-	aData, aMeta, aErr := filer.ResolveChunkManifest(lookupFileIdFn, oldEntry.Chunks, 0, math.MaxInt64)
+	aData, aMeta, aErr := filer.ResolveChunkManifest(lookupFileIdFn, oldEntry.GetChunks(), 0, math.MaxInt64)
 	if aErr != nil {
 		return nil, nil, aErr
 	}
-	bData, bMeta, bErr := filer.ResolveChunkManifest(lookupFileIdFn, newEntry.Chunks, 0, math.MaxInt64)
+	bData, bMeta, bErr := filer.ResolveChunkManifest(lookupFileIdFn, newEntry.GetChunks(), 0, math.MaxInt64)
 	if bErr != nil {
 		return nil, nil, bErr
 	}
diff --git a/weed/replication/sink/gcssink/gcs_sink.go b/weed/replication/sink/gcssink/gcs_sink.go
index 8c9fd5b15..db6ea4aec 100644
--- a/weed/replication/sink/gcssink/gcs_sink.go
+++ b/weed/replication/sink/gcssink/gcs_sink.go
@@ -97,7 +97,7 @@ func (g *GcsSink) CreateEntry(key string, entry *filer_pb.Entry, signatures []in
 	}
 
 	totalSize := filer.FileSize(entry)
-	chunkViews := filer.ViewFromChunks(g.filerSource.LookupFileId, entry.Chunks, 0, int64(totalSize))
+	chunkViews := filer.ViewFromChunks(g.filerSource.LookupFileId, entry.GetChunks(), 0, int64(totalSize))
 
 	wc := g.client.Bucket(g.bucket).Object(key).NewWriter(context.Background())
 	defer wc.Close()
diff --git a/weed/replication/sink/localsink/local_sink.go b/weed/replication/sink/localsink/local_sink.go
index e69045336..70f5cfc9d 100644
--- a/weed/replication/sink/localsink/local_sink.go
+++ b/weed/replication/sink/localsink/local_sink.go
@@ -75,7 +75,7 @@ func (localsink *LocalSink) CreateEntry(key string, entry *filer_pb.Entry, signa
 	glog.V(4).Infof("Create Entry key: %s", key)
 
 	totalSize := filer.FileSize(entry)
-	chunkViews := filer.ViewFromChunks(localsink.filerSource.LookupFileId, entry.Chunks, 0, int64(totalSize))
+	chunkViews := filer.ViewFromChunks(localsink.filerSource.LookupFileId, entry.GetChunks(), 0, int64(totalSize))
 
 	dir := filepath.Dir(key)
 
diff --git a/weed/s3api/filer_multipart.go b/weed/s3api/filer_multipart.go
index 0167b5906..414ba4bb2 100644
--- a/weed/s3api/filer_multipart.go
+++ b/weed/s3api/filer_multipart.go
@@ -33,7 +33,7 @@ func (s3a *S3ApiServer) createMultipartUpload(input *s3.CreateMultipartUploadInp
 
 	uploadIdString := s3a.generateUploadID(*input.Key)
 
-	uploadIdString = uploadIdString + "_" +strings.ReplaceAll(uuid.New().String(),"-","")
+	uploadIdString = uploadIdString + "_" + strings.ReplaceAll(uuid.New().String(), "-", "")
 
 	if err := s3a.mkdir(s3a.genUploadsFolder(*input.Bucket), uploadIdString, func(entry *filer_pb.Entry) {
 		if entry.Extended == nil {
@@ -106,7 +106,7 @@ func (s3a *S3ApiServer) completeMultipartUpload(input *s3.CompleteMultipartUploa
 				glog.Errorf("completeMultipartUpload %s ETag mismatch chunk: %s part: %s", entry.Name, entryETag, partETag)
 				return nil, s3err.ErrInvalidPart
 			}
-			for _, chunk := range entry.Chunks {
+			for _, chunk := range entry.GetChunks() {
 				p := &filer_pb.FileChunk{
 					FileId: chunk.GetFileIdString(),
 					Offset: offset,
diff --git a/weed/server/filer_grpc_server.go b/weed/server/filer_grpc_server.go
index 5b07ace07..05ebb31a8 100644
--- a/weed/server/filer_grpc_server.go
+++ b/weed/server/filer_grpc_server.go
@@ -203,14 +203,14 @@ func (fs *FilerServer) cleanupChunks(fullpath string, existingEntry *filer.Entry
 
 	// remove old chunks if not included in the new ones
 	if existingEntry != nil {
-		garbage, err = filer.MinusChunks(fs.lookupFileId, existingEntry.Chunks, newEntry.Chunks)
+		garbage, err = filer.MinusChunks(fs.lookupFileId, existingEntry.GetChunks(), newEntry.GetChunks())
 		if err != nil {
-			return newEntry.Chunks, nil, fmt.Errorf("MinusChunks: %v", err)
+			return newEntry.GetChunks(), nil, fmt.Errorf("MinusChunks: %v", err)
 		}
 	}
 
 	// files with manifest chunks are usually large and append only, skip calculating covered chunks
-	manifestChunks, nonManifestChunks := filer.SeparateManifestChunks(newEntry.Chunks)
+	manifestChunks, nonManifestChunks := filer.SeparateManifestChunks(newEntry.GetChunks())
 
 	chunks, coveredChunks := filer.CompactFileChunks(fs.lookupFileId, nonManifestChunks)
 	garbage = append(garbage, coveredChunks...)
@@ -256,7 +256,7 @@ func (fs *FilerServer) AppendToEntry(ctx context.Context, req *filer_pb.AppendTo
 			},
 		}
 	} else {
-		offset = int64(filer.TotalSize(entry.Chunks))
+		offset = int64(filer.TotalSize(entry.GetChunks()))
 	}
 
 	for _, chunk := range req.Chunks {
@@ -264,13 +264,13 @@ func (fs *FilerServer) AppendToEntry(ctx context.Context, req *filer_pb.AppendTo
 		offset += int64(chunk.Size)
 	}
 
-	entry.Chunks = append(entry.Chunks, req.Chunks...)
+	entry.Chunks = append(entry.GetChunks(), req.Chunks...)
 	so, err := fs.detectStorageOption(string(fullpath), "", "", entry.TtlSec, "", "", "", "")
 	if err != nil {
 		glog.Warningf("detectStorageOption: %v", err)
 		return &filer_pb.AppendToEntryResponse{}, err
 	}
-	entry.Chunks, err = filer.MaybeManifestize(fs.saveAsChunk(so), entry.Chunks)
+	entry.Chunks, err = filer.MaybeManifestize(fs.saveAsChunk(so), entry.GetChunks())
 	if err != nil {
 		// not good, but should be ok
 		glog.V(0).Infof("MaybeManifestize: %v", err)
diff --git a/weed/server/filer_grpc_server_remote.go b/weed/server/filer_grpc_server_remote.go
index 7df61744c..740aad497 100644
--- a/weed/server/filer_grpc_server_remote.go
+++ b/weed/server/filer_grpc_server_remote.go
@@ -169,7 +169,7 @@ func (fs *FilerServer) CacheRemoteObjectToLocalCluster(ctx context.Context, req
 		return nil, fetchAndWriteErr
 	}
 
-	garbage := entry.Chunks
+	garbage := entry.GetChunks()
 	newEntry := entry.ShallowClone()
 	newEntry.Chunks = chunks
 
diff --git a/weed/server/filer_grpc_server_rename.go b/weed/server/filer_grpc_server_rename.go
index 452ceba71..8a3a8f07f 100644
--- a/weed/server/filer_grpc_server_rename.go
+++ b/weed/server/filer_grpc_server_rename.go
@@ -165,7 +165,7 @@ func (fs *FilerServer) moveSelfEntry(ctx context.Context, stream filer_pb.Seawee
 	newEntry := &filer.Entry{
 		FullPath:        newPath,
 		Attr:            entry.Attr,
-		Chunks:          entry.Chunks,
+		Chunks:          entry.GetChunks(),
 		Extended:        entry.Extended,
 		Content:         entry.Content,
 		HardLinkCounter: entry.HardLinkCounter,
diff --git a/weed/server/filer_server_handlers_read.go b/weed/server/filer_server_handlers_read.go
index 645a3fb44..06e4b72c8 100644
--- a/weed/server/filer_server_handlers_read.go
+++ b/weed/server/filer_server_handlers_read.go
@@ -135,7 +135,7 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request)
 	if query.Get("resolveManifest") == "true" {
 		if entry.Chunks, _, err = filer.ResolveChunkManifest(
 			fs.filer.MasterClient.GetLookupFileIdFunction(),
-			entry.Chunks, 0, math.MaxInt64); err != nil {
+			entry.GetChunks(), 0, math.MaxInt64); err != nil {
 			err = fmt.Errorf("failed to resolve chunk manifest, err: %s", err.Error())
 			writeJsonError(w, r, http.StatusInternalServerError, err)
 		}
@@ -212,7 +212,7 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request)
 	if shouldResize {
 		data := mem.Allocate(int(totalSize))
 		defer mem.Free(data)
-		err := filer.ReadAll(data, fs.filer.MasterClient, entry.Chunks)
+		err := filer.ReadAll(data, fs.filer.MasterClient, entry.GetChunks())
 		if err != nil {
 			glog.Errorf("failed to read %s: %v", path, err)
 			w.WriteHeader(http.StatusInternalServerError)
@@ -233,7 +233,7 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request)
 			}
 			return err
 		}
-		chunks := entry.Chunks
+		chunks := entry.GetChunks()
 		if entry.IsInRemoteOnly() {
 			dir, name := entry.FullPath.DirAndName()
 			if resp, err := fs.CacheRemoteObjectToLocalCluster(context.Background(), &filer_pb.CacheRemoteObjectToLocalClusterRequest{
@@ -244,7 +244,7 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request)
 				glog.Errorf("CacheRemoteObjectToLocalCluster %s: %v", entry.FullPath, err)
 				return fmt.Errorf("cache %s: %v", entry.FullPath, err)
 			} else {
-				chunks = resp.Entry.Chunks
+				chunks = resp.Entry.GetChunks()
 			}
 		}
 
diff --git a/weed/server/filer_server_handlers_write_autochunk.go b/weed/server/filer_server_handlers_write_autochunk.go
index 7064cac02..6dd8833b8 100644
--- a/weed/server/filer_server_handlers_write_autochunk.go
+++ b/weed/server/filer_server_handlers_write_autochunk.go
@@ -181,7 +181,7 @@ func (fs *FilerServer) saveMetaData(ctx context.Context, r *http.Request, fileNa
 		}
 		entry.FileSize += uint64(chunkOffset)
 	}
-	newChunks = append(entry.Chunks, fileChunks...)
+	newChunks = append(entry.GetChunks(), fileChunks...)
 
 	// TODO
 	if len(entry.Content) > 0 {
diff --git a/weed/server/filer_server_handlers_write_cipher.go b/weed/server/filer_server_handlers_write_cipher.go
index 058d35646..bd8761077 100644
--- a/weed/server/filer_server_handlers_write_cipher.go
+++ b/weed/server/filer_server_handlers_write_cipher.go
@@ -91,7 +91,7 @@ func (fs *FilerServer) encrypt(ctx context.Context, w http.ResponseWriter, r *ht
 	}
 
 	if dbErr := fs.filer.CreateEntry(ctx, entry, false, false, nil, false); dbErr != nil {
-		fs.filer.DeleteChunks(entry.Chunks)
+		fs.filer.DeleteChunks(entry.GetChunks())
 		err = dbErr
 		filerResult.Error = dbErr.Error()
 		return
diff --git a/weed/server/webdav_server.go b/weed/server/webdav_server.go
index 4563d040c..58c47671f 100644
--- a/weed/server/webdav_server.go
+++ b/weed/server/webdav_server.go
@@ -438,13 +438,13 @@ func (f *WebDavFile) Write(buf []byte) (int, error) {
 		}
 
 		f.entry.Content = nil
-		f.entry.Chunks = append(f.entry.Chunks, chunk)
+		f.entry.Chunks = append(f.entry.GetChunks(), chunk)
 
 		return flushErr
 	}
 
 	f.bufWriter.CloseFunc = func() error {
-		manifestedChunks, manifestErr := filer.MaybeManifestize(f.saveDataAsChunk, f.entry.Chunks)
+		manifestedChunks, manifestErr := filer.MaybeManifestize(f.saveDataAsChunk, f.entry.GetChunks())
 		if manifestErr != nil {
 			// not good, but should be ok
 			glog.V(0).Infof("file %s close MaybeManifestize: %v", f.name, manifestErr)
@@ -514,7 +514,7 @@ func (f *WebDavFile) Read(p []byte) (readSize int, err error) {
 		return 0, io.EOF
 	}
 	if f.entryViewCache == nil {
-		f.entryViewCache, _ = filer.NonOverlappingVisibleIntervals(filer.LookupFn(f.fs), f.entry.Chunks, 0, fileSize)
+		f.entryViewCache, _ = filer.NonOverlappingVisibleIntervals(filer.LookupFn(f.fs), f.entry.GetChunks(), 0, fileSize)
 		f.reader = nil
 	}
 	if f.reader == nil {
diff --git a/weed/shell/command_fs_cat.go b/weed/shell/command_fs_cat.go
index da88e9574..bdaa757f5 100644
--- a/weed/shell/command_fs_cat.go
+++ b/weed/shell/command_fs_cat.go
@@ -55,7 +55,7 @@ func (c *commandFsCat) Do(args []string, commandEnv *CommandEnv, writer io.Write
 			return err
 		}
 
-		return filer.StreamContent(commandEnv.MasterClient, writer, respLookupEntry.Entry.Chunks, 0, int64(filer.FileSize(respLookupEntry.Entry)))
+		return filer.StreamContent(commandEnv.MasterClient, writer, respLookupEntry.Entry.GetChunks(), 0, int64(filer.FileSize(respLookupEntry.Entry)))
 
 	})
 
diff --git a/weed/shell/command_fs_du.go b/weed/shell/command_fs_du.go
index 1a6ea82a8..e27ff6f6c 100644
--- a/weed/shell/command_fs_du.go
+++ b/weed/shell/command_fs_du.go
@@ -69,7 +69,7 @@ func duTraverseDirectory(writer io.Writer, filerClient filer_pb.FilerClient, dir
 				byteCount += numByte
 			}
 		} else {
-			fileBlockCount = uint64(len(entry.Chunks))
+			fileBlockCount = uint64(len(entry.GetChunks()))
 			fileByteCount = filer.FileSize(entry)
 			blockCount += fileBlockCount
 			byteCount += fileByteCount
diff --git a/weed/shell/command_fs_ls.go b/weed/shell/command_fs_ls.go
index 6fe76920e..10764175b 100644
--- a/weed/shell/command_fs_ls.go
+++ b/weed/shell/command_fs_ls.go
@@ -93,7 +93,7 @@ func (c *commandFsLs) Do(args []string, commandEnv *CommandEnv, writer io.Writer
 					dir = dir[:len(dir)-1]
 				}
 				fmt.Fprintf(writer, "%s %3d %s %s %6d %s/%s\n",
-					fileMode, len(entry.Chunks),
+					fileMode, len(entry.GetChunks()),
 					userName, groupName,
 					filer.FileSize(entry), dir, entry.Name)
 			} else {
diff --git a/weed/shell/command_fs_meta_cat.go b/weed/shell/command_fs_meta_cat.go
index 3ae106415..7ad1035a2 100644
--- a/weed/shell/command_fs_meta_cat.go
+++ b/weed/shell/command_fs_meta_cat.go
@@ -54,8 +54,8 @@ func (c *commandFsMetaCat) Do(args []string, commandEnv *CommandEnv, writer io.W
 		bytes, _ := proto.Marshal(respLookupEntry.Entry)
 		gzippedBytes, _ := util.GzipData(bytes)
 		// zstdBytes, _ := util.ZstdData(bytes)
-		// fmt.Fprintf(writer, "chunks %d meta size: %d gzip:%d zstd:%d\n", len(respLookupEntry.Entry.Chunks), len(bytes), len(gzippedBytes), len(zstdBytes))
-		fmt.Fprintf(writer, "chunks %d meta size: %d gzip:%d\n", len(respLookupEntry.Entry.Chunks), len(bytes), len(gzippedBytes))
+		// fmt.Fprintf(writer, "chunks %d meta size: %d gzip:%d zstd:%d\n", len(respLookupEntry.Entry.GetChunks()), len(bytes), len(gzippedBytes), len(zstdBytes))
+		fmt.Fprintf(writer, "chunks %d meta size: %d gzip:%d\n", len(respLookupEntry.Entry.GetChunks()), len(bytes), len(gzippedBytes))
 
 		return nil
 
diff --git a/weed/shell/command_volume_fsck.go b/weed/shell/command_volume_fsck.go
index ffe173ae9..559b11cd3 100644
--- a/weed/shell/command_volume_fsck.go
+++ b/weed/shell/command_volume_fsck.go
@@ -216,7 +216,7 @@ func (c *commandVolumeFsck) collectFilerFileIdAndPaths(dataNodeVolumeIdToVInfo m
 		if *c.verbose && entry.Entry.IsDirectory {
 			fmt.Fprintf(c.writer, "checking directory %s\n", util.NewFullPath(entry.Dir, entry.Entry.Name))
 		}
-		dataChunks, manifestChunks, resolveErr := filer.ResolveChunkManifest(filer.LookupFn(c.env), entry.Entry.Chunks, 0, math.MaxInt64)
+		dataChunks, manifestChunks, resolveErr := filer.ResolveChunkManifest(filer.LookupFn(c.env), entry.Entry.GetChunks(), 0, math.MaxInt64)
 		if resolveErr != nil {
 			return fmt.Errorf("failed to ResolveChunkManifest: %+v", resolveErr)
 		}
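
The subject line carries the motivation. For the protobuf filer_pb.Entry, GetChunks() is the standard nil-safe getter that protoc-gen-go already generates, so the pb side needed no new code; for filer.Entry this commit adds a matching hand-written accessor (the weed/filer/entry.go hunk). With every reader routed through one accessor, a later change can introduce locking in a single place instead of at the roughly one hundred call sites above. Below is a minimal sketch of what that follow-up could look like; the chunksLock field and the SetChunks helper are illustrative assumptions, not part of this commit, and the struct is trimmed to the relevant fields.

    package filer

    import (
    	"sync"

    	"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
    )

    // Entry is trimmed for this sketch; the real struct also carries
    // FullPath, Attr, Extended, Content, and the hard-link fields seen
    // in the diff above.
    type Entry struct {
    	Chunks []*filer_pb.FileChunk

    	chunksLock sync.RWMutex // hypothetical follow-up field, not in this commit
    }

    // GetChunks is the accessor this commit introduces; here it additionally
    // takes a read lock, which is the change the commit message anticipates.
    // Only this one method has to learn about the lock.
    func (entry *Entry) GetChunks() (chunks []*filer_pb.FileChunk) {
    	entry.chunksLock.RLock()
    	defer entry.chunksLock.RUnlock()
    	return entry.Chunks
    }

    // SetChunks would be the matching write-side helper. The diff above still
    // assigns entry.Chunks directly (for example the append sites in
    // filer_notify_append.go and filer_grpc_server.go), so those writers would
    // have to migrate to a setter like this before the lock protects anything.
    func (entry *Entry) SetChunks(chunks []*filer_pb.FileChunk) {
    	entry.chunksLock.Lock()
    	defer entry.chunksLock.Unlock()
    	entry.Chunks = chunks
    }

Note that the getter hands back the live slice header rather than a copy, so a lock placed here would only make the field load and store atomic relative to SetChunks; callers that retain and mutate the returned slice would still need their own coordination.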