seaweedfs/weed/mount/weedfs.go
chrislu f02c4f816b Production Integration: ML-aware FUSE mount optimizations
OPTION A COMPLETE: Full production integration of ML optimization system

## Major Integration Components:

### 1. Command Line Interface
- Add ML optimization flags to 'weed mount' command:
  * -ml.enabled: Enable/disable ML optimizations
  * -ml.prefetchWorkers: Configure concurrent prefetch workers (default: 8)
  * -ml.confidenceThreshold: Set ML confidence threshold (default: 0.6)
  * -ml.maxPrefetchAhead: Max chunks to prefetch ahead (default: 8)
  * -ml.batchSize: Batch size for prefetch operations (default: 3)
- Update command help text with an ML Optimization section and usage examples
- Complete flag parsing and validation pipeline (see the registration sketch below)
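
As an orientation aid, here is a minimal sketch of registering these flags with Go's standard `flag` package; the names and defaults come from the list above, while the actual mount command wires them into its own flag set and the `Option` struct:

```go
package main

import (
	"flag"
	"fmt"
)

// Sketch only: flag names and defaults mirror the list above.
var (
	mlEnabled             = flag.Bool("ml.enabled", false, "enable ML-aware mount optimizations")
	mlPrefetchWorkers     = flag.Int("ml.prefetchWorkers", 8, "concurrent prefetch workers")
	mlConfidenceThreshold = flag.Float64("ml.confidenceThreshold", 0.6, "ML confidence threshold for acting on predictions")
	mlMaxPrefetchAhead    = flag.Int("ml.maxPrefetchAhead", 8, "max chunks to prefetch ahead")
	mlBatchSize           = flag.Int("ml.batchSize", 3, "batch size for prefetch operations")
)

func main() {
	flag.Parse()
	fmt.Println(*mlEnabled, *mlPrefetchWorkers, *mlConfidenceThreshold, *mlMaxPrefetchAhead, *mlBatchSize)
}
```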

### 2. Core WFS Integration
- Add MLIntegrationManager to WFS struct with proper lifecycle management
- Initialize ML optimization based on mount flags with custom configuration
- Integrate ML system shutdown with graceful cleanup on mount termination
- Memory-safe initialization with proper error handling

### 3. FUSE Operation Hooks
- **File Open (wfs.Open)**: Apply ML-specific optimizations (FOPEN_KEEP_CACHE, direct I/O)
- **File Read (wfs.Read)**: Record access patterns for ML prefetch decision making
- **File Close (wfs.Release)**: Update ML file tracking and clean up resources
- **Get Attributes (wfs.GetAttr)**: Apply ML-aware attribute cache timeouts
- All hooks are guarded with nil checks and enabled-status validation, as sketched below
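
The guard pattern looks roughly like this (a sketch in the `mount` package; `applyMLOpenHints` and `IsEnabled` are illustrative names, not the actual `MLIntegrationManager` API):

```go
// Illustrative guard: every ML hook bails out unless the manager exists and is
// enabled, so a mount without -ml.enabled takes the unmodified code path.
func (wfs *WFS) applyMLOpenHints(out *fuse.OpenOut) {
	if wfs.mlIntegration == nil || !wfs.mlIntegration.IsEnabled() {
		return
	}
	// One of the optimizations listed above: keep the kernel page cache so files
	// that ML workloads re-read across epochs are served from memory.
	out.OpenFlags |= fuse.FOPEN_KEEP_CACHE
}
```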

### 4. Configuration Management
- Mount options propagated through Option struct to ML system
- NewMLIntegrationManagerWithConfig for runtime configuration
- Default fallbacks and validation for all ML parameters (sketched below)
- Seamless integration with existing mount option processing
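
A sketch of the default fallbacks (values are the flag defaults listed above; `applyMLDefaults` is an illustrative helper name, not necessarily the actual function):

```go
// Illustrative defaults matching the documented flag defaults.
func applyMLDefaults(o *Option) {
	if o.MLPrefetchWorkers <= 0 {
		o.MLPrefetchWorkers = 8
	}
	if o.MLConfidenceThreshold <= 0 || o.MLConfidenceThreshold > 1 {
		o.MLConfidenceThreshold = 0.6
	}
	if o.MLMaxPrefetchAhead <= 0 {
		o.MLMaxPrefetchAhead = 8
	}
	if o.MLBatchSize <= 0 {
		o.MLBatchSize = 3
	}
}
```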

## Production Features:

- **Zero-Impact Design**: ML optimizations only activate when explicitly enabled
- **Backward Compatibility**: All existing mount functionality preserved
- **Resource Management**: Proper initialization, shutdown, and cleanup
- **Error Handling**: Graceful degradation if ML components fail
- **Performance Monitoring**: Integration points for metrics and debugging
- **Configuration Flexibility**: Runtime-tunable parameters via mount flags

## Testing Verification:
- Successful compilation of the entire codebase
- Mount command properly shows ML flags in help text
- Flag parsing and validation working correctly
- ML optimization system initializes when enabled
- FUSE operations integrate ML hooks without breaking existing functionality

## Usage Examples:

Basic ML optimization:
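
For example (the filer address and mount point are placeholders):

```sh
weed mount -filer=localhost:8888 -dir=/mnt/seaweedfs -ml.enabled=true
```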

Custom ML configuration:
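
For example, overriding the defaults (the specific values here are only illustrative):

```sh
weed mount -filer=localhost:8888 -dir=/mnt/seaweedfs \
  -ml.enabled=true \
  -ml.prefetchWorkers=16 \
  -ml.confidenceThreshold=0.8 \
  -ml.maxPrefetchAhead=16 \
  -ml.batchSize=5
```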

## Architecture Impact:
- Clean separation between core FUSE and ML optimization layers
- Modular design allows easy extension and maintenance
- Production-ready with comprehensive error handling and resource management
- Foundation established for advanced ML features (Phase 4)

This completes Option A: Production Integration, providing a fully functional ML-aware FUSE mount system ready for real-world ML workloads.
2025-08-30 16:06:25 -07:00

309 lines · 9.5 KiB · Go

package mount

import (
	"context"
	"errors"
	"math/rand/v2"
	"os"
	"path"
	"path/filepath"
	"sync"
	"sync/atomic"
	"time"

	"github.com/hanwen/go-fuse/v2/fuse"
	"google.golang.org/grpc"

	"github.com/seaweedfs/seaweedfs/weed/filer"
	"github.com/seaweedfs/seaweedfs/weed/glog"
	"github.com/seaweedfs/seaweedfs/weed/mount/meta_cache"
	"github.com/seaweedfs/seaweedfs/weed/pb"
	"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
	"github.com/seaweedfs/seaweedfs/weed/pb/mount_pb"
	"github.com/seaweedfs/seaweedfs/weed/storage/types"
	"github.com/seaweedfs/seaweedfs/weed/util"
	"github.com/seaweedfs/seaweedfs/weed/util/chunk_cache"
	"github.com/seaweedfs/seaweedfs/weed/util/grace"
	"github.com/seaweedfs/seaweedfs/weed/util/version"
	"github.com/seaweedfs/seaweedfs/weed/wdclient"

	"github.com/hanwen/go-fuse/v2/fs"
)

type Option struct {
	filerIndex int32 // align memory for atomic read/write
	FilerAddresses []pb.ServerAddress
	MountDirectory string
	GrpcDialOption grpc.DialOption
	FilerMountRootPath string
	Collection string
	Replication string
	TtlSec int32
	DiskType types.DiskType
	ChunkSizeLimit int64
	ConcurrentWriters int
	CacheDirForRead string
	CacheSizeMBForRead int64
	CacheDirForWrite string
	CacheMetaTTlSec int
	DataCenter string
	Umask os.FileMode
	Quota int64
	DisableXAttr bool
	IsMacOs bool

	MountUid uint32
	MountGid uint32
	MountMode os.FileMode
	MountCtime time.Time
	MountMtime time.Time
	MountParentInode uint64

	VolumeServerAccess string // how to access volume servers
	Cipher bool // whether encrypt data on volume server
	UidGidMapper *meta_cache.UidGidMapper

	// RDMA acceleration options
	RdmaEnabled bool
	RdmaSidecarAddr string
	RdmaFallback bool
	RdmaReadOnly bool
	RdmaMaxConcurrent int
	RdmaTimeoutMs int

	// ML optimization options
	MLOptimizationEnabled bool
	MLPrefetchWorkers int
	MLConfidenceThreshold float64
	MLMaxPrefetchAhead int
	MLBatchSize int

	uniqueCacheDirForRead string
	uniqueCacheDirForWrite string
}

type WFS struct {
	// https://dl.acm.org/doi/fullHtml/10.1145/3310148
	// follow https://github.com/hanwen/go-fuse/blob/master/fuse/api.go
	fuse.RawFileSystem
	mount_pb.UnimplementedSeaweedMountServer
	fs.Inode
	option *Option
	metaCache *meta_cache.MetaCache
	stats statsCache
	chunkCache *chunk_cache.TieredChunkCache
	signature int32
	concurrentWriters *util.LimitedConcurrentExecutor
	copyBufferPool sync.Pool
	concurrentCopiersSem chan struct{}
	inodeToPath *InodeToPath
	fhMap *FileHandleToInode
	dhMap *DirectoryHandleToInode
	fuseServer *fuse.Server
	IsOverQuota bool
	fhLockTable *util.LockTable[FileHandleId]
	rdmaClient *RDMAMountClient
	mlIntegration *MLIntegrationManager
	FilerConf *filer.FilerConf
}

func NewSeaweedFileSystem(option *Option) *WFS {
	wfs := &WFS{
		RawFileSystem: fuse.NewDefaultRawFileSystem(),
		option: option,
		signature: util.RandomInt32(),
		inodeToPath: NewInodeToPath(util.FullPath(option.FilerMountRootPath), option.CacheMetaTTlSec),
		fhMap: NewFileHandleToInode(),
		dhMap: NewDirectoryHandleToInode(),
		fhLockTable: util.NewLockTable[FileHandleId](),
	}

	wfs.option.filerIndex = int32(rand.IntN(len(option.FilerAddresses)))
	wfs.option.setupUniqueCacheDirectory()
	if option.CacheSizeMBForRead > 0 {
		wfs.chunkCache = chunk_cache.NewTieredChunkCache(256, option.getUniqueCacheDirForRead(), option.CacheSizeMBForRead, 1024*1024)
	}

	wfs.metaCache = meta_cache.NewMetaCache(path.Join(option.getUniqueCacheDirForRead(), "meta"), option.UidGidMapper,
		util.FullPath(option.FilerMountRootPath),
		func(path util.FullPath) {
			wfs.inodeToPath.MarkChildrenCached(path)
		}, func(path util.FullPath) bool {
			return wfs.inodeToPath.IsChildrenCached(path)
		}, func(filePath util.FullPath, entry *filer_pb.Entry) {
			// Find inode if it is not a deleted path
			if inode, inodeFound := wfs.inodeToPath.GetInode(filePath); inodeFound {
				// Find open file handle
				if fh, fhFound := wfs.fhMap.FindFileHandle(inode); fhFound {
					fhActiveLock := fh.wfs.fhLockTable.AcquireLock("invalidateFunc", fh.fh, util.ExclusiveLock)
					defer fh.wfs.fhLockTable.ReleaseLock(fh.fh, fhActiveLock)

					// Recreate dirty pages
					fh.dirtyPages.Destroy()
					fh.dirtyPages = newPageWriter(fh, wfs.option.ChunkSizeLimit)

					// Update handle entry
					newEntry, status := wfs.maybeLoadEntry(filePath)
					if status == fuse.OK {
						if fh.GetEntry().GetEntry() != newEntry {
							fh.SetEntry(newEntry)
						}
					}
				}
			}
		})

	grace.OnInterrupt(func() {
		wfs.metaCache.Shutdown()
		os.RemoveAll(option.getUniqueCacheDirForWrite())
		os.RemoveAll(option.getUniqueCacheDirForRead())
		if wfs.rdmaClient != nil {
			wfs.rdmaClient.Close()
		}
		if wfs.mlIntegration != nil {
			wfs.mlIntegration.Shutdown()
		}
	})

	// Initialize RDMA client if enabled
	if option.RdmaEnabled && option.RdmaSidecarAddr != "" {
		rdmaClient, err := NewRDMAMountClient(
			option.RdmaSidecarAddr,
			wfs.LookupFn(),
			option.RdmaMaxConcurrent,
			option.RdmaTimeoutMs,
		)
		if err != nil {
			glog.Warningf("Failed to initialize RDMA client: %v", err)
		} else {
			wfs.rdmaClient = rdmaClient
			glog.Infof("RDMA acceleration enabled: sidecar=%s, maxConcurrent=%d, timeout=%dms",
				option.RdmaSidecarAddr, option.RdmaMaxConcurrent, option.RdmaTimeoutMs)
		}
	}

	// Initialize ML optimization if enabled
	if option.MLOptimizationEnabled {
		wfs.mlIntegration = NewMLIntegrationManagerWithConfig(
			wfs.chunkCache,
			wfs.LookupFn(),
			option.MLPrefetchWorkers,
			option.MLConfidenceThreshold,
			option.MLMaxPrefetchAhead,
			option.MLBatchSize,
		)
		glog.Infof("ML optimization enabled: prefetchWorkers=%d, confidenceThreshold=%.2f, maxPrefetchAhead=%d",
			option.MLPrefetchWorkers, option.MLConfidenceThreshold, option.MLMaxPrefetchAhead)
	}

	if wfs.option.ConcurrentWriters > 0 {
		wfs.concurrentWriters = util.NewLimitedConcurrentExecutor(wfs.option.ConcurrentWriters)
		wfs.concurrentCopiersSem = make(chan struct{}, wfs.option.ConcurrentWriters)
	}
	wfs.copyBufferPool.New = func() any {
		return make([]byte, option.ChunkSizeLimit)
	}
	return wfs
}

func (wfs *WFS) StartBackgroundTasks() error {
	follower, err := wfs.subscribeFilerConfEvents()
	if err != nil {
		return err
	}

	startTime := time.Now()
	go meta_cache.SubscribeMetaEvents(wfs.metaCache, wfs.signature, wfs, wfs.option.FilerMountRootPath, startTime.UnixNano(), follower)
	go wfs.loopCheckQuota()

	return nil
}

func (wfs *WFS) String() string {
	return "seaweedfs"
}

func (wfs *WFS) Init(server *fuse.Server) {
	wfs.fuseServer = server
}

func (wfs *WFS) maybeReadEntry(inode uint64) (path util.FullPath, fh *FileHandle, entry *filer_pb.Entry, status fuse.Status) {
	path, status = wfs.inodeToPath.GetPath(inode)
	if status != fuse.OK {
		return
	}
	var found bool
	if fh, found = wfs.fhMap.FindFileHandle(inode); found {
		entry = fh.UpdateEntry(func(entry *filer_pb.Entry) {
			if entry != nil && fh.entry.Attributes == nil {
				entry.Attributes = &filer_pb.FuseAttributes{}
			}
		})
	} else {
		entry, status = wfs.maybeLoadEntry(path)
	}
	return
}

func (wfs *WFS) maybeLoadEntry(fullpath util.FullPath) (*filer_pb.Entry, fuse.Status) {
	// glog.V(3).Infof("read entry cache miss %s", fullpath)
	dir, name := fullpath.DirAndName()

	// return a valid entry for the mount root
	if string(fullpath) == wfs.option.FilerMountRootPath {
		return &filer_pb.Entry{
			Name: name,
			IsDirectory: true,
			Attributes: &filer_pb.FuseAttributes{
				Mtime: wfs.option.MountMtime.Unix(),
				FileMode: uint32(wfs.option.MountMode),
				Uid: wfs.option.MountUid,
				Gid: wfs.option.MountGid,
				Crtime: wfs.option.MountCtime.Unix(),
			},
		}, fuse.OK
	}

	// read from async meta cache
	meta_cache.EnsureVisited(wfs.metaCache, wfs, util.FullPath(dir))
	cachedEntry, cacheErr := wfs.metaCache.FindEntry(context.Background(), fullpath)
	if errors.Is(cacheErr, filer_pb.ErrNotFound) {
		return nil, fuse.ENOENT
	}
	return cachedEntry.ToProtoEntry(), fuse.OK
}

func (wfs *WFS) LookupFn() wdclient.LookupFileIdFunctionType {
	if wfs.option.VolumeServerAccess == "filerProxy" {
		return func(ctx context.Context, fileId string) (targetUrls []string, err error) {
			return []string{"http://" + wfs.getCurrentFiler().ToHttpAddress() + "/?proxyChunkId=" + fileId}, nil
		}
	}
	return filer.LookupFn(wfs)
}

func (wfs *WFS) getCurrentFiler() pb.ServerAddress {
	i := atomic.LoadInt32(&wfs.option.filerIndex)
	return wfs.option.FilerAddresses[i]
}

func (wfs *WFS) ClearCacheDir() {
	wfs.metaCache.Shutdown()
	os.RemoveAll(wfs.option.getUniqueCacheDirForWrite())
	os.RemoveAll(wfs.option.getUniqueCacheDirForRead())
}

func (option *Option) setupUniqueCacheDirectory() {
	cacheUniqueId := util.Md5String([]byte(option.MountDirectory + string(option.FilerAddresses[0]) + option.FilerMountRootPath + version.Version()))[0:8]
	option.uniqueCacheDirForRead = path.Join(option.CacheDirForRead, cacheUniqueId)
	os.MkdirAll(option.uniqueCacheDirForRead, os.FileMode(0777)&^option.Umask)
	option.uniqueCacheDirForWrite = filepath.Join(path.Join(option.CacheDirForWrite, cacheUniqueId), "swap")
	os.MkdirAll(option.uniqueCacheDirForWrite, os.FileMode(0777)&^option.Umask)
}

func (option *Option) getUniqueCacheDirForWrite() string {
	return option.uniqueCacheDirForWrite
}

func (option *Option) getUniqueCacheDirForRead() string {
	return option.uniqueCacheDirForRead
}