1
0
Fork 0
mirror of https://github.com/chrislusf/seaweedfs synced 2024-06-29 05:41:02 +02:00

Revert ErrXyz back to ErrorXyz temporarily.

This commit is contained in:
bingoohuang 2021-02-20 16:57:07 +08:00
parent 50df484d86
commit ee7cdf3668
7 changed files with 16 additions and 16 deletions

View file

@@ -93,7 +93,7 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request)
} else if hasEcVolume { } else if hasEcVolume {
count, err = vs.store.ReadEcShardNeedle(volumeId, n) count, err = vs.store.ReadEcShardNeedle(volumeId, n)
} }
if err != nil && err != storage.ErrDeleted && r.FormValue("type") != "replicate" && hasVolume { if err != nil && err != storage.ErrorDeleted && r.FormValue("type") != "replicate" && hasVolume {
glog.V(4).Infof("read needle: %v", err) glog.V(4).Infof("read needle: %v", err)
// start to fix it from other replicas, if not deleted and hasVolume and is not a replicated request // start to fix it from other replicas, if not deleted and hasVolume and is not a replicated request
} }

View file

@@ -24,7 +24,7 @@ const (
TtlBytesLength = 2 TtlBytesLength = 2
) )
var ErrSizeMismatch = errors.New("size mismatch") var ErrorSizeMismatch = errors.New("size mismatch")
func (n *Needle) DiskSize(version Version) int64 { func (n *Needle) DiskSize(version Version) int64 {
return GetActualSize(n.Size, version) return GetActualSize(n.Size, version)
@@ -173,7 +173,7 @@ func (n *Needle) ReadBytes(bytes []byte, offset int64, size Size, version Versio
// cookie is not always passed in for this API. Use size to do preliminary checking. // cookie is not always passed in for this API. Use size to do preliminary checking.
if OffsetSize == 4 && offset < int64(MaxPossibleVolumeSize) { if OffsetSize == 4 && offset < int64(MaxPossibleVolumeSize) {
glog.Errorf("entry not found1: offset %d found id %x size %d, expected size %d", offset, n.Id, n.Size, size) glog.Errorf("entry not found1: offset %d found id %x size %d, expected size %d", offset, n.Id, n.Size, size)
return ErrSizeMismatch return ErrorSizeMismatch
} }
return fmt.Errorf("entry not found: offset %d found id %x size %d, expected size %d", offset, n.Id, n.Size, size) return fmt.Errorf("entry not found: offset %d found id %x size %d, expected size %d", offset, n.Id, n.Size, size)
} }

View file

@@ -130,7 +130,7 @@ func (s *Store) ReadEcShardNeedle(vid needle.VolumeId, n *needle.Needle) (int, e
return 0, fmt.Errorf("locate in local ec volume: %v", err) return 0, fmt.Errorf("locate in local ec volume: %v", err)
} }
if size.IsDeleted() { if size.IsDeleted() {
return 0, ErrDeleted return 0, ErrorDeleted
} }
glog.V(3).Infof("read ec volume %d offset %d size %d intervals:%+v", vid, offset.ToActualOffset(), size, intervals) glog.V(3).Infof("read ec volume %d offset %d size %d intervals:%+v", vid, offset.ToActualOffset(), size, intervals)
@@ -143,7 +143,7 @@ func (s *Store) ReadEcShardNeedle(vid needle.VolumeId, n *needle.Needle) (int, e
return 0, fmt.Errorf("ReadEcShardIntervals: %v", err) return 0, fmt.Errorf("ReadEcShardIntervals: %v", err)
} }
if isDeleted { if isDeleted {
return 0, ErrDeleted return 0, ErrorDeleted
} }
err = n.ReadBytes(bytes, offset.ToActualOffset(), size, localEcVolume.Version) err = n.ReadBytes(bytes, offset.ToActualOffset(), size, localEcVolume.Version)

View file

@@ -30,7 +30,7 @@ func CheckAndFixVolumeDataIntegrity(v *Volume, indexFile *os.File) (lastAppendAt
healthyIndexSize = indexSize - int64(i)*NeedleMapEntrySize healthyIndexSize = indexSize - int64(i)*NeedleMapEntrySize
continue continue
} }
if err != ErrSizeMismatch { if err != ErrorSizeMismatch {
break break
} }
} }
@@ -94,7 +94,7 @@ func verifyNeedleIntegrity(datFile backend.BackendStorageFile, v needle.Version,
return 0, fmt.Errorf("read %s at %d", datFile.Name(), offset) return 0, fmt.Errorf("read %s at %d", datFile.Name(), offset)
} }
if n.Size != size { if n.Size != size {
return 0, ErrSizeMismatch return 0, ErrorSizeMismatch
} }
if v == needle.Version3 { if v == needle.Version3 {
bytes := make([]byte, TimestampSize) bytes := make([]byte, TimestampSize)

View file

@@ -15,9 +15,9 @@ import (
. "github.com/chrislusf/seaweedfs/weed/storage/types" . "github.com/chrislusf/seaweedfs/weed/storage/types"
) )
var ErrNotFound = errors.New("not found") var ErrorNotFound = errors.New("not found")
var ErrDeleted = errors.New("already deleted") var ErrorDeleted = errors.New("already deleted")
var ErrSizeMismatch = errors.New("size mismatch") var ErrorSizeMismatch = errors.New("size mismatch")
func (v *Volume) checkReadWriteError(err error) { func (v *Volume) checkReadWriteError(err error) {
if err == nil { if err == nil {
@@ -289,7 +289,7 @@ func (v *Volume) readNeedle(n *needle.Needle, readOption *ReadOption) (int, erro
nv, ok := v.nm.Get(n.Id) nv, ok := v.nm.Get(n.Id)
if !ok || nv.Offset.IsZero() { if !ok || nv.Offset.IsZero() {
return -1, ErrNotFound return -1, ErrorNotFound
} }
readSize := nv.Size readSize := nv.Size
if readSize.IsDeleted() { if readSize.IsDeleted() {
@@ -297,14 +297,14 @@ func (v *Volume) readNeedle(n *needle.Needle, readOption *ReadOption) (int, erro
glog.V(3).Infof("reading deleted %s", n.String()) glog.V(3).Infof("reading deleted %s", n.String())
readSize = -readSize readSize = -readSize
} else { } else {
return -1, ErrDeleted return -1, ErrorDeleted
} }
} }
if readSize == 0 { if readSize == 0 {
return 0, nil return 0, nil
} }
err := n.ReadData(v.DataBackend, nv.Offset.ToActualOffset(), readSize, v.Version()) err := n.ReadData(v.DataBackend, nv.Offset.ToActualOffset(), readSize, v.Version())
if err == needle.ErrSizeMismatch && OffsetSize == 4 { if err == needle.ErrorSizeMismatch && OffsetSize == 4 {
// add special handling for .dat larger than 32GB, from git commit comment of // add special handling for .dat larger than 32GB, from git commit comment of
// 06c15ab3 Chris Lu <chris.lu@gmail.com> on 2020/10/28 at 4:11 上 // 06c15ab3 Chris Lu <chris.lu@gmail.com> on 2020/10/28 at 4:11 上
err = n.ReadData(v.DataBackend, nv.Offset.ToActualOffset()+int64(MaxPossibleVolumeSize), readSize, v.Version()) err = n.ReadData(v.DataBackend, nv.Offset.ToActualOffset()+int64(MaxPossibleVolumeSize), readSize, v.Version())
@@ -327,7 +327,7 @@ func (v *Volume) readNeedle(n *needle.Needle, readOption *ReadOption) (int, erro
if time.Now().Before(time.Unix(0, int64(n.AppendAtNs)).Add(time.Duration(ttlMinutes) * time.Minute)) { if time.Now().Before(time.Unix(0, int64(n.AppendAtNs)).Add(time.Duration(ttlMinutes) * time.Minute)) {
return bytesRead, nil return bytesRead, nil
} }
return -1, ErrNotFound return -1, ErrorNotFound
} }
func (v *Volume) startWorker() { func (v *Volume) startWorker() {

View file

@@ -104,7 +104,7 @@ func (v *ChunkCacheVolume) GetNeedle(key types.NeedleId) ([]byte, error) {
nv, ok := v.nm.Get(key) nv, ok := v.nm.Get(key)
if !ok { if !ok {
return nil, storage.ErrNotFound return nil, storage.ErrorNotFound
} }
data := make([]byte, nv.Size) data := make([]byte, nv.Size)
if readSize, readErr := v.DataBackend.ReadAt(data, nv.Offset.ToActualOffset()); readErr != nil { if readSize, readErr := v.DataBackend.ReadAt(data, nv.Offset.ToActualOffset()); readErr != nil {

View file

@@ -66,7 +66,7 @@ func (c *OnDiskCacheLayer) getChunk(needleId types.NeedleId) (data []byte) {
for _, diskCache := range c.diskCaches { for _, diskCache := range c.diskCaches {
data, err = diskCache.GetNeedle(needleId) data, err = diskCache.GetNeedle(needleId)
if err == storage.ErrNotFound { if err == storage.ErrorNotFound {
continue continue
} }
if err != nil { if err != nil {