Fix quadratic file reading in restic mount

Author: greatroar, 2020-06-14 20:48:52 +02:00; committed by Michael Eischer
parent 8598bb042b
commit d42c169458
2 changed files with 24 additions and 32 deletions
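
The quadratic behaviour comes from two places in the old code: Read walked the per-blob sizes slice from the start to find the blob containing the requested offset, and getBlobAt cleared every earlier entry of the per-file blobs slice on each call, so reading an n-blob file in sequential chunks did O(n) work per request and O(n²) overall. The fix replaces both: the file now stores a cumulative-size table (cumsize[i] is the total size of blobs[:i]) that Read searches with a binary search, and instead of the blobs slice it keeps a single cached blob, which also leaves Release with nothing to free.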

View File

@@ -3,6 +3,8 @@
package fuse
import (
"sort"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/restic"
@@ -18,21 +20,26 @@ const blockSize = 512
// Statically ensure that *file implements the given interface
var _ = fs.HandleReader(&file{})
var _ = fs.HandleReleaser(&file{})
type file struct {
root *Root
node *restic.Node
inode uint64
sizes []int
blobs [][]byte
// cumsize[i] holds the cumulative size of blobs[:i].
cumsize []uint64
// Cached blob and its index in the blobs of node.
cached struct {
blob []byte
index int
}
}
func newFile(ctx context.Context, root *Root, inode uint64, node *restic.Node) (fusefile *file, err error) {
debug.Log("create new file for %v with %d blobs", node.Name, len(node.Content))
var bytes uint64
sizes := make([]int, len(node.Content))
cumsize := make([]uint64, 1+len(node.Content))
for i, id := range node.Content {
size, ok := root.blobSizeCache.Lookup(id)
if !ok {
@@ -43,8 +50,8 @@ func newFile(ctx context.Context, root *Root, inode uint64, node *restic.Node) (
}
}
sizes[i] = int(size)
bytes += uint64(size)
cumsize[i+1] = bytes
}
if bytes != node.Size {
@@ -56,8 +63,8 @@ func newFile(ctx context.Context, root *Root, inode uint64, node *restic.Node) (
inode: inode,
root: root,
node: node,
sizes: sizes,
blobs: make([][]byte, len(node.Content)),
cumsize: cumsize,
}, nil
}
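
newFile fills the cumulative table in a single pass. A minimal standalone sketch of that construction (the helper name cumulativeSizes is invented for illustration): the table gets one more entry than there are blobs, cumsize[0] stays zero, and the last entry equals the total that is checked against node.Size above.

    // Hypothetical helper, not restic code: build the cumulative-size table.
    // cumsize[i] is the combined size of the first i blobs, so cumsize[0] == 0
    // and cumsize[len(sizes)] is the total file size.
    func cumulativeSizes(sizes []uint) []uint64 {
        cumsize := make([]uint64, 1+len(sizes))
        var total uint64
        for i, size := range sizes {
            total += uint64(size)
            cumsize[i+1] = total
        }
        return cumsize
    }
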
@@ -84,13 +91,8 @@ func (f *file) Attr(ctx context.Context, a *fuse.Attr) error {
func (f *file) getBlobAt(ctx context.Context, i int) (blob []byte, err error) {
debug.Log("getBlobAt(%v, %v)", f.node.Name, i)
if f.blobs[i] != nil {
return f.blobs[i], nil
}
// release earlier blobs
for j := 0; j < i; j++ {
f.blobs[j] = nil
if i == f.cached.index && f.cached.blob != nil {
return f.cached.blob, nil
}
blob, err = f.root.repo.LoadBlob(ctx, restic.DataBlob, f.node.Content[i], nil)
@@ -98,16 +100,16 @@ func (f *file) getBlobAt(ctx context.Context, i int) (blob []byte, err error) {
debug.Log("LoadBlob(%v, %v) failed: %v", f.node.Name, f.node.Content[i], err)
return nil, err
}
f.blobs[i] = blob
f.cached.blob, f.cached.index = blob, i
return blob, nil
}
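
getBlobAt no longer keeps one slice entry per blob, and no longer loops over all earlier entries to release them; it remembers only the most recently loaded blob. A minimal sketch of that one-entry cache, with a hypothetical blobCache type and a load callback standing in for repo.LoadBlob:

    // Hypothetical one-entry cache mirroring the cached struct above. Only the
    // most recently used blob stays referenced, so memory per open file is
    // bounded by a single blob.
    type blobCache struct {
        blob  []byte
        index int
    }

    func (c *blobCache) get(i int, load func(int) ([]byte, error)) ([]byte, error) {
        if c.blob != nil && c.index == i {
            return c.blob, nil // hit: a Read continuing in the same blob
        }
        blob, err := load(i)
        if err != nil {
            return nil, err
        }
        c.blob, c.index = blob, i // replace the cached blob; the old one becomes garbage
        return blob, nil
    }

For a forward, chunk-by-chunk read the next request asks for the same blob or the following one, so the single slot hits in the same cases the old slice did, while the release work drops from O(i) pointer clears per call to a constant-time check.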
func (f *file) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.ReadResponse) error {
debug.Log("Read(%v, %v, %v), file size %v", f.node.Name, req.Size, req.Offset, f.node.Size)
offset := req.Offset
offset := uint64(req.Offset)
if uint64(offset) > f.node.Size {
if offset > f.node.Size {
debug.Log("Read(%v): offset is greater than file size: %v > %v",
f.node.Name, req.Offset, f.node.Size)
@@ -123,16 +125,15 @@ func (f *file) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.ReadR
}
// Skip blobs before the offset
startContent := 0
for offset > int64(f.sizes[startContent]) {
offset -= int64(f.sizes[startContent])
startContent++
}
startContent := -1 + sort.Search(len(f.cumsize), func(i int) bool {
return f.cumsize[i] > offset
})
offset -= f.cumsize[startContent]
dst := resp.Data[0:req.Size]
readBytes := 0
remainingBytes := req.Size
for i := startContent; remainingBytes > 0 && i < len(f.sizes); i++ {
for i := startContent; remainingBytes > 0 && i < len(f.cumsize)-1; i++ {
blob, err := f.getBlobAt(ctx, i)
if err != nil {
return err
@@ -154,13 +155,6 @@ func (f *file) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.ReadR
return nil
}
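
The offset lookup in Read is now a binary search over the cumulative sizes: -1 + sort.Search(...) yields the largest index whose cumulative size is still at most the offset, i.e. the blob the offset falls into, in O(log n) instead of a scan over every preceding blob. A runnable sketch of that lookup (findBlob and the sample sizes are made up for illustration); Read rejects offsets past the end of the file before this lookup runs:

    package main

    import (
        "fmt"
        "sort"
    )

    // findBlob returns the index of the blob containing offset and the offset
    // within that blob, given a cumulative-size table where cumsize[i] is the
    // combined size of the first i blobs (so cumsize[0] == 0).
    func findBlob(cumsize []uint64, offset uint64) (idx int, within uint64) {
        idx = -1 + sort.Search(len(cumsize), func(i int) bool {
            return cumsize[i] > offset
        })
        return idx, offset - cumsize[idx]
    }

    func main() {
        cumsize := []uint64{0, 100, 250, 400} // blobs of 100, 150 and 150 bytes
        fmt.Println(findBlob(cumsize, 0))   // 0 0   (start of the first blob)
        fmt.Println(findBlob(cumsize, 120)) // 1 20  (20 bytes into the second blob)
        fmt.Println(findBlob(cumsize, 399)) // 2 149 (last byte of the third blob)
    }
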
func (f *file) Release(ctx context.Context, req *fuse.ReleaseRequest) error {
for i := range f.blobs {
f.blobs[i] = nil
}
return nil
}
func (f *file) Listxattr(ctx context.Context, req *fuse.ListxattrRequest, resp *fuse.ListxattrResponse) error {
debug.Log("Listxattr(%v, %v)", f.node.Name, req.Size)
for _, attr := range f.node.ExtendedAttributes {

View File

@@ -146,8 +146,6 @@ func TestFuseFile(t *testing.T) {
t.Errorf("test %d failed, wrong data returned (offset %v, length %v)", i, offset, length)
}
}
rtest.OK(t, f.Release(ctx, nil))
}
// Test top-level directories for their UID and GID.