From bfc1bc6ee6979dd1a71126892499b26d61e729ee Mon Sep 17 00:00:00 2001
From: George Armhold
Date: Wed, 5 Sep 2018 08:04:55 -0400
Subject: [PATCH] clean up some errors from 'go vet ./...'

---
 go.sum                             |  1 +
 internal/archiver/archiver_test.go | 26 +++++++++++++-------------
 internal/archiver/scanner_test.go  |  6 +++---
 internal/archiver/tree.go          |  1 -
 internal/backend/b2/b2.go          |  2 --
 internal/migrations/s3_layout.go   |  2 --
 6 files changed, 17 insertions(+), 21 deletions(-)

diff --git a/go.sum b/go.sum
index 372807d5f..8262958d0 100644
--- a/go.sum
+++ b/go.sum
@@ -58,6 +58,7 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/restic/chunker v0.2.0 h1:GjvmvFuv2mx0iekZs+iAlrioo2UtgsGSSplvoXaVHDU=
 github.com/restic/chunker v0.2.0/go.mod h1:VdjruEj+7BU1ZZTW8Qqi1exxRx2Omf2JH0NsUEkQ29s=
+github.com/russross/blackfriday v1.5.1 h1:B8ZN6pD4PVofmlDCDUdELeYrbsVIDM/bpjW3v3zgcRc=
 github.com/russross/blackfriday v1.5.1/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
 github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww=
 github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
diff --git a/internal/archiver/archiver_test.go b/internal/archiver/archiver_test.go
index cff28c519..e7cda551a 100644
--- a/internal/archiver/archiver_test.go
+++ b/internal/archiver/archiver_test.go
@@ -137,7 +137,7 @@ func TestArchiverSaveFile(t *testing.T) {
 			tempdir, repo, cleanup := prepareTempdirRepoSrc(t, TestDir{"file": testfile})
 			defer cleanup()
 
-			node, stats := saveFile(t, repo, filepath.Join(tempdir, "file"), fs.Track{fs.Local{}})
+			node, stats := saveFile(t, repo, filepath.Join(tempdir, "file"), fs.Track{FS: fs.Local{}})
 
 			TestEnsureFileContent(ctx, t, repo, "file", node, testfile)
 			if stats.DataSize != uint64(len(testfile.Content)) {
@@ -218,7 +218,7 @@ func TestArchiverSave(t *testing.T) {
 
 			var tmb tomb.Tomb
 
-			arch := New(repo, fs.Track{fs.Local{}}, Options{})
+			arch := New(repo, fs.Track{FS: fs.Local{}}, Options{})
 			arch.Error = func(item string, fi os.FileInfo, err error) error {
 				t.Errorf("archiver error for %v: %v", item, err)
 				return err
@@ -358,7 +358,7 @@ func BenchmarkArchiverSaveFileSmall(b *testing.B) {
 		tempdir, repo, cleanup := prepareTempdirRepoSrc(b, d)
 		b.StartTimer()
 
-		_, stats := saveFile(b, repo, filepath.Join(tempdir, "file"), fs.Track{fs.Local{}})
+		_, stats := saveFile(b, repo, filepath.Join(tempdir, "file"), fs.Track{FS: fs.Local{}})
 
 		b.StopTimer()
 		if stats.DataSize != fileSize {
@@ -391,7 +391,7 @@ func BenchmarkArchiverSaveFileLarge(b *testing.B) {
 		tempdir, repo, cleanup := prepareTempdirRepoSrc(b, d)
 		b.StartTimer()
 
-		_, stats := saveFile(b, repo, filepath.Join(tempdir, "file"), fs.Track{fs.Local{}})
+		_, stats := saveFile(b, repo, filepath.Join(tempdir, "file"), fs.Track{FS: fs.Local{}})
 
 		b.StopTimer()
 		if stats.DataSize != fileSize {
@@ -471,7 +471,7 @@ func TestArchiverSaveFileIncremental(t *testing.T) {
 	for i := 0; i < 3; i++ {
 		appendToFile(t, testfile, data)
 
-		node, _ := saveFile(t, repo, testfile, fs.Track{fs.Local{}})
+		node, _ := saveFile(t, repo, testfile, fs.Track{FS: fs.Local{}})
 
 		t.Logf("node blobs: %v", node.Content)
 
@@ -752,7 +752,7 @@ func TestArchiverSaveDir(t *testing.T) {
 			tempdir, repo, cleanup := prepareTempdirRepoSrc(t, test.src)
 			defer cleanup()
 
-			arch := New(repo, fs.Track{fs.Local{}}, Options{})
+			arch := New(repo, fs.Track{FS: fs.Local{}}, Options{})
 			arch.runWorkers(ctx, &tmb)
 
 			chdir := tempdir
@@ -842,7 +842,7 @@ func TestArchiverSaveDirIncremental(t *testing.T) {
 		var tmb tomb.Tomb
 		ctx := tmb.Context(context.Background())
 
-		arch := New(repo, fs.Track{fs.Local{}}, Options{})
+		arch := New(repo, fs.Track{FS: fs.Local{}}, Options{})
 		arch.runWorkers(ctx, &tmb)
 
 		fi, err := fs.Lstat(tempdir)
@@ -1002,7 +1002,7 @@ func TestArchiverSaveTree(t *testing.T) {
 			tempdir, repo, cleanup := prepareTempdirRepoSrc(t, test.src)
 			defer cleanup()
 
-			testFS := fs.Track{fs.Local{}}
+			testFS := fs.Track{FS: fs.Local{}}
 
 			arch := New(repo, testFS, Options{})
 			arch.runWorkers(ctx, &tmb)
@@ -1291,7 +1291,7 @@ func TestArchiverSnapshot(t *testing.T) {
 			tempdir, repo, cleanup := prepareTempdirRepoSrc(t, test.src)
 			defer cleanup()
 
-			arch := New(repo, fs.Track{fs.Local{}}, Options{})
+			arch := New(repo, fs.Track{FS: fs.Local{}}, Options{})
 
 			chdir := tempdir
 			if test.chdir != "" {
@@ -1455,7 +1455,7 @@ func TestArchiverSnapshotSelect(t *testing.T) {
 			tempdir, repo, cleanup := prepareTempdirRepoSrc(t, test.src)
 			defer cleanup()
 
-			arch := New(repo, fs.Track{fs.Local{}}, Options{})
+			arch := New(repo, fs.Track{FS: fs.Local{}}, Options{})
 			arch.Select = test.selFn
 
 			back := fs.TestChdir(t, tempdir)
@@ -1559,7 +1559,7 @@ func TestArchiverParent(t *testing.T) {
 			defer cleanup()
 
 			testFS := &MockFS{
-				FS:        fs.Track{fs.Local{}},
+				FS:        fs.Track{FS: fs.Local{}},
 				bytesRead: make(map[string]int),
 			}
 
@@ -1732,7 +1732,7 @@ func TestArchiverErrorReporting(t *testing.T) {
 				test.prepare(t)
 			}
 
-			arch := New(repo, fs.Track{fs.Local{}}, Options{})
+			arch := New(repo, fs.Track{FS: fs.Local{}}, Options{})
 			arch.Error = test.errFn
 
 			_, snapshotID, err := arch.Snapshot(ctx, []string{"."}, SnapshotOptions{Time: time.Now()})
@@ -1867,7 +1867,7 @@ func TestArchiverAbortEarlyOnError(t *testing.T) {
 			defer back()
 
 			testFS := &TrackFS{
-				FS:     fs.Track{fs.Local{}},
+				FS:     fs.Track{FS: fs.Local{}},
 				opened: make(map[string]uint),
 			}
 
diff --git a/internal/archiver/scanner_test.go b/internal/archiver/scanner_test.go
index 91b8d7f63..f02546f3c 100644
--- a/internal/archiver/scanner_test.go
+++ b/internal/archiver/scanner_test.go
@@ -96,7 +96,7 @@ func TestScanner(t *testing.T) {
 				t.Fatal(err)
 			}
 
-			sc := NewScanner(fs.Track{fs.Local{}})
+			sc := NewScanner(fs.Track{FS: fs.Local{}})
 			if test.selFn != nil {
 				sc.Select = test.selFn
 			}
@@ -237,7 +237,7 @@ func TestScannerError(t *testing.T) {
 				test.prepare(t)
 			}
 
-			sc := NewScanner(fs.Track{fs.Local{}})
+			sc := NewScanner(fs.Track{FS: fs.Local{}})
 			if test.selFn != nil {
 				sc.Select = test.selFn
 			}
@@ -307,7 +307,7 @@ func TestScannerCancel(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	sc := NewScanner(fs.Track{fs.Local{}})
+	sc := NewScanner(fs.Track{FS: fs.Local{}})
 	var lastStats ScanStats
 	sc.Result = func(item string, s ScanStats) {
 		lastStats = s
diff --git a/internal/archiver/tree.go b/internal/archiver/tree.go
index 5835839b2..0c8a21539 100644
--- a/internal/archiver/tree.go
+++ b/internal/archiver/tree.go
@@ -232,7 +232,6 @@ func unrollTree(f fs.FS, t *Tree) error {
 			}
 
 			return errors.Errorf("tree unrollTree: collision on path, node %#v, path %q", node, f.Join(t.Path, entry))
-			continue
 		}
 		t.Nodes[entry] = Tree{Path: f.Join(t.Path, entry)}
 	}
diff --git a/internal/backend/b2/b2.go b/internal/backend/b2/b2.go
index 32bf78361..abb38c32c 100644
--- a/internal/backend/b2/b2.go
+++ b/internal/backend/b2/b2.go
@@ -320,8 +320,6 @@ func (be *b2Backend) List(ctx context.Context, t restic.FileType, fn func(restic
 		}
 		cur = c
 	}
-
-	return ctx.Err()
 }
 
 // Remove keys for a specified backend type.
diff --git a/internal/migrations/s3_layout.go b/internal/migrations/s3_layout.go
index 12ffef0ff..9dbf8dfa3 100644
--- a/internal/migrations/s3_layout.go
+++ b/internal/migrations/s3_layout.go
@@ -67,8 +67,6 @@ func (m *S3Layout) moveFiles(ctx context.Context, be *s3.Backend, l backend.Layo
 			return be.Rename(h, l)
 		})
 	})
-
-	return nil
 }
 
 // Apply runs the migration.
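Note: most of the hunks above address go vet's "composites" check, which reports "composite literal uses unkeyed fields" when a struct type imported from another package (here fs.Track) is initialized positionally; writing out the field name fixes it. The remaining hunks delete code that vet's "unreachable" check flags: a continue directly after a return in tree.go, and trailing return statements that control flow can never reach in b2.go and s3_layout.go. The following is a minimal, self-contained sketch of both findings; Local and Track are illustrative stand-ins, not the restic types themselves.

// vetdemo.go: stand-in types mimicking the shape implied by the diff.
package main

import "fmt"

// Local stands in for fs.Local; Track stands in for fs.Track, whose
// filesystem field is named FS (as the keyed literals in the patch show).
type Local struct{}

type Track struct {
	FS Local
}

// newTrack shows the composite-literal fix: written as Track{Local{}} from
// an importing package, 'go vet' reports "composite literal uses unkeyed
// fields"; naming the field silences the warning and keeps the literal
// valid if fields are later added or reordered.
func newTrack() Track {
	return Track{FS: Local{}}
}

// findEmpty mirrors the unreachable-code fixes: a statement placed directly
// after the return below (such as the stray 'continue' removed from
// tree.go) could never run, and 'go vet' reports it as unreachable code.
func findEmpty(items []string) error {
	for i, item := range items {
		if item == "" {
			return fmt.Errorf("item %d is empty", i)
		}
	}
	return nil
}

func main() {
	_ = newTrack()
	fmt.Println(findEmpty([]string{"a", ""}))
}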