From 118a69a84b5408201142ea06f6e25ae908527598 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 24 Feb 2024 15:19:02 +0100 Subject: [PATCH 01/10] lock: replace lockRepo(Exclusive) with openWith(Read/Write/Exclusive)Lock The new functions much better convey the intent behind the lock request. This allows cleanly integrating noLock (for read) and dryRun (write/exclusive) handling. There are only minor changes to existing behavior with two exceptions: - `tag` no longer accepts the `--no-lock` flag. As it replaces files in the repository, this always requires an exclusive lock. - `debug examine` now returns an error if both `--extract-pack` and `--no-lock` are given. --- cmd/restic/cmd_backup.go | 19 ++------------- cmd/restic/cmd_cat.go | 12 ++-------- cmd/restic/cmd_check.go | 16 ++++--------- cmd/restic/cmd_copy.go | 21 ++++------------- cmd/restic/cmd_debug.go | 28 +++++++--------------- cmd/restic/cmd_diff.go | 12 ++-------- cmd/restic/cmd_dump.go | 12 ++-------- cmd/restic/cmd_find.go | 12 ++-------- cmd/restic/cmd_forget.go | 16 ++++--------- cmd/restic/cmd_key_add.go | 9 ++----- cmd/restic/cmd_key_list.go | 12 ++-------- cmd/restic/cmd_key_passwd.go | 9 ++----- cmd/restic/cmd_key_remove.go | 13 +++------- cmd/restic/cmd_list.go | 12 ++-------- cmd/restic/cmd_migrate.go | 9 ++----- cmd/restic/cmd_mount.go | 12 ++-------- cmd/restic/cmd_prune.go | 9 ++----- cmd/restic/cmd_recover.go | 9 ++----- cmd/restic/cmd_repair_index.go | 9 ++----- cmd/restic/cmd_repair_packs.go | 9 ++----- cmd/restic/cmd_repair_snapshots.go | 15 ++---------- cmd/restic/cmd_restore.go | 12 ++-------- cmd/restic/cmd_rewrite.go | 31 ++++++++++-------------- cmd/restic/cmd_snapshots.go | 12 ++-------- cmd/restic/cmd_stats.go | 12 ++-------- cmd/restic/cmd_tag.go | 16 ++++--------- cmd/restic/lock.go | 38 ++++++++++++++++++++++++++---- cmd/restic/lock_test.go | 18 +++++++------- 28 files changed, 122 insertions(+), 292 deletions(-) diff --git a/cmd/restic/cmd_backup.go b/cmd/restic/cmd_backup.go index acc4bddb1..8b2f1f808 100644 --- a/cmd/restic/cmd_backup.go +++ b/cmd/restic/cmd_backup.go @@ -463,10 +463,11 @@ func runBackup(ctx context.Context, opts BackupOptions, gopts GlobalOptions, ter Verbosef("open repository\n") } - repo, err := OpenRepository(ctx, gopts) + ctx, repo, unlock, err := openWithAppendLock(ctx, gopts, opts.DryRun) if err != nil { return err } + defer unlock() var progressPrinter backup.ProgressPrinter if gopts.JSON { @@ -478,22 +479,6 @@ func runBackup(ctx context.Context, opts BackupOptions, gopts GlobalOptions, ter calculateProgressInterval(!gopts.Quiet, gopts.JSON)) defer progressReporter.Done() - if opts.DryRun { - repo.SetDryRun() - } - - if !gopts.JSON { - progressPrinter.V("lock repository") - } - if !opts.DryRun { - var lock *restic.Lock - lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) - if err != nil { - return err - } - } - // rejectByNameFuncs collect functions that can reject items from the backup based on path only rejectByNameFuncs, err := collectRejectByNameFuncs(opts, repo) if err != nil { diff --git a/cmd/restic/cmd_cat.go b/cmd/restic/cmd_cat.go index 92f58b2e7..ccec9b5d9 100644 --- a/cmd/restic/cmd_cat.go +++ b/cmd/restic/cmd_cat.go @@ -64,19 +64,11 @@ func runCat(ctx context.Context, gopts GlobalOptions, args []string) error { return err } - repo, err := OpenRepository(ctx, gopts) + ctx, repo, unlock, err := openWithReadLock(ctx, gopts, gopts.NoLock) if err != nil { return err } - - if !gopts.NoLock { - var lock *restic.Lock - 
lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) - if err != nil { - return err - } - } + defer unlock() tpe := args[0] diff --git a/cmd/restic/cmd_check.go b/cmd/restic/cmd_check.go index cbe388877..7bea641ae 100644 --- a/cmd/restic/cmd_check.go +++ b/cmd/restic/cmd_check.go @@ -204,20 +204,14 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args return code, nil }) - repo, err := OpenRepository(ctx, gopts) + if !gopts.NoLock { + Verbosef("create exclusive lock for repository\n") + } + ctx, repo, unlock, err := openWithExclusiveLock(ctx, gopts, gopts.NoLock) if err != nil { return err } - - if !gopts.NoLock { - Verbosef("create exclusive lock for repository\n") - var lock *restic.Lock - lock, ctx, err = lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) - if err != nil { - return err - } - } + defer unlock() chkr := checker.New(repo, opts.CheckUnused) err = chkr.LoadSnapshots(ctx) diff --git a/cmd/restic/cmd_copy.go b/cmd/restic/cmd_copy.go index 92922b42b..410134e41 100644 --- a/cmd/restic/cmd_copy.go +++ b/cmd/restic/cmd_copy.go @@ -62,30 +62,17 @@ func runCopy(ctx context.Context, opts CopyOptions, gopts GlobalOptions, args [] gopts, secondaryGopts = secondaryGopts, gopts } - srcRepo, err := OpenRepository(ctx, gopts) + ctx, srcRepo, unlock, err := openWithReadLock(ctx, gopts, gopts.NoLock) if err != nil { return err } + defer unlock() - dstRepo, err := OpenRepository(ctx, secondaryGopts) - if err != nil { - return err - } - - if !gopts.NoLock { - var srcLock *restic.Lock - srcLock, ctx, err = lockRepo(ctx, srcRepo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(srcLock) - if err != nil { - return err - } - } - - dstLock, ctx, err := lockRepo(ctx, dstRepo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(dstLock) + ctx, dstRepo, unlock, err := openWithAppendLock(ctx, secondaryGopts, false) if err != nil { return err } + defer unlock() srcSnapshotLister, err := restic.MemorizeList(ctx, srcRepo, restic.SnapshotFile) if err != nil { diff --git a/cmd/restic/cmd_debug.go b/cmd/restic/cmd_debug.go index a87e7a0c5..3abb9d7eb 100644 --- a/cmd/restic/cmd_debug.go +++ b/cmd/restic/cmd_debug.go @@ -153,19 +153,11 @@ func runDebugDump(ctx context.Context, gopts GlobalOptions, args []string) error return errors.Fatal("type not specified") } - repo, err := OpenRepository(ctx, gopts) + ctx, repo, unlock, err := openWithReadLock(ctx, gopts, gopts.NoLock) if err != nil { return err } - - if !gopts.NoLock { - var lock *restic.Lock - lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) - if err != nil { - return err - } - } + defer unlock() tpe := args[0] @@ -442,10 +434,15 @@ func storePlainBlob(id restic.ID, prefix string, plain []byte) error { } func runDebugExamine(ctx context.Context, gopts GlobalOptions, opts DebugExamineOptions, args []string) error { - repo, err := OpenRepository(ctx, gopts) + if opts.ExtractPack && gopts.NoLock { + return fmt.Errorf("--extract-pack and --no-lock are mutually exclusive") + } + + ctx, repo, unlock, err := openWithAppendLock(ctx, gopts, gopts.NoLock) if err != nil { return err } + defer unlock() ids := make([]restic.ID, 0) for _, name := range args { @@ -464,15 +461,6 @@ func runDebugExamine(ctx context.Context, gopts GlobalOptions, opts DebugExamine return errors.Fatal("no pack files to examine") } - if !gopts.NoLock { - var lock *restic.Lock - lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON) - defer 
unlockRepo(lock) - if err != nil { - return err - } - } - bar := newIndexProgress(gopts.Quiet, gopts.JSON) err = repo.LoadIndex(ctx, bar) if err != nil { diff --git a/cmd/restic/cmd_diff.go b/cmd/restic/cmd_diff.go index 3bd29fa67..b156191dc 100644 --- a/cmd/restic/cmd_diff.go +++ b/cmd/restic/cmd_diff.go @@ -344,19 +344,11 @@ func runDiff(ctx context.Context, opts DiffOptions, gopts GlobalOptions, args [] return errors.Fatalf("specify two snapshot IDs") } - repo, err := OpenRepository(ctx, gopts) + ctx, repo, unlock, err := openWithReadLock(ctx, gopts, gopts.NoLock) if err != nil { return err } - - if !gopts.NoLock { - var lock *restic.Lock - lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) - if err != nil { - return err - } - } + defer unlock() // cache snapshots listing be, err := restic.MemorizeList(ctx, repo, restic.SnapshotFile) diff --git a/cmd/restic/cmd_dump.go b/cmd/restic/cmd_dump.go index 9178f2abe..39e915b40 100644 --- a/cmd/restic/cmd_dump.go +++ b/cmd/restic/cmd_dump.go @@ -131,19 +131,11 @@ func runDump(ctx context.Context, opts DumpOptions, gopts GlobalOptions, args [] splittedPath := splitPath(path.Clean(pathToPrint)) - repo, err := OpenRepository(ctx, gopts) + ctx, repo, unlock, err := openWithReadLock(ctx, gopts, gopts.NoLock) if err != nil { return err } - - if !gopts.NoLock { - var lock *restic.Lock - lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) - if err != nil { - return err - } - } + defer unlock() sn, subfolder, err := (&restic.SnapshotFilter{ Hosts: opts.Hosts, diff --git a/cmd/restic/cmd_find.go b/cmd/restic/cmd_find.go index 7ea7c425a..e29fe30dc 100644 --- a/cmd/restic/cmd_find.go +++ b/cmd/restic/cmd_find.go @@ -563,19 +563,11 @@ func runFind(ctx context.Context, opts FindOptions, gopts GlobalOptions, args [] return errors.Fatal("cannot have several ID types") } - repo, err := OpenRepository(ctx, gopts) + ctx, repo, unlock, err := openWithReadLock(ctx, gopts, gopts.NoLock) if err != nil { return err } - - if !gopts.NoLock { - var lock *restic.Lock - lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) - if err != nil { - return err - } - } + defer unlock() snapshotLister, err := restic.MemorizeList(ctx, repo, restic.SnapshotFile) if err != nil { diff --git a/cmd/restic/cmd_forget.go b/cmd/restic/cmd_forget.go index 65ff449a3..f2fc1da8c 100644 --- a/cmd/restic/cmd_forget.go +++ b/cmd/restic/cmd_forget.go @@ -163,23 +163,15 @@ func runForget(ctx context.Context, opts ForgetOptions, pruneOptions PruneOption return err } - repo, err := OpenRepository(ctx, gopts) - if err != nil { - return err - } - if gopts.NoLock && !opts.DryRun { return errors.Fatal("--no-lock is only applicable in combination with --dry-run for forget command") } - if !opts.DryRun || !gopts.NoLock { - var lock *restic.Lock - lock, ctx, err = lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) - if err != nil { - return err - } + ctx, repo, unlock, err := openWithExclusiveLock(ctx, gopts, opts.DryRun && gopts.NoLock) + if err != nil { + return err } + defer unlock() var snapshots restic.Snapshots removeSnIDs := restic.NewIDSet() diff --git a/cmd/restic/cmd_key_add.go b/cmd/restic/cmd_key_add.go index 43a38f4eb..83e0cab7f 100644 --- a/cmd/restic/cmd_key_add.go +++ b/cmd/restic/cmd_key_add.go @@ -50,16 +50,11 @@ func runKeyAdd(ctx context.Context, gopts GlobalOptions, opts KeyAddOptions, arg return fmt.Errorf("the key add command expects no 
arguments, only options - please see `restic help key add` for usage and flags") } - repo, err := OpenRepository(ctx, gopts) - if err != nil { - return err - } - - lock, ctx, err := lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) + ctx, repo, unlock, err := openWithAppendLock(ctx, gopts, false) if err != nil { return err } + defer unlock() return addKey(ctx, repo, gopts, opts) } diff --git a/cmd/restic/cmd_key_list.go b/cmd/restic/cmd_key_list.go index 2b3574281..9bddb5ed3 100644 --- a/cmd/restic/cmd_key_list.go +++ b/cmd/restic/cmd_key_list.go @@ -40,19 +40,11 @@ func runKeyList(ctx context.Context, gopts GlobalOptions, args []string) error { return fmt.Errorf("the key list command expects no arguments, only options - please see `restic help key list` for usage and flags") } - repo, err := OpenRepository(ctx, gopts) + ctx, repo, unlock, err := openWithReadLock(ctx, gopts, gopts.NoLock) if err != nil { return err } - - if !gopts.NoLock { - var lock *restic.Lock - lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) - if err != nil { - return err - } - } + defer unlock() return listKeys(ctx, repo, gopts) } diff --git a/cmd/restic/cmd_key_passwd.go b/cmd/restic/cmd_key_passwd.go index cb916274c..70abca6dc 100644 --- a/cmd/restic/cmd_key_passwd.go +++ b/cmd/restic/cmd_key_passwd.go @@ -47,16 +47,11 @@ func runKeyPasswd(ctx context.Context, gopts GlobalOptions, opts KeyPasswdOption return fmt.Errorf("the key passwd command expects no arguments, only options - please see `restic help key passwd` for usage and flags") } - repo, err := OpenRepository(ctx, gopts) - if err != nil { - return err - } - - lock, ctx, err := lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) + ctx, repo, unlock, err := openWithExclusiveLock(ctx, gopts, false) if err != nil { return err } + defer unlock() return changePassword(ctx, repo, gopts, opts) } diff --git a/cmd/restic/cmd_key_remove.go b/cmd/restic/cmd_key_remove.go index c8e303ffc..93babb4f3 100644 --- a/cmd/restic/cmd_key_remove.go +++ b/cmd/restic/cmd_key_remove.go @@ -37,20 +37,13 @@ func runKeyRemove(ctx context.Context, gopts GlobalOptions, args []string) error return fmt.Errorf("key remove expects one argument as the key id") } - repo, err := OpenRepository(ctx, gopts) + ctx, repo, unlock, err := openWithExclusiveLock(ctx, gopts, false) if err != nil { return err } + defer unlock() - lock, ctx, err := lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) - if err != nil { - return err - } - - idPrefix := args[0] - - return deleteKey(ctx, repo, idPrefix) + return deleteKey(ctx, repo, args[0]) } func deleteKey(ctx context.Context, repo *repository.Repository, idPrefix string) error { diff --git a/cmd/restic/cmd_list.go b/cmd/restic/cmd_list.go index becad7f0d..a3df0c98f 100644 --- a/cmd/restic/cmd_list.go +++ b/cmd/restic/cmd_list.go @@ -36,19 +36,11 @@ func runList(ctx context.Context, gopts GlobalOptions, args []string) error { return errors.Fatal("type not specified") } - repo, err := OpenRepository(ctx, gopts) + ctx, repo, unlock, err := openWithReadLock(ctx, gopts, gopts.NoLock || args[0] == "locks") if err != nil { return err } - - if !gopts.NoLock && args[0] != "locks" { - var lock *restic.Lock - lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) - if err != nil { - return err - } - } + defer unlock() var t restic.FileType switch args[0] { diff --git a/cmd/restic/cmd_migrate.go 
b/cmd/restic/cmd_migrate.go index fd2e762c0..c3f82b8dd 100644 --- a/cmd/restic/cmd_migrate.go +++ b/cmd/restic/cmd_migrate.go @@ -117,16 +117,11 @@ func applyMigrations(ctx context.Context, opts MigrateOptions, gopts GlobalOptio } func runMigrate(ctx context.Context, opts MigrateOptions, gopts GlobalOptions, args []string) error { - repo, err := OpenRepository(ctx, gopts) - if err != nil { - return err - } - - lock, ctx, err := lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) + ctx, repo, unlock, err := openWithExclusiveLock(ctx, gopts, false) if err != nil { return err } + defer unlock() if len(args) == 0 { return checkMigrations(ctx, repo) diff --git a/cmd/restic/cmd_mount.go b/cmd/restic/cmd_mount.go index 5fd81b344..cb2b1142d 100644 --- a/cmd/restic/cmd_mount.go +++ b/cmd/restic/cmd_mount.go @@ -125,19 +125,11 @@ func runMount(ctx context.Context, opts MountOptions, gopts GlobalOptions, args debug.Log("start mount") defer debug.Log("finish mount") - repo, err := OpenRepository(ctx, gopts) + ctx, repo, unlock, err := openWithReadLock(ctx, gopts, gopts.NoLock) if err != nil { return err } - - if !gopts.NoLock { - var lock *restic.Lock - lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) - if err != nil { - return err - } - } + defer unlock() bar := newIndexProgress(gopts.Quiet, gopts.JSON) err = repo.LoadIndex(ctx, bar) diff --git a/cmd/restic/cmd_prune.go b/cmd/restic/cmd_prune.go index 1b9352ea7..3a9a8c33c 100644 --- a/cmd/restic/cmd_prune.go +++ b/cmd/restic/cmd_prune.go @@ -148,10 +148,11 @@ func runPrune(ctx context.Context, opts PruneOptions, gopts GlobalOptions) error return errors.Fatal("disabled compression and `--repack-uncompressed` are mutually exclusive") } - repo, err := OpenRepository(ctx, gopts) + ctx, repo, unlock, err := openWithExclusiveLock(ctx, gopts, false) if err != nil { return err } + defer unlock() if repo.Connections() < 2 { return errors.Fatal("prune requires a backend connection limit of at least two") @@ -169,12 +170,6 @@ func runPrune(ctx context.Context, opts PruneOptions, gopts GlobalOptions) error opts.unsafeRecovery = true } - lock, ctx, err := lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) - if err != nil { - return err - } - return runPruneWithRepo(ctx, opts, gopts, repo, restic.NewIDSet()) } diff --git a/cmd/restic/cmd_recover.go b/cmd/restic/cmd_recover.go index b97a7582b..f9a4d419d 100644 --- a/cmd/restic/cmd_recover.go +++ b/cmd/restic/cmd_recover.go @@ -40,16 +40,11 @@ func runRecover(ctx context.Context, gopts GlobalOptions) error { return err } - repo, err := OpenRepository(ctx, gopts) - if err != nil { - return err - } - - lock, ctx, err := lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) + ctx, repo, unlock, err := openWithAppendLock(ctx, gopts, false) if err != nil { return err } + defer unlock() snapshotLister, err := restic.MemorizeList(ctx, repo, restic.SnapshotFile) if err != nil { diff --git a/cmd/restic/cmd_repair_index.go b/cmd/restic/cmd_repair_index.go index ea36f02f6..1ac743348 100644 --- a/cmd/restic/cmd_repair_index.go +++ b/cmd/restic/cmd_repair_index.go @@ -56,16 +56,11 @@ func init() { } func runRebuildIndex(ctx context.Context, opts RepairIndexOptions, gopts GlobalOptions) error { - repo, err := OpenRepository(ctx, gopts) - if err != nil { - return err - } - - lock, ctx, err := lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) + ctx, repo, unlock, err := 
openWithExclusiveLock(ctx, gopts, false) if err != nil { return err } + defer unlock() return rebuildIndex(ctx, opts, gopts, repo) } diff --git a/cmd/restic/cmd_repair_packs.go b/cmd/restic/cmd_repair_packs.go index 521b5859f..00dee076b 100644 --- a/cmd/restic/cmd_repair_packs.go +++ b/cmd/restic/cmd_repair_packs.go @@ -52,16 +52,11 @@ func runRepairPacks(ctx context.Context, gopts GlobalOptions, term *termstatus.T return errors.Fatal("no ids specified") } - repo, err := OpenRepository(ctx, gopts) - if err != nil { - return err - } - - lock, ctx, err := lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) + ctx, repo, unlock, err := openWithExclusiveLock(ctx, gopts, false) if err != nil { return err } + defer unlock() bar := newIndexProgress(gopts.Quiet, gopts.JSON) err = repo.LoadIndex(ctx, bar) diff --git a/cmd/restic/cmd_repair_snapshots.go b/cmd/restic/cmd_repair_snapshots.go index cc3d0eb85..4d9745e15 100644 --- a/cmd/restic/cmd_repair_snapshots.go +++ b/cmd/restic/cmd_repair_snapshots.go @@ -66,22 +66,11 @@ func init() { } func runRepairSnapshots(ctx context.Context, gopts GlobalOptions, opts RepairOptions, args []string) error { - repo, err := OpenRepository(ctx, gopts) + ctx, repo, unlock, err := openWithExclusiveLock(ctx, gopts, opts.DryRun) if err != nil { return err } - - if !opts.DryRun { - var lock *restic.Lock - var err error - lock, ctx, err = lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) - if err != nil { - return err - } - } else { - repo.SetDryRun() - } + defer unlock() snapshotLister, err := restic.MemorizeList(ctx, repo, restic.SnapshotFile) if err != nil { diff --git a/cmd/restic/cmd_restore.go b/cmd/restic/cmd_restore.go index 58f257541..5161be50d 100644 --- a/cmd/restic/cmd_restore.go +++ b/cmd/restic/cmd_restore.go @@ -127,19 +127,11 @@ func runRestore(ctx context.Context, opts RestoreOptions, gopts GlobalOptions, debug.Log("restore %v to %v", snapshotIDString, opts.Target) - repo, err := OpenRepository(ctx, gopts) + ctx, repo, unlock, err := openWithReadLock(ctx, gopts, gopts.NoLock) if err != nil { return err } - - if !gopts.NoLock { - var lock *restic.Lock - lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) - if err != nil { - return err - } - } + defer unlock() sn, subfolder, err := (&restic.SnapshotFilter{ Hosts: opts.Hosts, diff --git a/cmd/restic/cmd_rewrite.go b/cmd/restic/cmd_rewrite.go index 62624e75c..06d4ddbd1 100644 --- a/cmd/restic/cmd_rewrite.go +++ b/cmd/restic/cmd_rewrite.go @@ -256,27 +256,22 @@ func runRewrite(ctx context.Context, opts RewriteOptions, gopts GlobalOptions, a return errors.Fatal("Nothing to do: no excludes provided and no new metadata provided") } - repo, err := OpenRepository(ctx, gopts) + var ( + repo *repository.Repository + unlock func() + err error + ) + + if opts.Forget { + Verbosef("create exclusive lock for repository\n") + ctx, repo, unlock, err = openWithExclusiveLock(ctx, gopts, opts.DryRun) + } else { + ctx, repo, unlock, err = openWithAppendLock(ctx, gopts, opts.DryRun) + } if err != nil { return err } - - if !opts.DryRun { - var lock *restic.Lock - var err error - if opts.Forget { - Verbosef("create exclusive lock for repository\n") - lock, ctx, err = lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON) - } else { - lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON) - } - defer unlockRepo(lock) - if err != nil { - return err - } - } else { - repo.SetDryRun() - } + defer unlock() 
 	snapshotLister, err := restic.MemorizeList(ctx, repo, restic.SnapshotFile)
 	if err != nil {
diff --git a/cmd/restic/cmd_snapshots.go b/cmd/restic/cmd_snapshots.go
index d6199d47a..1a9cd2232 100644
--- a/cmd/restic/cmd_snapshots.go
+++ b/cmd/restic/cmd_snapshots.go
@@ -59,19 +59,11 @@ func init() {
 }
 
 func runSnapshots(ctx context.Context, opts SnapshotOptions, gopts GlobalOptions, args []string) error {
-	repo, err := OpenRepository(ctx, gopts)
+	ctx, repo, unlock, err := openWithReadLock(ctx, gopts, gopts.NoLock)
 	if err != nil {
 		return err
 	}
-
-	if !gopts.NoLock {
-		var lock *restic.Lock
-		lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
-		defer unlockRepo(lock)
-		if err != nil {
-			return err
-		}
-	}
+	defer unlock()
 
 	var snapshots restic.Snapshots
 	for sn := range FindFilteredSnapshots(ctx, repo, repo, &opts.SnapshotFilter, args) {
diff --git a/cmd/restic/cmd_stats.go b/cmd/restic/cmd_stats.go
index b84620bab..20d7a485c 100644
--- a/cmd/restic/cmd_stats.go
+++ b/cmd/restic/cmd_stats.go
@@ -80,19 +80,11 @@ func runStats(ctx context.Context, opts StatsOptions, gopts GlobalOptions, args
 		return err
 	}
 
-	repo, err := OpenRepository(ctx, gopts)
+	ctx, repo, unlock, err := openWithReadLock(ctx, gopts, gopts.NoLock)
 	if err != nil {
 		return err
 	}
-
-	if !gopts.NoLock {
-		var lock *restic.Lock
-		lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
-		defer unlockRepo(lock)
-		if err != nil {
-			return err
-		}
-	}
+	defer unlock()
 
 	snapshotLister, err := restic.MemorizeList(ctx, repo, restic.SnapshotFile)
 	if err != nil {
diff --git a/cmd/restic/cmd_tag.go b/cmd/restic/cmd_tag.go
index 01f3ad8af..b0d139fa6 100644
--- a/cmd/restic/cmd_tag.go
+++ b/cmd/restic/cmd_tag.go
@@ -104,20 +104,12 @@ func runTag(ctx context.Context, opts TagOptions, gopts GlobalOptions, args []st
 		return errors.Fatal("--set and --add/--remove cannot be given at the same time")
 	}
 
-	repo, err := OpenRepository(ctx, gopts)
+	Verbosef("create exclusive lock for repository\n")
+	ctx, repo, unlock, err := openWithExclusiveLock(ctx, gopts, false)
 	if err != nil {
-		return err
-	}
-
-	if !gopts.NoLock {
-		Verbosef("create exclusive lock for repository\n")
-		var lock *restic.Lock
-		lock, ctx, err = lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON)
-		defer unlockRepo(lock)
-		if err != nil {
-			return err
-		}
+		return err
 	}
+	defer unlock()
 
 	changeCnt := 0
 	for sn := range FindFilteredSnapshots(ctx, repo, repo, &opts.SnapshotFilter, args) {
diff --git a/cmd/restic/lock.go b/cmd/restic/lock.go
index 600b7476f..29641e670 100644
--- a/cmd/restic/lock.go
+++ b/cmd/restic/lock.go
@@ -9,6 +9,7 @@ import (
 	"github.com/restic/restic/internal/backend"
 	"github.com/restic/restic/internal/debug"
 	"github.com/restic/restic/internal/errors"
+	"github.com/restic/restic/internal/repository"
 	"github.com/restic/restic/internal/restic"
 )
 
@@ -24,12 +25,41 @@ var globalLocks struct {
 	sync.Once
 }
 
-func lockRepo(ctx context.Context, repo restic.Repository, retryLock time.Duration, json bool) (*restic.Lock, context.Context, error) {
-	return lockRepository(ctx, repo, false, retryLock, json)
+func internalOpenWithLocked(ctx context.Context, gopts GlobalOptions, dryRun bool, exclusive bool) (context.Context, *repository.Repository, func(), error) {
+	repo, err := OpenRepository(ctx, gopts)
+	if err != nil {
+		return nil, nil, nil, err
+	}
+
+	unlock := func() {}
+	if !dryRun {
+		var lock *restic.Lock
+		lock, ctx, err = lockRepository(ctx, repo, exclusive, gopts.RetryLock, gopts.JSON)
+		unlock = func() {
+			unlockRepo(lock)
+		}
+		if err != nil {
+			return nil, nil, nil, err
+		}
+	} else {
+		repo.SetDryRun()
+	}
+
+	return ctx, repo, unlock, nil
 }
 
-func lockRepoExclusive(ctx context.Context, repo restic.Repository, retryLock time.Duration, json bool) (*restic.Lock, context.Context, error) {
-	return lockRepository(ctx, repo, true, retryLock, json)
+func openWithReadLock(ctx context.Context, gopts GlobalOptions, noLock bool) (context.Context, *repository.Repository, func(), error) {
+	// TODO enforce read-only operations once the locking code has moved to the repository
+	return internalOpenWithLocked(ctx, gopts, noLock, false)
+}
+
+func openWithAppendLock(ctx context.Context, gopts GlobalOptions, dryRun bool) (context.Context, *repository.Repository, func(), error) {
+	// TODO enforce non-exclusive operations once the locking code has moved to the repository
+	return internalOpenWithLocked(ctx, gopts, dryRun, false)
+}
+
+func openWithExclusiveLock(ctx context.Context, gopts GlobalOptions, dryRun bool) (context.Context, *repository.Repository, func(), error) {
+	return internalOpenWithLocked(ctx, gopts, dryRun, true)
 }
 
 var (
diff --git a/cmd/restic/lock_test.go b/cmd/restic/lock_test.go
index bf22db699..83d5f2a5e 100644
--- a/cmd/restic/lock_test.go
+++ b/cmd/restic/lock_test.go
@@ -37,7 +37,7 @@ func openLockTestRepo(t *testing.T, wrapper backendWrapper) (*repository.Reposit
 }
 
 func checkedLockRepo(ctx context.Context, t *testing.T, repo restic.Repository, env *testEnvironment) (*restic.Lock, context.Context) {
-	lock, wrappedCtx, err := lockRepo(ctx, repo, env.gopts.RetryLock, env.gopts.JSON)
+	lock, wrappedCtx, err := lockRepository(ctx, repo, false, env.gopts.RetryLock, env.gopts.JSON)
 	test.OK(t, err)
 	test.OK(t, wrappedCtx.Err())
 	if lock.Stale() {
@@ -94,10 +94,10 @@ func TestLockConflict(t *testing.T) {
 	repo2, err := OpenRepository(context.TODO(), env.gopts)
 	test.OK(t, err)
 
-	lock, _, err := lockRepoExclusive(context.Background(), repo, env.gopts.RetryLock, env.gopts.JSON)
+	lock, _, err := lockRepository(context.Background(), repo, true, env.gopts.RetryLock, env.gopts.JSON)
 	test.OK(t, err)
 	defer unlockRepo(lock)
-	_, _, err = lockRepo(context.Background(), repo2, env.gopts.RetryLock, env.gopts.JSON)
+	_, _, err = lockRepository(context.Background(), repo2, false, env.gopts.RetryLock, env.gopts.JSON)
 	if err == nil {
 		t.Fatal("second lock should have failed")
 	}
@@ -260,13 +260,13 @@ func TestLockWaitTimeout(t *testing.T) {
 	repo, cleanup, env := openLockTestRepo(t, nil)
 	defer cleanup()
 
-	elock, _, err := lockRepoExclusive(context.TODO(), repo, env.gopts.RetryLock, env.gopts.JSON)
+	elock, _, err := lockRepository(context.TODO(), repo, true, env.gopts.RetryLock, env.gopts.JSON)
 	test.OK(t, err)
 
 	retryLock := 200 * time.Millisecond
 
 	start := time.Now()
-	lock, _, err := lockRepo(context.TODO(), repo, retryLock, env.gopts.JSON)
+	lock, _, err := lockRepository(context.TODO(), repo, false, retryLock, env.gopts.JSON)
 	duration := time.Since(start)
 
 	test.Assert(t, err != nil,
@@ -284,7 +284,7 @@ func TestLockWaitCancel(t *testing.T) {
 	repo, cleanup, env := openLockTestRepo(t, nil)
 	defer cleanup()
 
-	elock, _, err := lockRepoExclusive(context.TODO(), repo, env.gopts.RetryLock, env.gopts.JSON)
+	elock, _, err := lockRepository(context.TODO(), repo, true, env.gopts.RetryLock, env.gopts.JSON)
 	test.OK(t, err)
 
 	retryLock := 200 * time.Millisecond
 	cancelAfter := 40 * time.Millisecond
 
 	start := time.Now()
 	ctx, cancel := context.WithCancel(context.TODO())
 	time.AfterFunc(cancelAfter, cancel)
 
-	lock, _, err := lockRepo(ctx, repo, retryLock, 
env.gopts.JSON) + lock, _, err := lockRepository(ctx, repo, false, retryLock, env.gopts.JSON) duration := time.Since(start) test.Assert(t, err != nil, @@ -312,7 +312,7 @@ func TestLockWaitSuccess(t *testing.T) { repo, cleanup, env := openLockTestRepo(t, nil) defer cleanup() - elock, _, err := lockRepoExclusive(context.TODO(), repo, env.gopts.RetryLock, env.gopts.JSON) + elock, _, err := lockRepository(context.TODO(), repo, true, env.gopts.RetryLock, env.gopts.JSON) test.OK(t, err) retryLock := 200 * time.Millisecond @@ -322,7 +322,7 @@ func TestLockWaitSuccess(t *testing.T) { test.OK(t, elock.Unlock()) }) - lock, _, err := lockRepo(context.TODO(), repo, retryLock, env.gopts.JSON) + lock, _, err := lockRepository(context.TODO(), repo, false, retryLock, env.gopts.JSON) test.OK(t, err) test.OK(t, lock.Unlock()) From cbb5f89252523f8ebe631ee53f326b75f810d6f8 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 24 Feb 2024 16:26:29 +0100 Subject: [PATCH 02/10] lock: move code to repository package --- cmd/restic/lock.go | 312 +----------------- internal/repository/lock.go | 301 +++++++++++++++++ .../repository}/lock_test.go | 120 ++++--- 3 files changed, 370 insertions(+), 363 deletions(-) create mode 100644 internal/repository/lock.go rename {cmd/restic => internal/repository}/lock_test.go (70%) diff --git a/cmd/restic/lock.go b/cmd/restic/lock.go index 29641e670..20ac4dd34 100644 --- a/cmd/restic/lock.go +++ b/cmd/restic/lock.go @@ -2,26 +2,13 @@ package main import ( "context" - "fmt" "sync" - "time" - "github.com/restic/restic/internal/backend" - "github.com/restic/restic/internal/debug" - "github.com/restic/restic/internal/errors" "github.com/restic/restic/internal/repository" "github.com/restic/restic/internal/restic" ) -type lockContext struct { - lock *restic.Lock - cancel context.CancelFunc - refreshWG sync.WaitGroup -} - var globalLocks struct { - locks map[*restic.Lock]*lockContext - sync.Mutex sync.Once } @@ -34,9 +21,20 @@ func internalOpenWithLocked(ctx context.Context, gopts GlobalOptions, dryRun boo unlock := func() {} if !dryRun { var lock *restic.Lock - lock, ctx, err = lockRepository(ctx, repo, exclusive, gopts.RetryLock, gopts.JSON) + + // make sure that a repository is unlocked properly and after cancel() was + // called by the cleanup handler in global.go + globalLocks.Do(func() { + AddCleanupHandler(repository.UnlockAll) + }) + + lock, ctx, err = repository.Lock(ctx, repo, exclusive, gopts.RetryLock, func(msg string) { + if !gopts.JSON { + Verbosef("%s", msg) + } + }, Warnf) unlock = func() { - unlockRepo(lock) + repository.Unlock(lock) } if err != nil { return nil, nil, nil, err @@ -61,287 +59,3 @@ func openWithAppendLock(ctx context.Context, gopts GlobalOptions, dryRun bool) ( func openWithExclusiveLock(ctx context.Context, gopts GlobalOptions, dryRun bool) (context.Context, *repository.Repository, func(), error) { return internalOpenWithLocked(ctx, gopts, dryRun, true) } - -var ( - retrySleepStart = 5 * time.Second - retrySleepMax = 60 * time.Second -) - -func minDuration(a, b time.Duration) time.Duration { - if a <= b { - return a - } - return b -} - -// lockRepository wraps the ctx such that it is cancelled when the repository is unlocked -// cancelling the original context also stops the lock refresh -func lockRepository(ctx context.Context, repo restic.Repository, exclusive bool, retryLock time.Duration, json bool) (*restic.Lock, context.Context, error) { - // make sure that a repository is unlocked properly and after cancel() was - // called by the cleanup 
handler in global.go - globalLocks.Do(func() { - AddCleanupHandler(unlockAll) - }) - - lockFn := restic.NewLock - if exclusive { - lockFn = restic.NewExclusiveLock - } - - var lock *restic.Lock - var err error - - retrySleep := minDuration(retrySleepStart, retryLock) - retryMessagePrinted := false - retryTimeout := time.After(retryLock) - -retryLoop: - for { - lock, err = lockFn(ctx, repo) - if err != nil && restic.IsAlreadyLocked(err) { - - if !retryMessagePrinted { - if !json { - Verbosef("repo already locked, waiting up to %s for the lock\n", retryLock) - } - retryMessagePrinted = true - } - - debug.Log("repo already locked, retrying in %v", retrySleep) - retrySleepCh := time.After(retrySleep) - - select { - case <-ctx.Done(): - return nil, ctx, ctx.Err() - case <-retryTimeout: - debug.Log("repo already locked, timeout expired") - // Last lock attempt - lock, err = lockFn(ctx, repo) - break retryLoop - case <-retrySleepCh: - retrySleep = minDuration(retrySleep*2, retrySleepMax) - } - } else { - // anything else, either a successful lock or another error - break retryLoop - } - } - if restic.IsInvalidLock(err) { - return nil, ctx, errors.Fatalf("%v\n\nthe `unlock --remove-all` command can be used to remove invalid locks. Make sure that no other restic process is accessing the repository when running the command", err) - } - if err != nil { - return nil, ctx, fmt.Errorf("unable to create lock in backend: %w", err) - } - debug.Log("create lock %p (exclusive %v)", lock, exclusive) - - ctx, cancel := context.WithCancel(ctx) - lockInfo := &lockContext{ - lock: lock, - cancel: cancel, - } - lockInfo.refreshWG.Add(2) - refreshChan := make(chan struct{}) - forceRefreshChan := make(chan refreshLockRequest) - - globalLocks.Lock() - globalLocks.locks[lock] = lockInfo - go refreshLocks(ctx, repo.Backend(), lockInfo, refreshChan, forceRefreshChan) - go monitorLockRefresh(ctx, lockInfo, refreshChan, forceRefreshChan) - globalLocks.Unlock() - - return lock, ctx, err -} - -var refreshInterval = 5 * time.Minute - -// consider a lock refresh failed a bit before the lock actually becomes stale -// the difference allows to compensate for a small time drift between clients. 
-var refreshabilityTimeout = restic.StaleLockTimeout - refreshInterval*3/2 - -type refreshLockRequest struct { - result chan bool -} - -func refreshLocks(ctx context.Context, backend backend.Backend, lockInfo *lockContext, refreshed chan<- struct{}, forceRefresh <-chan refreshLockRequest) { - debug.Log("start") - lock := lockInfo.lock - ticker := time.NewTicker(refreshInterval) - lastRefresh := lock.Time - - defer func() { - ticker.Stop() - // ensure that the context was cancelled before removing the lock - lockInfo.cancel() - - // remove the lock from the repo - debug.Log("unlocking repository with lock %v", lock) - if err := lock.Unlock(); err != nil { - debug.Log("error while unlocking: %v", err) - Warnf("error while unlocking: %v", err) - } - - lockInfo.refreshWG.Done() - }() - - for { - select { - case <-ctx.Done(): - debug.Log("terminate") - return - - case req := <-forceRefresh: - debug.Log("trying to refresh stale lock") - // keep on going if our current lock still exists - success := tryRefreshStaleLock(ctx, backend, lock, lockInfo.cancel) - // inform refresh goroutine about forced refresh - select { - case <-ctx.Done(): - case req.result <- success: - } - - if success { - // update lock refresh time - lastRefresh = lock.Time - } - - case <-ticker.C: - if time.Since(lastRefresh) > refreshabilityTimeout { - // the lock is too old, wait until the expiry monitor cancels the context - continue - } - - debug.Log("refreshing locks") - err := lock.Refresh(context.TODO()) - if err != nil { - Warnf("unable to refresh lock: %v\n", err) - } else { - lastRefresh = lock.Time - // inform monitor goroutine about successful refresh - select { - case <-ctx.Done(): - case refreshed <- struct{}{}: - } - } - } - } -} - -func monitorLockRefresh(ctx context.Context, lockInfo *lockContext, refreshed <-chan struct{}, forceRefresh chan<- refreshLockRequest) { - // time.Now() might use a monotonic timer which is paused during standby - // convert to unix time to ensure we compare real time values - lastRefresh := time.Now().UnixNano() - pollDuration := 1 * time.Second - if refreshInterval < pollDuration { - // require for TestLockFailedRefresh - pollDuration = refreshInterval / 5 - } - // timers are paused during standby, which is a problem as the refresh timeout - // _must_ expire if the host was too long in standby. 
Thus fall back to periodic checks - // https://github.com/golang/go/issues/35012 - ticker := time.NewTicker(pollDuration) - defer func() { - ticker.Stop() - lockInfo.cancel() - lockInfo.refreshWG.Done() - }() - - var refreshStaleLockResult chan bool - - for { - select { - case <-ctx.Done(): - debug.Log("terminate expiry monitoring") - return - case <-refreshed: - if refreshStaleLockResult != nil { - // ignore delayed refresh notifications while the stale lock is refreshed - continue - } - lastRefresh = time.Now().UnixNano() - case <-ticker.C: - if time.Now().UnixNano()-lastRefresh < refreshabilityTimeout.Nanoseconds() || refreshStaleLockResult != nil { - continue - } - - debug.Log("trying to refreshStaleLock") - // keep on going if our current lock still exists - refreshReq := refreshLockRequest{ - result: make(chan bool), - } - refreshStaleLockResult = refreshReq.result - - // inform refresh goroutine about forced refresh - select { - case <-ctx.Done(): - case forceRefresh <- refreshReq: - } - case success := <-refreshStaleLockResult: - if success { - lastRefresh = time.Now().UnixNano() - refreshStaleLockResult = nil - continue - } - - Warnf("Fatal: failed to refresh lock in time\n") - return - } - } -} - -func tryRefreshStaleLock(ctx context.Context, be backend.Backend, lock *restic.Lock, cancel context.CancelFunc) bool { - freeze := backend.AsBackend[backend.FreezeBackend](be) - if freeze != nil { - debug.Log("freezing backend") - freeze.Freeze() - defer freeze.Unfreeze() - } - - err := lock.RefreshStaleLock(ctx) - if err != nil { - Warnf("failed to refresh stale lock: %v\n", err) - // cancel context while the backend is still frozen to prevent accidental modifications - cancel() - return false - } - - return true -} - -func unlockRepo(lock *restic.Lock) { - if lock == nil { - return - } - - globalLocks.Lock() - lockInfo, exists := globalLocks.locks[lock] - delete(globalLocks.locks, lock) - globalLocks.Unlock() - - if !exists { - debug.Log("unable to find lock %v in the global list of locks, ignoring", lock) - return - } - lockInfo.cancel() - lockInfo.refreshWG.Wait() -} - -func unlockAll(code int) (int, error) { - globalLocks.Lock() - locks := globalLocks.locks - debug.Log("unlocking %d locks", len(globalLocks.locks)) - for _, lockInfo := range globalLocks.locks { - lockInfo.cancel() - } - globalLocks.locks = make(map[*restic.Lock]*lockContext) - globalLocks.Unlock() - - for _, lockInfo := range locks { - lockInfo.refreshWG.Wait() - } - - return code, nil -} - -func init() { - globalLocks.locks = make(map[*restic.Lock]*lockContext) -} diff --git a/internal/repository/lock.go b/internal/repository/lock.go new file mode 100644 index 000000000..c64cb9222 --- /dev/null +++ b/internal/repository/lock.go @@ -0,0 +1,301 @@ +package repository + +import ( + "context" + "fmt" + "sync" + "time" + + "github.com/restic/restic/internal/backend" + "github.com/restic/restic/internal/debug" + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/restic" +) + +type lockContext struct { + lock *restic.Lock + cancel context.CancelFunc + refreshWG sync.WaitGroup +} + +var globalLocks struct { + locks map[*restic.Lock]*lockContext + sync.Mutex +} + +var ( + retrySleepStart = 5 * time.Second + retrySleepMax = 60 * time.Second +) + +func minDuration(a, b time.Duration) time.Duration { + if a <= b { + return a + } + return b +} + +// Lock wraps the ctx such that it is cancelled when the repository is unlocked +// cancelling the original context also stops the lock refresh +func 
Lock(ctx context.Context, repo restic.Repository, exclusive bool, retryLock time.Duration, printRetry func(msg string), logger func(format string, args ...interface{})) (*restic.Lock, context.Context, error) { + + lockFn := restic.NewLock + if exclusive { + lockFn = restic.NewExclusiveLock + } + + var lock *restic.Lock + var err error + + retrySleep := minDuration(retrySleepStart, retryLock) + retryMessagePrinted := false + retryTimeout := time.After(retryLock) + +retryLoop: + for { + lock, err = lockFn(ctx, repo) + if err != nil && restic.IsAlreadyLocked(err) { + + if !retryMessagePrinted { + printRetry(fmt.Sprintf("repo already locked, waiting up to %s for the lock\n", retryLock)) + retryMessagePrinted = true + } + + debug.Log("repo already locked, retrying in %v", retrySleep) + retrySleepCh := time.After(retrySleep) + + select { + case <-ctx.Done(): + return nil, ctx, ctx.Err() + case <-retryTimeout: + debug.Log("repo already locked, timeout expired") + // Last lock attempt + lock, err = lockFn(ctx, repo) + break retryLoop + case <-retrySleepCh: + retrySleep = minDuration(retrySleep*2, retrySleepMax) + } + } else { + // anything else, either a successful lock or another error + break retryLoop + } + } + if restic.IsInvalidLock(err) { + return nil, ctx, errors.Fatalf("%v\n\nthe `unlock --remove-all` command can be used to remove invalid locks. Make sure that no other restic process is accessing the repository when running the command", err) + } + if err != nil { + return nil, ctx, fmt.Errorf("unable to create lock in backend: %w", err) + } + debug.Log("create lock %p (exclusive %v)", lock, exclusive) + + ctx, cancel := context.WithCancel(ctx) + lockInfo := &lockContext{ + lock: lock, + cancel: cancel, + } + lockInfo.refreshWG.Add(2) + refreshChan := make(chan struct{}) + forceRefreshChan := make(chan refreshLockRequest) + + globalLocks.Lock() + globalLocks.locks[lock] = lockInfo + go refreshLocks(ctx, repo.Backend(), lockInfo, refreshChan, forceRefreshChan, logger) + go monitorLockRefresh(ctx, lockInfo, refreshChan, forceRefreshChan, logger) + globalLocks.Unlock() + + return lock, ctx, err +} + +var refreshInterval = 5 * time.Minute + +// consider a lock refresh failed a bit before the lock actually becomes stale +// the difference allows to compensate for a small time drift between clients. 
+var refreshabilityTimeout = restic.StaleLockTimeout - refreshInterval*3/2 + +type refreshLockRequest struct { + result chan bool +} + +func refreshLocks(ctx context.Context, backend backend.Backend, lockInfo *lockContext, refreshed chan<- struct{}, forceRefresh <-chan refreshLockRequest, logger func(format string, args ...interface{})) { + debug.Log("start") + lock := lockInfo.lock + ticker := time.NewTicker(refreshInterval) + lastRefresh := lock.Time + + defer func() { + ticker.Stop() + // ensure that the context was cancelled before removing the lock + lockInfo.cancel() + + // remove the lock from the repo + debug.Log("unlocking repository with lock %v", lock) + if err := lock.Unlock(); err != nil { + debug.Log("error while unlocking: %v", err) + logger("error while unlocking: %v", err) + } + + lockInfo.refreshWG.Done() + }() + + for { + select { + case <-ctx.Done(): + debug.Log("terminate") + return + + case req := <-forceRefresh: + debug.Log("trying to refresh stale lock") + // keep on going if our current lock still exists + success := tryRefreshStaleLock(ctx, backend, lock, lockInfo.cancel, logger) + // inform refresh goroutine about forced refresh + select { + case <-ctx.Done(): + case req.result <- success: + } + + if success { + // update lock refresh time + lastRefresh = lock.Time + } + + case <-ticker.C: + if time.Since(lastRefresh) > refreshabilityTimeout { + // the lock is too old, wait until the expiry monitor cancels the context + continue + } + + debug.Log("refreshing locks") + err := lock.Refresh(context.TODO()) + if err != nil { + logger("unable to refresh lock: %v\n", err) + } else { + lastRefresh = lock.Time + // inform monitor goroutine about successful refresh + select { + case <-ctx.Done(): + case refreshed <- struct{}{}: + } + } + } + } +} + +func monitorLockRefresh(ctx context.Context, lockInfo *lockContext, refreshed <-chan struct{}, forceRefresh chan<- refreshLockRequest, logger func(format string, args ...interface{})) { + // time.Now() might use a monotonic timer which is paused during standby + // convert to unix time to ensure we compare real time values + lastRefresh := time.Now().UnixNano() + pollDuration := 1 * time.Second + if refreshInterval < pollDuration { + // required for TestLockFailedRefresh + pollDuration = refreshInterval / 5 + } + // timers are paused during standby, which is a problem as the refresh timeout + // _must_ expire if the host was too long in standby. 
Thus fall back to periodic checks + // https://github.com/golang/go/issues/35012 + ticker := time.NewTicker(pollDuration) + defer func() { + ticker.Stop() + lockInfo.cancel() + lockInfo.refreshWG.Done() + }() + + var refreshStaleLockResult chan bool + + for { + select { + case <-ctx.Done(): + debug.Log("terminate expiry monitoring") + return + case <-refreshed: + if refreshStaleLockResult != nil { + // ignore delayed refresh notifications while the stale lock is refreshed + continue + } + lastRefresh = time.Now().UnixNano() + case <-ticker.C: + if time.Now().UnixNano()-lastRefresh < refreshabilityTimeout.Nanoseconds() || refreshStaleLockResult != nil { + continue + } + + debug.Log("trying to refreshStaleLock") + // keep on going if our current lock still exists + refreshReq := refreshLockRequest{ + result: make(chan bool), + } + refreshStaleLockResult = refreshReq.result + + // inform refresh goroutine about forced refresh + select { + case <-ctx.Done(): + case forceRefresh <- refreshReq: + } + case success := <-refreshStaleLockResult: + if success { + lastRefresh = time.Now().UnixNano() + refreshStaleLockResult = nil + continue + } + + logger("Fatal: failed to refresh lock in time\n") + return + } + } +} + +func tryRefreshStaleLock(ctx context.Context, be backend.Backend, lock *restic.Lock, cancel context.CancelFunc, logger func(format string, args ...interface{})) bool { + freeze := backend.AsBackend[backend.FreezeBackend](be) + if freeze != nil { + debug.Log("freezing backend") + freeze.Freeze() + defer freeze.Unfreeze() + } + + err := lock.RefreshStaleLock(ctx) + if err != nil { + logger("failed to refresh stale lock: %v\n", err) + // cancel context while the backend is still frozen to prevent accidental modifications + cancel() + return false + } + + return true +} + +func Unlock(lock *restic.Lock) { + if lock == nil { + return + } + + globalLocks.Lock() + lockInfo, exists := globalLocks.locks[lock] + delete(globalLocks.locks, lock) + globalLocks.Unlock() + + if !exists { + debug.Log("unable to find lock %v in the global list of locks, ignoring", lock) + return + } + lockInfo.cancel() + lockInfo.refreshWG.Wait() +} + +func UnlockAll(code int) (int, error) { + globalLocks.Lock() + locks := globalLocks.locks + debug.Log("unlocking %d locks", len(globalLocks.locks)) + for _, lockInfo := range globalLocks.locks { + lockInfo.cancel() + } + globalLocks.locks = make(map[*restic.Lock]*lockContext) + globalLocks.Unlock() + + for _, lockInfo := range locks { + lockInfo.refreshWG.Wait() + } + + return code, nil +} + +func init() { + globalLocks.locks = make(map[*restic.Lock]*lockContext) +} diff --git a/cmd/restic/lock_test.go b/internal/repository/lock_test.go similarity index 70% rename from cmd/restic/lock_test.go rename to internal/repository/lock_test.go index 83d5f2a5e..fb48a566f 100644 --- a/cmd/restic/lock_test.go +++ b/internal/repository/lock_test.go @@ -1,4 +1,4 @@ -package main +package repository import ( "context" @@ -10,34 +10,35 @@ import ( "time" "github.com/restic/restic/internal/backend" - "github.com/restic/restic/internal/backend/location" "github.com/restic/restic/internal/backend/mem" "github.com/restic/restic/internal/debug" - "github.com/restic/restic/internal/repository" "github.com/restic/restic/internal/restic" "github.com/restic/restic/internal/test" + rtest "github.com/restic/restic/internal/test" ) -func openLockTestRepo(t *testing.T, wrapper backendWrapper) (*repository.Repository, func(), *testEnvironment) { - env, cleanup := withTestEnvironment(t) +type 
backendWrapper func(r backend.Backend) (backend.Backend, error) - reg := location.NewRegistry() - reg.Register(mem.NewFactory()) - env.gopts.backends = reg - env.gopts.Repo = "mem:" +func openLockTestRepo(t *testing.T, wrapper backendWrapper) restic.Repository { + be := backend.Backend(mem.New()) + // initialize repo + TestRepositoryWithBackend(t, be, 0, Options{}) + // reopen repository to allow injecting a backend wrapper if wrapper != nil { - env.gopts.backendTestHook = wrapper + var err error + be, err = wrapper(be) + rtest.OK(t, err) } - testRunInit(t, env.gopts) - repo, err := OpenRepository(context.TODO(), env.gopts) - test.OK(t, err) - return repo, cleanup, env + repo, err := New(be, Options{}) + rtest.OK(t, err) + rtest.OK(t, repo.SearchKey(context.TODO(), test.TestPassword, 1, "")) + return repo } -func checkedLockRepo(ctx context.Context, t *testing.T, repo restic.Repository, env *testEnvironment) (*restic.Lock, context.Context) { - lock, wrappedCtx, err := lockRepository(ctx, repo, false, env.gopts.RetryLock, env.gopts.JSON) +func checkedLockRepo(ctx context.Context, t *testing.T, repo restic.Repository, retryLock time.Duration) (*restic.Lock, context.Context) { + lock, wrappedCtx, err := Lock(ctx, repo, false, retryLock, func(msg string) {}, func(format string, args ...interface{}) {}) test.OK(t, err) test.OK(t, wrappedCtx.Err()) if lock.Stale() { @@ -47,57 +48,54 @@ func checkedLockRepo(ctx context.Context, t *testing.T, repo restic.Repository, } func TestLock(t *testing.T) { - repo, cleanup, env := openLockTestRepo(t, nil) - defer cleanup() + repo := openLockTestRepo(t, nil) - lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, env) - unlockRepo(lock) + lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, 0) + Unlock(lock) if wrappedCtx.Err() == nil { t.Fatal("unlock did not cancel context") } } func TestLockCancel(t *testing.T) { - repo, cleanup, env := openLockTestRepo(t, nil) - defer cleanup() + repo := openLockTestRepo(t, nil) ctx, cancel := context.WithCancel(context.Background()) defer cancel() - lock, wrappedCtx := checkedLockRepo(ctx, t, repo, env) + lock, wrappedCtx := checkedLockRepo(ctx, t, repo, 0) cancel() if wrappedCtx.Err() == nil { t.Fatal("canceled parent context did not cancel context") } - // unlockRepo should not crash - unlockRepo(lock) + // Unlock should not crash + Unlock(lock) } func TestLockUnlockAll(t *testing.T) { - repo, cleanup, env := openLockTestRepo(t, nil) - defer cleanup() + repo := openLockTestRepo(t, nil) - lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, env) - _, err := unlockAll(0) + lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, 0) + _, err := UnlockAll(0) test.OK(t, err) if wrappedCtx.Err() == nil { t.Fatal("canceled parent context did not cancel context") } - // unlockRepo should not crash - unlockRepo(lock) + // Unlock should not crash + Unlock(lock) } func TestLockConflict(t *testing.T) { - repo, cleanup, env := openLockTestRepo(t, nil) - defer cleanup() - repo2, err := OpenRepository(context.TODO(), env.gopts) + repo := openLockTestRepo(t, nil) + repo2, err := New(repo.Backend(), Options{}) test.OK(t, err) + test.OK(t, repo2.SearchKey(context.TODO(), test.TestPassword, 1, "")) - lock, _, err := lockRepository(context.Background(), repo, true, env.gopts.RetryLock, env.gopts.JSON) + lock, _, err := Lock(context.Background(), repo, true, 0, func(msg string) {}, func(format string, args ...interface{}) {}) test.OK(t, err) - defer unlockRepo(lock) - _, _, err = 
lockRepository(context.Background(), repo2, false, env.gopts.RetryLock, env.gopts.JSON) + defer Unlock(lock) + _, _, err = Lock(context.Background(), repo2, false, 0, func(msg string) {}, func(format string, args ...interface{}) {}) if err == nil { t.Fatal("second lock should have failed") } @@ -118,10 +116,9 @@ func (b *writeOnceBackend) Save(ctx context.Context, h backend.Handle, rd backen } func TestLockFailedRefresh(t *testing.T) { - repo, cleanup, env := openLockTestRepo(t, func(r backend.Backend) (backend.Backend, error) { + repo := openLockTestRepo(t, func(r backend.Backend) (backend.Backend, error) { return &writeOnceBackend{Backend: r}, nil }) - defer cleanup() // reduce locking intervals to be suitable for testing ri, rt := refreshInterval, refreshabilityTimeout @@ -131,7 +128,7 @@ func TestLockFailedRefresh(t *testing.T) { refreshInterval, refreshabilityTimeout = ri, rt }() - lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, env) + lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, 0) select { case <-wrappedCtx.Done(): @@ -139,8 +136,8 @@ func TestLockFailedRefresh(t *testing.T) { case <-time.After(time.Second): t.Fatal("failed lock refresh did not cause context cancellation") } - // unlockRepo should not crash - unlockRepo(lock) + // Unlock should not crash + Unlock(lock) } type loggingBackend struct { @@ -156,13 +153,12 @@ func (b *loggingBackend) Save(ctx context.Context, h backend.Handle, rd backend. } func TestLockSuccessfulRefresh(t *testing.T) { - repo, cleanup, env := openLockTestRepo(t, func(r backend.Backend) (backend.Backend, error) { + repo := openLockTestRepo(t, func(r backend.Backend) (backend.Backend, error) { return &loggingBackend{ Backend: r, t: t, }, nil }) - defer cleanup() t.Logf("test for successful lock refresh %v", time.Now()) // reduce locking intervals to be suitable for testing @@ -173,7 +169,7 @@ func TestLockSuccessfulRefresh(t *testing.T) { refreshInterval, refreshabilityTimeout = ri, rt }() - lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, env) + lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, 0) select { case <-wrappedCtx.Done(): @@ -189,8 +185,8 @@ func TestLockSuccessfulRefresh(t *testing.T) { case <-time.After(2 * refreshabilityTimeout): // expected lock refresh to work } - // unlockRepo should not crash - unlockRepo(lock) + // Unlock should not crash + Unlock(lock) } type slowBackend struct { @@ -209,11 +205,10 @@ func (b *slowBackend) Save(ctx context.Context, h backend.Handle, rd backend.Rew func TestLockSuccessfulStaleRefresh(t *testing.T) { var sb *slowBackend - repo, cleanup, env := openLockTestRepo(t, func(r backend.Backend) (backend.Backend, error) { + repo := openLockTestRepo(t, func(r backend.Backend) (backend.Backend, error) { sb = &slowBackend{Backend: r} return sb, nil }) - defer cleanup() t.Logf("test for successful lock refresh %v", time.Now()) // reduce locking intervals to be suitable for testing @@ -224,7 +219,7 @@ func TestLockSuccessfulStaleRefresh(t *testing.T) { refreshInterval, refreshabilityTimeout = ri, rt }() - lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, env) + lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, 0) // delay lock refreshing long enough that the lock would expire sb.m.Lock() sb.sleep = refreshabilityTimeout + refreshInterval @@ -252,21 +247,20 @@ func TestLockSuccessfulStaleRefresh(t *testing.T) { // expected lock refresh to work } - // unlockRepo should not crash - unlockRepo(lock) + // Unlock 
should not crash + Unlock(lock) } func TestLockWaitTimeout(t *testing.T) { - repo, cleanup, env := openLockTestRepo(t, nil) - defer cleanup() + repo := openLockTestRepo(t, nil) - elock, _, err := lockRepository(context.TODO(), repo, true, env.gopts.RetryLock, env.gopts.JSON) + elock, _, err := Lock(context.TODO(), repo, true, 0, func(msg string) {}, func(format string, args ...interface{}) {}) test.OK(t, err) retryLock := 200 * time.Millisecond start := time.Now() - lock, _, err := lockRepository(context.TODO(), repo, false, retryLock, env.gopts.JSON) + lock, _, err := Lock(context.TODO(), repo, false, retryLock, func(msg string) {}, func(format string, args ...interface{}) {}) duration := time.Since(start) test.Assert(t, err != nil, @@ -281,10 +275,9 @@ func TestLockWaitTimeout(t *testing.T) { } func TestLockWaitCancel(t *testing.T) { - repo, cleanup, env := openLockTestRepo(t, nil) - defer cleanup() + repo := openLockTestRepo(t, nil) - elock, _, err := lockRepository(context.TODO(), repo, true, env.gopts.RetryLock, env.gopts.JSON) + elock, _, err := Lock(context.TODO(), repo, true, 0, func(msg string) {}, func(format string, args ...interface{}) {}) test.OK(t, err) retryLock := 200 * time.Millisecond @@ -294,7 +287,7 @@ func TestLockWaitCancel(t *testing.T) { ctx, cancel := context.WithCancel(context.TODO()) time.AfterFunc(cancelAfter, cancel) - lock, _, err := lockRepository(ctx, repo, false, retryLock, env.gopts.JSON) + lock, _, err := Lock(ctx, repo, false, retryLock, func(msg string) {}, func(format string, args ...interface{}) {}) duration := time.Since(start) test.Assert(t, err != nil, @@ -309,10 +302,9 @@ func TestLockWaitCancel(t *testing.T) { } func TestLockWaitSuccess(t *testing.T) { - repo, cleanup, env := openLockTestRepo(t, nil) - defer cleanup() + repo := openLockTestRepo(t, nil) - elock, _, err := lockRepository(context.TODO(), repo, true, env.gopts.RetryLock, env.gopts.JSON) + elock, _, err := Lock(context.TODO(), repo, true, 0, func(msg string) {}, func(format string, args ...interface{}) {}) test.OK(t, err) retryLock := 200 * time.Millisecond @@ -322,7 +314,7 @@ func TestLockWaitSuccess(t *testing.T) { test.OK(t, elock.Unlock()) }) - lock, _, err := lockRepository(context.TODO(), repo, false, retryLock, env.gopts.JSON) + lock, _, err := Lock(context.TODO(), repo, false, retryLock, func(msg string) {}, func(format string, args ...interface{}) {}) test.OK(t, err) test.OK(t, lock.Unlock()) From e8df50fa3c0e5e4ab69e5f21aeedbd9ba0c36dee Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 24 Feb 2024 16:45:57 +0100 Subject: [PATCH 03/10] repository: remove global list of locks --- cmd/restic/lock.go | 26 ++++++---------- internal/repository/lock.go | 52 +++++--------------------------- internal/repository/lock_test.go | 50 +++++++++++------------------- 3 files changed, 34 insertions(+), 94 deletions(-) diff --git a/cmd/restic/lock.go b/cmd/restic/lock.go index 20ac4dd34..69d433df1 100644 --- a/cmd/restic/lock.go +++ b/cmd/restic/lock.go @@ -2,16 +2,10 @@ package main import ( "context" - "sync" "github.com/restic/restic/internal/repository" - "github.com/restic/restic/internal/restic" ) -var globalLocks struct { - sync.Once -} - func internalOpenWithLocked(ctx context.Context, gopts GlobalOptions, dryRun bool, exclusive bool) (context.Context, *repository.Repository, func(), error) { repo, err := OpenRepository(ctx, gopts) if err != nil { @@ -20,22 +14,22 @@ func internalOpenWithLocked(ctx context.Context, gopts GlobalOptions, dryRun boo unlock := func() {} if !dryRun 
{ - var lock *restic.Lock - - // make sure that a repository is unlocked properly and after cancel() was - // called by the cleanup handler in global.go - globalLocks.Do(func() { - AddCleanupHandler(repository.UnlockAll) - }) + var lock *repository.Unlocker lock, ctx, err = repository.Lock(ctx, repo, exclusive, gopts.RetryLock, func(msg string) { if !gopts.JSON { Verbosef("%s", msg) } }, Warnf) - unlock = func() { - repository.Unlock(lock) - } + + unlock = lock.Unlock + // make sure that a repository is unlocked properly and after cancel() was + // called by the cleanup handler in global.go + AddCleanupHandler(func(code int) (int, error) { + lock.Unlock() + return code, nil + }) + if err != nil { return nil, nil, nil, err } diff --git a/internal/repository/lock.go b/internal/repository/lock.go index c64cb9222..e3360cac0 100644 --- a/internal/repository/lock.go +++ b/internal/repository/lock.go @@ -18,11 +18,6 @@ type lockContext struct { refreshWG sync.WaitGroup } -var globalLocks struct { - locks map[*restic.Lock]*lockContext - sync.Mutex -} - var ( retrySleepStart = 5 * time.Second retrySleepMax = 60 * time.Second @@ -37,7 +32,7 @@ func minDuration(a, b time.Duration) time.Duration { // Lock wraps the ctx such that it is cancelled when the repository is unlocked // cancelling the original context also stops the lock refresh -func Lock(ctx context.Context, repo restic.Repository, exclusive bool, retryLock time.Duration, printRetry func(msg string), logger func(format string, args ...interface{})) (*restic.Lock, context.Context, error) { +func Lock(ctx context.Context, repo restic.Repository, exclusive bool, retryLock time.Duration, printRetry func(msg string), logger func(format string, args ...interface{})) (*Unlocker, context.Context, error) { lockFn := restic.NewLock if exclusive { @@ -97,13 +92,10 @@ retryLoop: refreshChan := make(chan struct{}) forceRefreshChan := make(chan refreshLockRequest) - globalLocks.Lock() - globalLocks.locks[lock] = lockInfo go refreshLocks(ctx, repo.Backend(), lockInfo, refreshChan, forceRefreshChan, logger) go monitorLockRefresh(ctx, lockInfo, refreshChan, forceRefreshChan, logger) - globalLocks.Unlock() - return lock, ctx, err + return &Unlocker{lockInfo}, ctx, nil } var refreshInterval = 5 * time.Minute @@ -261,41 +253,11 @@ func tryRefreshStaleLock(ctx context.Context, be backend.Backend, lock *restic.L return true } -func Unlock(lock *restic.Lock) { - if lock == nil { - return - } - - globalLocks.Lock() - lockInfo, exists := globalLocks.locks[lock] - delete(globalLocks.locks, lock) - globalLocks.Unlock() - - if !exists { - debug.Log("unable to find lock %v in the global list of locks, ignoring", lock) - return - } - lockInfo.cancel() - lockInfo.refreshWG.Wait() +type Unlocker struct { + info *lockContext } -func UnlockAll(code int) (int, error) { - globalLocks.Lock() - locks := globalLocks.locks - debug.Log("unlocking %d locks", len(globalLocks.locks)) - for _, lockInfo := range globalLocks.locks { - lockInfo.cancel() - } - globalLocks.locks = make(map[*restic.Lock]*lockContext) - globalLocks.Unlock() - - for _, lockInfo := range locks { - lockInfo.refreshWG.Wait() - } - - return code, nil -} - -func init() { - globalLocks.locks = make(map[*restic.Lock]*lockContext) +func (l *Unlocker) Unlock() { + l.info.cancel() + l.info.refreshWG.Wait() } diff --git a/internal/repository/lock_test.go b/internal/repository/lock_test.go index fb48a566f..2975ed7ff 100644 --- a/internal/repository/lock_test.go +++ b/internal/repository/lock_test.go @@ -37,11 +37,11 @@ 
func openLockTestRepo(t *testing.T, wrapper backendWrapper) restic.Repository { return repo } -func checkedLockRepo(ctx context.Context, t *testing.T, repo restic.Repository, retryLock time.Duration) (*restic.Lock, context.Context) { +func checkedLockRepo(ctx context.Context, t *testing.T, repo restic.Repository, retryLock time.Duration) (*Unlocker, context.Context) { lock, wrappedCtx, err := Lock(ctx, repo, false, retryLock, func(msg string) {}, func(format string, args ...interface{}) {}) test.OK(t, err) test.OK(t, wrappedCtx.Err()) - if lock.Stale() { + if lock.info.lock.Stale() { t.Fatal("lock returned stale lock") } return lock, wrappedCtx @@ -51,7 +51,7 @@ func TestLock(t *testing.T) { repo := openLockTestRepo(t, nil) lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, 0) - Unlock(lock) + lock.Unlock() if wrappedCtx.Err() == nil { t.Fatal("unlock did not cancel context") } @@ -69,21 +69,7 @@ func TestLockCancel(t *testing.T) { } // Unlock should not crash - Unlock(lock) -} - -func TestLockUnlockAll(t *testing.T) { - repo := openLockTestRepo(t, nil) - - lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, 0) - _, err := UnlockAll(0) - test.OK(t, err) - if wrappedCtx.Err() == nil { - t.Fatal("canceled parent context did not cancel context") - } - - // Unlock should not crash - Unlock(lock) + lock.Unlock() } func TestLockConflict(t *testing.T) { @@ -94,7 +80,7 @@ func TestLockConflict(t *testing.T) { lock, _, err := Lock(context.Background(), repo, true, 0, func(msg string) {}, func(format string, args ...interface{}) {}) test.OK(t, err) - defer Unlock(lock) + defer lock.Unlock() _, _, err = Lock(context.Background(), repo2, false, 0, func(msg string) {}, func(format string, args ...interface{}) {}) if err == nil { t.Fatal("second lock should have failed") @@ -137,7 +123,7 @@ func TestLockFailedRefresh(t *testing.T) { t.Fatal("failed lock refresh did not cause context cancellation") } // Unlock should not crash - Unlock(lock) + lock.Unlock() } type loggingBackend struct { @@ -186,7 +172,7 @@ func TestLockSuccessfulRefresh(t *testing.T) { // expected lock refresh to work } // Unlock should not crash - Unlock(lock) + lock.Unlock() } type slowBackend struct { @@ -248,19 +234,21 @@ func TestLockSuccessfulStaleRefresh(t *testing.T) { } // Unlock should not crash - Unlock(lock) + lock.Unlock() } func TestLockWaitTimeout(t *testing.T) { + t.Parallel() repo := openLockTestRepo(t, nil) elock, _, err := Lock(context.TODO(), repo, true, 0, func(msg string) {}, func(format string, args ...interface{}) {}) test.OK(t, err) + defer elock.Unlock() retryLock := 200 * time.Millisecond start := time.Now() - lock, _, err := Lock(context.TODO(), repo, false, retryLock, func(msg string) {}, func(format string, args ...interface{}) {}) + _, _, err = Lock(context.TODO(), repo, false, retryLock, func(msg string) {}, func(format string, args ...interface{}) {}) duration := time.Since(start) test.Assert(t, err != nil, @@ -269,16 +257,15 @@ func TestLockWaitTimeout(t *testing.T) { "create normal lock with exclusively locked repo didn't return the correct error") test.Assert(t, retryLock <= duration && duration < retryLock*3/2, "create normal lock with exclusively locked repo didn't wait for the specified timeout") - - test.OK(t, lock.Unlock()) - test.OK(t, elock.Unlock()) } func TestLockWaitCancel(t *testing.T) { + t.Parallel() repo := openLockTestRepo(t, nil) elock, _, err := Lock(context.TODO(), repo, true, 0, func(msg string) {}, func(format string, args ...interface{}) {}) 
test.OK(t, err) + defer elock.Unlock() retryLock := 200 * time.Millisecond cancelAfter := 40 * time.Millisecond @@ -287,7 +274,7 @@ func TestLockWaitCancel(t *testing.T) { ctx, cancel := context.WithCancel(context.TODO()) time.AfterFunc(cancelAfter, cancel) - lock, _, err := Lock(ctx, repo, false, retryLock, func(msg string) {}, func(format string, args ...interface{}) {}) + _, _, err = Lock(ctx, repo, false, retryLock, func(msg string) {}, func(format string, args ...interface{}) {}) duration := time.Since(start) test.Assert(t, err != nil, @@ -296,12 +283,10 @@ func TestLockWaitCancel(t *testing.T) { "create normal lock with exclusively locked repo didn't return the correct error") test.Assert(t, cancelAfter <= duration && duration < retryLock-10*time.Millisecond, "create normal lock with exclusively locked repo didn't return in time, duration %v", duration) - - test.OK(t, lock.Unlock()) - test.OK(t, elock.Unlock()) } func TestLockWaitSuccess(t *testing.T) { + t.Parallel() repo := openLockTestRepo(t, nil) elock, _, err := Lock(context.TODO(), repo, true, 0, func(msg string) {}, func(format string, args ...interface{}) {}) @@ -311,11 +296,10 @@ func TestLockWaitSuccess(t *testing.T) { unlockAfter := 40 * time.Millisecond time.AfterFunc(unlockAfter, func() { - test.OK(t, elock.Unlock()) + elock.Unlock() }) lock, _, err := Lock(context.TODO(), repo, false, retryLock, func(msg string) {}, func(format string, args ...interface{}) {}) test.OK(t, err) - - test.OK(t, lock.Unlock()) + lock.Unlock() } From 044e8bf82157abcf9623465db608a1994c0e9dac Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 24 Feb 2024 17:07:14 +0100 Subject: [PATCH 04/10] repository: parallelize lock tests --- internal/repository/lock.go | 63 +++++++++++++++++------------- internal/repository/lock_test.go | 66 +++++++++++++++++--------------- 2 files changed, 72 insertions(+), 57 deletions(-) diff --git a/internal/repository/lock.go b/internal/repository/lock.go index e3360cac0..fd8214cd1 100644 --- a/internal/repository/lock.go +++ b/internal/repository/lock.go @@ -18,21 +18,31 @@ type lockContext struct { refreshWG sync.WaitGroup } -var ( - retrySleepStart = 5 * time.Second - retrySleepMax = 60 * time.Second -) +type locker struct { + retrySleepStart time.Duration + retrySleepMax time.Duration + refreshInterval time.Duration + refreshabilityTimeout time.Duration +} -func minDuration(a, b time.Duration) time.Duration { - if a <= b { - return a - } - return b +const defaultRefreshInterval = 5 * time.Minute + +var lockerInst = &locker{ + retrySleepStart: 5 * time.Second, + retrySleepMax: 60 * time.Second, + refreshInterval: defaultRefreshInterval, + // consider a lock refresh failed a bit before the lock actually becomes stale + // the difference allows to compensate for a small time drift between clients. 
+ refreshabilityTimeout: restic.StaleLockTimeout - defaultRefreshInterval*3/2, +} + +func Lock(ctx context.Context, repo restic.Repository, exclusive bool, retryLock time.Duration, printRetry func(msg string), logger func(format string, args ...interface{})) (*Unlocker, context.Context, error) { + return lockerInst.Lock(ctx, repo, exclusive, retryLock, printRetry, logger) } // Lock wraps the ctx such that it is cancelled when the repository is unlocked // cancelling the original context also stops the lock refresh -func Lock(ctx context.Context, repo restic.Repository, exclusive bool, retryLock time.Duration, printRetry func(msg string), logger func(format string, args ...interface{})) (*Unlocker, context.Context, error) { +func (l *locker) Lock(ctx context.Context, repo restic.Repository, exclusive bool, retryLock time.Duration, printRetry func(msg string), logger func(format string, args ...interface{})) (*Unlocker, context.Context, error) { lockFn := restic.NewLock if exclusive { @@ -42,7 +52,7 @@ func Lock(ctx context.Context, repo restic.Repository, exclusive bool, retryLock var lock *restic.Lock var err error - retrySleep := minDuration(retrySleepStart, retryLock) + retrySleep := minDuration(l.retrySleepStart, retryLock) retryMessagePrinted := false retryTimeout := time.After(retryLock) @@ -68,7 +78,7 @@ retryLoop: lock, err = lockFn(ctx, repo) break retryLoop case <-retrySleepCh: - retrySleep = minDuration(retrySleep*2, retrySleepMax) + retrySleep = minDuration(retrySleep*2, l.retrySleepMax) } } else { // anything else, either a successful lock or another error @@ -92,26 +102,27 @@ retryLoop: refreshChan := make(chan struct{}) forceRefreshChan := make(chan refreshLockRequest) - go refreshLocks(ctx, repo.Backend(), lockInfo, refreshChan, forceRefreshChan, logger) - go monitorLockRefresh(ctx, lockInfo, refreshChan, forceRefreshChan, logger) + go l.refreshLocks(ctx, repo.Backend(), lockInfo, refreshChan, forceRefreshChan, logger) + go l.monitorLockRefresh(ctx, lockInfo, refreshChan, forceRefreshChan, logger) return &Unlocker{lockInfo}, ctx, nil } -var refreshInterval = 5 * time.Minute - -// consider a lock refresh failed a bit before the lock actually becomes stale -// the difference allows to compensate for a small time drift between clients. 
-var refreshabilityTimeout = restic.StaleLockTimeout - refreshInterval*3/2 +func minDuration(a, b time.Duration) time.Duration { + if a <= b { + return a + } + return b +} type refreshLockRequest struct { result chan bool } -func refreshLocks(ctx context.Context, backend backend.Backend, lockInfo *lockContext, refreshed chan<- struct{}, forceRefresh <-chan refreshLockRequest, logger func(format string, args ...interface{})) { +func (l *locker) refreshLocks(ctx context.Context, backend backend.Backend, lockInfo *lockContext, refreshed chan<- struct{}, forceRefresh <-chan refreshLockRequest, logger func(format string, args ...interface{})) { debug.Log("start") lock := lockInfo.lock - ticker := time.NewTicker(refreshInterval) + ticker := time.NewTicker(l.refreshInterval) lastRefresh := lock.Time defer func() { @@ -151,7 +162,7 @@ func refreshLocks(ctx context.Context, backend backend.Backend, lockInfo *lockCo } case <-ticker.C: - if time.Since(lastRefresh) > refreshabilityTimeout { + if time.Since(lastRefresh) > l.refreshabilityTimeout { // the lock is too old, wait until the expiry monitor cancels the context continue } @@ -172,14 +183,14 @@ func refreshLocks(ctx context.Context, backend backend.Backend, lockInfo *lockCo } } -func monitorLockRefresh(ctx context.Context, lockInfo *lockContext, refreshed <-chan struct{}, forceRefresh chan<- refreshLockRequest, logger func(format string, args ...interface{})) { +func (l *locker) monitorLockRefresh(ctx context.Context, lockInfo *lockContext, refreshed <-chan struct{}, forceRefresh chan<- refreshLockRequest, logger func(format string, args ...interface{})) { // time.Now() might use a monotonic timer which is paused during standby // convert to unix time to ensure we compare real time values lastRefresh := time.Now().UnixNano() pollDuration := 1 * time.Second - if refreshInterval < pollDuration { + if l.refreshInterval < pollDuration { // required for TestLockFailedRefresh - pollDuration = refreshInterval / 5 + pollDuration = l.refreshInterval / 5 } // timers are paused during standby, which is a problem as the refresh timeout // _must_ expire if the host was too long in standby. 
Thus fall back to periodic checks @@ -205,7 +216,7 @@ func monitorLockRefresh(ctx context.Context, lockInfo *lockContext, refreshed <- } lastRefresh = time.Now().UnixNano() case <-ticker.C: - if time.Now().UnixNano()-lastRefresh < refreshabilityTimeout.Nanoseconds() || refreshStaleLockResult != nil { + if time.Now().UnixNano()-lastRefresh < l.refreshabilityTimeout.Nanoseconds() || refreshStaleLockResult != nil { continue } diff --git a/internal/repository/lock_test.go b/internal/repository/lock_test.go index 2975ed7ff..360ee2b23 100644 --- a/internal/repository/lock_test.go +++ b/internal/repository/lock_test.go @@ -37,8 +37,8 @@ func openLockTestRepo(t *testing.T, wrapper backendWrapper) restic.Repository { return repo } -func checkedLockRepo(ctx context.Context, t *testing.T, repo restic.Repository, retryLock time.Duration) (*Unlocker, context.Context) { - lock, wrappedCtx, err := Lock(ctx, repo, false, retryLock, func(msg string) {}, func(format string, args ...interface{}) {}) +func checkedLockRepo(ctx context.Context, t *testing.T, repo restic.Repository, lockerInst *locker, retryLock time.Duration) (*Unlocker, context.Context) { + lock, wrappedCtx, err := lockerInst.Lock(ctx, repo, false, retryLock, func(msg string) {}, func(format string, args ...interface{}) {}) test.OK(t, err) test.OK(t, wrappedCtx.Err()) if lock.info.lock.Stale() { @@ -48,9 +48,10 @@ func checkedLockRepo(ctx context.Context, t *testing.T, repo restic.Repository, } func TestLock(t *testing.T) { + t.Parallel() repo := openLockTestRepo(t, nil) - lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, 0) + lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, lockerInst, 0) lock.Unlock() if wrappedCtx.Err() == nil { t.Fatal("unlock did not cancel context") @@ -58,11 +59,12 @@ func TestLock(t *testing.T) { } func TestLockCancel(t *testing.T) { + t.Parallel() repo := openLockTestRepo(t, nil) ctx, cancel := context.WithCancel(context.Background()) defer cancel() - lock, wrappedCtx := checkedLockRepo(ctx, t, repo, 0) + lock, wrappedCtx := checkedLockRepo(ctx, t, repo, lockerInst, 0) cancel() if wrappedCtx.Err() == nil { t.Fatal("canceled parent context did not cancel context") @@ -73,6 +75,7 @@ func TestLockCancel(t *testing.T) { } func TestLockConflict(t *testing.T) { + t.Parallel() repo := openLockTestRepo(t, nil) repo2, err := New(repo.Backend(), Options{}) test.OK(t, err) @@ -102,19 +105,19 @@ func (b *writeOnceBackend) Save(ctx context.Context, h backend.Handle, rd backen } func TestLockFailedRefresh(t *testing.T) { + t.Parallel() repo := openLockTestRepo(t, func(r backend.Backend) (backend.Backend, error) { return &writeOnceBackend{Backend: r}, nil }) // reduce locking intervals to be suitable for testing - ri, rt := refreshInterval, refreshabilityTimeout - refreshInterval = 20 * time.Millisecond - refreshabilityTimeout = 100 * time.Millisecond - defer func() { - refreshInterval, refreshabilityTimeout = ri, rt - }() - - lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, 0) + li := &locker{ + retrySleepStart: lockerInst.retrySleepStart, + retrySleepMax: lockerInst.retrySleepMax, + refreshInterval: 20 * time.Millisecond, + refreshabilityTimeout: 100 * time.Millisecond, + } + lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, li, 0) select { case <-wrappedCtx.Done(): @@ -139,6 +142,7 @@ func (b *loggingBackend) Save(ctx context.Context, h backend.Handle, rd backend. 
} func TestLockSuccessfulRefresh(t *testing.T) { + t.Parallel() repo := openLockTestRepo(t, func(r backend.Backend) (backend.Backend, error) { return &loggingBackend{ Backend: r, @@ -148,14 +152,13 @@ func TestLockSuccessfulRefresh(t *testing.T) { t.Logf("test for successful lock refresh %v", time.Now()) // reduce locking intervals to be suitable for testing - ri, rt := refreshInterval, refreshabilityTimeout - refreshInterval = 60 * time.Millisecond - refreshabilityTimeout = 500 * time.Millisecond - defer func() { - refreshInterval, refreshabilityTimeout = ri, rt - }() - - lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, 0) + li := &locker{ + retrySleepStart: lockerInst.retrySleepStart, + retrySleepMax: lockerInst.retrySleepMax, + refreshInterval: 60 * time.Millisecond, + refreshabilityTimeout: 500 * time.Millisecond, + } + lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, li, 0) select { case <-wrappedCtx.Done(): @@ -168,7 +171,7 @@ func TestLockSuccessfulRefresh(t *testing.T) { buf = buf[:n] t.Log(string(buf)) - case <-time.After(2 * refreshabilityTimeout): + case <-time.After(2 * li.refreshabilityTimeout): // expected lock refresh to work } // Unlock should not crash @@ -190,6 +193,7 @@ func (b *slowBackend) Save(ctx context.Context, h backend.Handle, rd backend.Rew } func TestLockSuccessfulStaleRefresh(t *testing.T) { + t.Parallel() var sb *slowBackend repo := openLockTestRepo(t, func(r backend.Backend) (backend.Backend, error) { sb = &slowBackend{Backend: r} @@ -198,17 +202,17 @@ func TestLockSuccessfulStaleRefresh(t *testing.T) { t.Logf("test for successful lock refresh %v", time.Now()) // reduce locking intervals to be suitable for testing - ri, rt := refreshInterval, refreshabilityTimeout - refreshInterval = 10 * time.Millisecond - refreshabilityTimeout = 50 * time.Millisecond - defer func() { - refreshInterval, refreshabilityTimeout = ri, rt - }() + li := &locker{ + retrySleepStart: lockerInst.retrySleepStart, + retrySleepMax: lockerInst.retrySleepMax, + refreshInterval: 10 * time.Millisecond, + refreshabilityTimeout: 50 * time.Millisecond, + } - lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, 0) + lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, li, 0) // delay lock refreshing long enough that the lock would expire sb.m.Lock() - sb.sleep = refreshabilityTimeout + refreshInterval + sb.sleep = li.refreshabilityTimeout + li.refreshInterval sb.m.Unlock() select { @@ -216,7 +220,7 @@ func TestLockSuccessfulStaleRefresh(t *testing.T) { // don't call t.Fatal to allow the lock to be properly cleaned up t.Error("lock refresh failed", time.Now()) - case <-time.After(refreshabilityTimeout): + case <-time.After(li.refreshabilityTimeout): } // reset slow backend sb.m.Lock() @@ -229,7 +233,7 @@ func TestLockSuccessfulStaleRefresh(t *testing.T) { // don't call t.Fatal to allow the lock to be properly cleaned up t.Error("lock refresh failed", time.Now()) - case <-time.After(3 * refreshabilityTimeout): + case <-time.After(3 * li.refreshabilityTimeout): // expected lock refresh to work } From 3ba1fa3cee58aabaee0550b19d2bcb9a103f82d0 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 24 Feb 2024 17:20:10 +0100 Subject: [PATCH 05/10] repository: remove a few global variables --- internal/repository/key.go | 2 +- internal/repository/testing.go | 13 +++++-------- 2 files changed, 6 insertions(+), 9 deletions(-) diff --git a/internal/repository/key.go b/internal/repository/key.go index d9f8d8e17..4d597da4d 100644 --- 
a/internal/repository/key.go +++ b/internal/repository/key.go @@ -47,7 +47,7 @@ type Key struct { // calibrated on the first run of AddKey(). var Params *crypto.Params -var ( +const ( // KDFTimeout specifies the maximum runtime for the KDF. KDFTimeout = 500 * time.Millisecond diff --git a/internal/repository/testing.go b/internal/repository/testing.go index dbbdbeb07..faa40c70a 100644 --- a/internal/repository/testing.go +++ b/internal/repository/testing.go @@ -17,13 +17,6 @@ import ( "github.com/restic/chunker" ) -// testKDFParams are the parameters for the KDF to be used during testing. -var testKDFParams = crypto.Params{ - N: 128, - R: 1, - P: 1, -} - type logger interface { Logf(format string, args ...interface{}) } @@ -31,7 +24,11 @@ type logger interface { // TestUseLowSecurityKDFParameters configures low-security KDF parameters for testing. func TestUseLowSecurityKDFParameters(t logger) { t.Logf("using low-security KDF parameters for test") - Params = &testKDFParams + Params = &crypto.Params{ + N: 128, + R: 1, + P: 1, + } } // TestBackend returns a fully configured in-memory backend. From dc441c57a76874bc503b71b9b2946b0af31a7934 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 24 Feb 2024 21:45:24 +0100 Subject: [PATCH 06/10] repository: unify repository initialization in tests Tests should use a helper from internal/repository/testing.go to construct a Repository object. --- internal/checker/checker_test.go | 36 +++++++------------------- internal/index/index_parallel_test.go | 4 +-- internal/repository/lock_test.go | 9 ++----- internal/repository/repository_test.go | 11 +++----- internal/repository/testing.go | 13 +++++++++- 5 files changed, 27 insertions(+), 46 deletions(-) diff --git a/internal/checker/checker_test.go b/internal/checker/checker_test.go index cca5a582c..b0fa4e3e3 100644 --- a/internal/checker/checker_test.go +++ b/internal/checker/checker_test.go @@ -72,11 +72,9 @@ func assertOnlyMixedPackHints(t *testing.T, hints []error) { } func TestCheckRepo(t *testing.T) { - repodir, cleanup := test.Env(t, checkerTestData) + repo, cleanup := repository.TestFromFixture(t, checkerTestData) defer cleanup() - repo := repository.TestOpenLocal(t, repodir) - chkr := checker.New(repo, false) hints, errs := chkr.LoadIndex(context.TODO(), nil) if len(errs) > 0 { @@ -92,11 +90,9 @@ func TestCheckRepo(t *testing.T) { } func TestMissingPack(t *testing.T) { - repodir, cleanup := test.Env(t, checkerTestData) + repo, cleanup := repository.TestFromFixture(t, checkerTestData) defer cleanup() - repo := repository.TestOpenLocal(t, repodir) - packHandle := backend.Handle{ Type: restic.PackFile, Name: "657f7fb64f6a854fff6fe9279998ee09034901eded4e6db9bcee0e59745bbce6", @@ -123,11 +119,9 @@ func TestMissingPack(t *testing.T) { } func TestUnreferencedPack(t *testing.T) { - repodir, cleanup := test.Env(t, checkerTestData) + repo, cleanup := repository.TestFromFixture(t, checkerTestData) defer cleanup() - repo := repository.TestOpenLocal(t, repodir) - // index 3f1a only references pack 60e0 packID := "60e0438dcb978ec6860cc1f8c43da648170ee9129af8f650f876bad19f8f788e" indexHandle := backend.Handle{ @@ -156,11 +150,9 @@ func TestUnreferencedPack(t *testing.T) { } func TestUnreferencedBlobs(t *testing.T) { - repodir, cleanup := test.Env(t, checkerTestData) + repo, cleanup := repository.TestFromFixture(t, checkerTestData) defer cleanup() - repo := repository.TestOpenLocal(t, repodir) - snapshotHandle := backend.Handle{ Type: restic.SnapshotFile, Name: 
"51d249d28815200d59e4be7b3f21a157b864dc343353df9d8e498220c2499b02", @@ -195,11 +187,9 @@ func TestUnreferencedBlobs(t *testing.T) { } func TestModifiedIndex(t *testing.T) { - repodir, cleanup := test.Env(t, checkerTestData) + repo, cleanup := repository.TestFromFixture(t, checkerTestData) defer cleanup() - repo := repository.TestOpenLocal(t, repodir) - done := make(chan struct{}) defer close(done) @@ -274,11 +264,9 @@ func TestModifiedIndex(t *testing.T) { var checkerDuplicateIndexTestData = filepath.Join("testdata", "duplicate-packs-in-index-test-repo.tar.gz") func TestDuplicatePacksInIndex(t *testing.T) { - repodir, cleanup := test.Env(t, checkerDuplicateIndexTestData) + repo, cleanup := repository.TestFromFixture(t, checkerDuplicateIndexTestData) defer cleanup() - repo := repository.TestOpenLocal(t, repodir) - chkr := checker.New(repo, false) hints, errs := chkr.LoadIndex(context.TODO(), nil) if len(hints) == 0 { @@ -342,9 +330,7 @@ func TestCheckerModifiedData(t *testing.T) { t.Logf("archived as %v", sn.ID().Str()) beError := &errorBackend{Backend: repo.Backend()} - checkRepo, err := repository.New(beError, repository.Options{}) - test.OK(t, err) - test.OK(t, checkRepo.SearchKey(context.TODO(), test.TestPassword, 5, "")) + checkRepo := repository.TestOpenBackend(t, beError) chkr := checker.New(checkRepo, false) @@ -399,10 +385,8 @@ func (r *loadTreesOnceRepository) LoadTree(ctx context.Context, id restic.ID) (* } func TestCheckerNoDuplicateTreeDecodes(t *testing.T) { - repodir, cleanup := test.Env(t, checkerTestData) + repo, cleanup := repository.TestFromFixture(t, checkerTestData) defer cleanup() - - repo := repository.TestOpenLocal(t, repodir) checkRepo := &loadTreesOnceRepository{ Repository: repo, loadedTrees: restic.NewIDSet(), @@ -549,9 +533,7 @@ func TestCheckerBlobTypeConfusion(t *testing.T) { } func loadBenchRepository(t *testing.B) (*checker.Checker, restic.Repository, func()) { - repodir, cleanup := test.Env(t, checkerTestData) - - repo := repository.TestOpenLocal(t, repodir) + repo, cleanup := repository.TestFromFixture(t, checkerTestData) chkr := checker.New(repo, false) hints, errs := chkr.LoadIndex(context.TODO(), nil) diff --git a/internal/index/index_parallel_test.go b/internal/index/index_parallel_test.go index db4853e19..5cb8d299d 100644 --- a/internal/index/index_parallel_test.go +++ b/internal/index/index_parallel_test.go @@ -15,11 +15,9 @@ import ( var repoFixture = filepath.Join("..", "repository", "testdata", "test-repo.tar.gz") func TestRepositoryForAllIndexes(t *testing.T) { - repodir, cleanup := rtest.Env(t, repoFixture) + repo, cleanup := repository.TestFromFixture(t, repoFixture) defer cleanup() - repo := repository.TestOpenLocal(t, repodir) - expectedIndexIDs := restic.NewIDSet() rtest.OK(t, repo.List(context.TODO(), restic.IndexFile, func(id restic.ID, size int64) error { expectedIndexIDs.Insert(id) diff --git a/internal/repository/lock_test.go b/internal/repository/lock_test.go index 360ee2b23..644fc6b37 100644 --- a/internal/repository/lock_test.go +++ b/internal/repository/lock_test.go @@ -31,10 +31,7 @@ func openLockTestRepo(t *testing.T, wrapper backendWrapper) restic.Repository { rtest.OK(t, err) } - repo, err := New(be, Options{}) - rtest.OK(t, err) - rtest.OK(t, repo.SearchKey(context.TODO(), test.TestPassword, 1, "")) - return repo + return TestOpenBackend(t, be) } func checkedLockRepo(ctx context.Context, t *testing.T, repo restic.Repository, lockerInst *locker, retryLock time.Duration) (*Unlocker, context.Context) { @@ -77,9 +74,7 @@ func 
TestLockCancel(t *testing.T) { func TestLockConflict(t *testing.T) { t.Parallel() repo := openLockTestRepo(t, nil) - repo2, err := New(repo.Backend(), Options{}) - test.OK(t, err) - test.OK(t, repo2.SearchKey(context.TODO(), test.TestPassword, 1, "")) + repo2 := TestOpenBackend(t, repo.Backend()) lock, _, err := Lock(context.Background(), repo, true, 0, func(msg string) {}, func(format string, args ...interface{}) {}) test.OK(t, err) diff --git a/internal/repository/repository_test.go b/internal/repository/repository_test.go index 0fa8e4d4a..98ff560fe 100644 --- a/internal/repository/repository_test.go +++ b/internal/repository/repository_test.go @@ -221,10 +221,9 @@ func benchmarkLoadUnpacked(b *testing.B, version uint) { var repoFixture = filepath.Join("testdata", "test-repo.tar.gz") func TestRepositoryLoadIndex(t *testing.T) { - repodir, cleanup := rtest.Env(t, repoFixture) + repo, cleanup := repository.TestFromFixture(t, repoFixture) defer cleanup() - repo := repository.TestOpenLocal(t, repodir) rtest.OK(t, repo.LoadIndex(context.TODO(), nil)) } @@ -243,7 +242,7 @@ func loadIndex(ctx context.Context, repo restic.LoaderUnpacked, id restic.ID) (* } func TestRepositoryLoadUnpackedBroken(t *testing.T) { - repodir, cleanup := rtest.Env(t, repoFixture) + repo, cleanup := repository.TestFromFixture(t, repoFixture) defer cleanup() data := rtest.Random(23, 12345) @@ -252,7 +251,6 @@ func TestRepositoryLoadUnpackedBroken(t *testing.T) { // damage buffer data[0] ^= 0xff - repo := repository.TestOpenLocal(t, repodir) // store broken file err := repo.Backend().Save(context.TODO(), h, backend.NewByteReader(data, nil)) rtest.OK(t, err) @@ -289,10 +287,7 @@ func TestRepositoryLoadUnpackedRetryBroken(t *testing.T) { be, err := local.Open(context.TODO(), local.Config{Path: repodir, Connections: 2}) rtest.OK(t, err) - repo, err := repository.New(&damageOnceBackend{Backend: be}, repository.Options{}) - rtest.OK(t, err) - err = repo.SearchKey(context.TODO(), rtest.TestPassword, 10, "") - rtest.OK(t, err) + repo := repository.TestOpenBackend(t, &damageOnceBackend{Backend: be}) rtest.OK(t, repo.LoadIndex(context.TODO(), nil)) } diff --git a/internal/repository/testing.go b/internal/repository/testing.go index faa40c70a..3a566565f 100644 --- a/internal/repository/testing.go +++ b/internal/repository/testing.go @@ -95,8 +95,15 @@ func TestRepositoryWithVersion(t testing.TB, version uint) restic.Repository { return TestRepositoryWithBackend(t, nil, version, opts) } +func TestFromFixture(t testing.TB, repoFixture string) (restic.Repository, func()) { + repodir, cleanup := test.Env(t, repoFixture) + repo := TestOpenLocal(t, repodir) + + return repo, cleanup +} + // TestOpenLocal opens a local repository. 
-func TestOpenLocal(t testing.TB, dir string) (r restic.Repository) { +func TestOpenLocal(t testing.TB, dir string) restic.Repository { var be backend.Backend be, err := local.Open(context.TODO(), local.Config{Path: dir, Connections: 2}) if err != nil { @@ -105,6 +112,10 @@ func TestOpenLocal(t testing.TB, dir string) (r restic.Repository) { be = retry.New(be, 3, nil, nil) + return TestOpenBackend(t, be) +} + +func TestOpenBackend(t testing.TB, be backend.Backend) restic.Repository { repo, err := New(be, Options{}) if err != nil { t.Fatal(err) From d18726cd70527ff5d77f0d418585659834684756 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 24 Feb 2024 21:52:28 +0100 Subject: [PATCH 07/10] ls: add missing read lock As `ls` reads data from the repository, it must acquire a read lock unless `--no-lock` was specified. The old behavior is equivalent to `ls --no-lock`. --- cmd/restic/cmd_ls.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cmd/restic/cmd_ls.go b/cmd/restic/cmd_ls.go index b0246625e..c4fb32de3 100644 --- a/cmd/restic/cmd_ls.go +++ b/cmd/restic/cmd_ls.go @@ -309,10 +309,11 @@ func runLs(ctx context.Context, opts LsOptions, gopts GlobalOptions, args []stri return false } - repo, err := OpenRepository(ctx, gopts) + ctx, repo, unlock, err := openWithReadLock(ctx, gopts, gopts.NoLock) if err != nil { return err } + defer unlock() snapshotLister, err := restic.MemorizeList(ctx, repo, restic.SnapshotFile) if err != nil { From 8155dbe711b9636ca7b02e7833595820600657a3 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 24 Feb 2024 21:54:39 +0100 Subject: [PATCH 08/10] correctly lock repository in integration tests --- cmd/restic/cmd_backup_integration_test.go | 18 ++-------- cmd/restic/cmd_mount_integration_test.go | 27 +++++++-------- cmd/restic/cmd_rewrite_integration_test.go | 7 ++-- cmd/restic/integration_helpers_test.go | 39 ++++++++++++++++------ cmd/restic/integration_test.go | 5 +-- 5 files changed, 52 insertions(+), 44 deletions(-) diff --git a/cmd/restic/cmd_backup_integration_test.go b/cmd/restic/cmd_backup_integration_test.go index 0bc4a9eaa..75de1341c 100644 --- a/cmd/restic/cmd_backup_integration_test.go +++ b/cmd/restic/cmd_backup_integration_test.go @@ -9,7 +9,6 @@ import ( "runtime" "testing" - "github.com/restic/restic/internal/backend" "github.com/restic/restic/internal/fs" "github.com/restic/restic/internal/restic" rtest "github.com/restic/restic/internal/test" @@ -250,29 +249,18 @@ func TestBackupTreeLoadError(t *testing.T) { opts := BackupOptions{} // Backup a subdirectory first, such that we can remove the tree pack for the subdirectory testRunBackup(t, env.testdata, []string{"test"}, opts, env.gopts) - - r, err := OpenRepository(context.TODO(), env.gopts) - rtest.OK(t, err) - rtest.OK(t, r.LoadIndex(context.TODO(), nil)) - treePacks := restic.NewIDSet() - r.Index().Each(context.TODO(), func(pb restic.PackedBlob) { - if pb.Type == restic.TreeBlob { - treePacks.Insert(pb.PackID) - } - }) + treePacks := listTreePacks(env.gopts, t) testRunBackup(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts) testRunCheck(t, env.gopts) // delete the subdirectory pack first - for id := range treePacks { - rtest.OK(t, r.Backend().Remove(context.TODO(), backend.Handle{Type: restic.PackFile, Name: id.String()})) - } + removePacks(env.gopts, t, treePacks) testRunRebuildIndex(t, env.gopts) // now the repo is missing the tree blob in the index; check should report this testRunCheckMustFail(t, env.gopts) // second backup 
should report an error but "heal" this situation - err = testRunBackupAssumeFailure(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts) + err := testRunBackupAssumeFailure(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts) rtest.Assert(t, err != nil, "backup should have reported an error for the subdirectory") testRunCheck(t, env.gopts) diff --git a/cmd/restic/cmd_mount_integration_test.go b/cmd/restic/cmd_mount_integration_test.go index d2025a395..590e15030 100644 --- a/cmd/restic/cmd_mount_integration_test.go +++ b/cmd/restic/cmd_mount_integration_test.go @@ -12,7 +12,6 @@ import ( "testing" "time" - "github.com/restic/restic/internal/repository" "github.com/restic/restic/internal/restic" rtest "github.com/restic/restic/internal/test" ) @@ -86,12 +85,12 @@ func listSnapshots(t testing.TB, dir string) []string { return names } -func checkSnapshots(t testing.TB, global GlobalOptions, repo *repository.Repository, mountpoint, repodir string, snapshotIDs restic.IDs, expectedSnapshotsInFuseDir int) { +func checkSnapshots(t testing.TB, gopts GlobalOptions, mountpoint string, snapshotIDs restic.IDs, expectedSnapshotsInFuseDir int) { t.Logf("checking for %d snapshots: %v", len(snapshotIDs), snapshotIDs) var wg sync.WaitGroup wg.Add(1) - go testRunMount(t, global, mountpoint, &wg) + go testRunMount(t, gopts, mountpoint, &wg) waitForMount(t, mountpoint) defer wg.Wait() defer testRunUmount(t, mountpoint) @@ -100,7 +99,7 @@ func checkSnapshots(t testing.TB, global GlobalOptions, repo *repository.Reposit t.Fatal(`virtual directory "snapshots" doesn't exist`) } - ids := listSnapshots(t, repodir) + ids := listSnapshots(t, gopts.Repo) t.Logf("found %v snapshots in repo: %v", len(ids), ids) namesInSnapshots := listSnapshots(t, mountpoint) @@ -124,6 +123,10 @@ func checkSnapshots(t testing.TB, global GlobalOptions, repo *repository.Reposit } } + _, repo, unlock, err := openWithReadLock(context.TODO(), gopts, false) + rtest.OK(t, err) + defer unlock() + for _, id := range snapshotIDs { snapshot, err := restic.LoadSnapshot(context.TODO(), repo, id) rtest.OK(t, err) @@ -166,10 +169,7 @@ func TestMount(t *testing.T) { testRunInit(t, env.gopts) - repo, err := OpenRepository(context.TODO(), env.gopts) - rtest.OK(t, err) - - checkSnapshots(t, env.gopts, repo, env.mountpoint, env.repo, []restic.ID{}, 0) + checkSnapshots(t, env.gopts, env.mountpoint, []restic.ID{}, 0) rtest.SetupTarTestFixture(t, env.testdata, filepath.Join("testdata", "backup-data.tar.gz")) @@ -179,7 +179,7 @@ func TestMount(t *testing.T) { rtest.Assert(t, len(snapshotIDs) == 1, "expected one snapshot, got %v", snapshotIDs) - checkSnapshots(t, env.gopts, repo, env.mountpoint, env.repo, snapshotIDs, 2) + checkSnapshots(t, env.gopts, env.mountpoint, snapshotIDs, 2) // second backup, implicit incremental testRunBackup(t, "", []string{env.testdata}, BackupOptions{}, env.gopts) @@ -187,7 +187,7 @@ func TestMount(t *testing.T) { rtest.Assert(t, len(snapshotIDs) == 2, "expected two snapshots, got %v", snapshotIDs) - checkSnapshots(t, env.gopts, repo, env.mountpoint, env.repo, snapshotIDs, 3) + checkSnapshots(t, env.gopts, env.mountpoint, snapshotIDs, 3) // third backup, explicit incremental bopts := BackupOptions{Parent: snapshotIDs[0].String()} @@ -196,7 +196,7 @@ func TestMount(t *testing.T) { rtest.Assert(t, len(snapshotIDs) == 3, "expected three snapshots, got %v", snapshotIDs) - checkSnapshots(t, env.gopts, repo, env.mountpoint, env.repo, snapshotIDs, 4) + checkSnapshots(t, 
env.gopts, env.mountpoint, snapshotIDs, 4) } func TestMountSameTimestamps(t *testing.T) { @@ -211,14 +211,11 @@ func TestMountSameTimestamps(t *testing.T) { rtest.SetupTarTestFixture(t, env.base, filepath.Join("testdata", "repo-same-timestamps.tar.gz")) - repo, err := OpenRepository(context.TODO(), env.gopts) - rtest.OK(t, err) - ids := []restic.ID{ restic.TestParseID("280303689e5027328889a06d718b729e96a1ce6ae9ef8290bff550459ae611ee"), restic.TestParseID("75ad6cdc0868e082f2596d5ab8705e9f7d87316f5bf5690385eeff8dbe49d9f5"), restic.TestParseID("5fd0d8b2ef0fa5d23e58f1e460188abb0f525c0f0c4af8365a1280c807a80a1b"), } - checkSnapshots(t, env.gopts, repo, env.mountpoint, env.repo, ids, 4) + checkSnapshots(t, env.gopts, env.mountpoint, ids, 4) } diff --git a/cmd/restic/cmd_rewrite_integration_test.go b/cmd/restic/cmd_rewrite_integration_test.go index 532855f57..71d6a60a5 100644 --- a/cmd/restic/cmd_rewrite_integration_test.go +++ b/cmd/restic/cmd_rewrite_integration_test.go @@ -78,8 +78,11 @@ func testRewriteMetadata(t *testing.T, metadata snapshotMetadataArgs) { createBasicRewriteRepo(t, env) testRunRewriteExclude(t, env.gopts, []string{}, true, metadata) - repo, _ := OpenRepository(context.TODO(), env.gopts) - snapshots, err := restic.TestLoadAllSnapshots(context.TODO(), repo, nil) + ctx, repo, unlock, err := openWithReadLock(context.TODO(), env.gopts, false) + rtest.OK(t, err) + defer unlock() + + snapshots, err := restic.TestLoadAllSnapshots(ctx, repo, nil) rtest.OK(t, err) rtest.Assert(t, len(snapshots) == 1, "expected one snapshot, got %v", len(snapshots)) newSnapshot := snapshots[0] diff --git a/cmd/restic/integration_helpers_test.go b/cmd/restic/integration_helpers_test.go index 184609d40..c87e1071e 100644 --- a/cmd/restic/integration_helpers_test.go +++ b/cmd/restic/integration_helpers_test.go @@ -232,47 +232,66 @@ func testSetupBackupData(t testing.TB, env *testEnvironment) string { } func listPacks(gopts GlobalOptions, t *testing.T) restic.IDSet { - r, err := OpenRepository(context.TODO(), gopts) + ctx, r, unlock, err := openWithReadLock(context.TODO(), gopts, false) rtest.OK(t, err) + defer unlock() packs := restic.NewIDSet() - rtest.OK(t, r.List(context.TODO(), restic.PackFile, func(id restic.ID, size int64) error { + rtest.OK(t, r.List(ctx, restic.PackFile, func(id restic.ID, size int64) error { packs.Insert(id) return nil })) return packs } -func removePacks(gopts GlobalOptions, t testing.TB, remove restic.IDSet) { - r, err := OpenRepository(context.TODO(), gopts) +func listTreePacks(gopts GlobalOptions, t *testing.T) restic.IDSet { + ctx, r, unlock, err := openWithReadLock(context.TODO(), gopts, false) rtest.OK(t, err) + defer unlock() + + rtest.OK(t, r.LoadIndex(ctx, nil)) + treePacks := restic.NewIDSet() + r.Index().Each(ctx, func(pb restic.PackedBlob) { + if pb.Type == restic.TreeBlob { + treePacks.Insert(pb.PackID) + } + }) + + return treePacks +} + +func removePacks(gopts GlobalOptions, t testing.TB, remove restic.IDSet) { + ctx, r, unlock, err := openWithExclusiveLock(context.TODO(), gopts, false) + rtest.OK(t, err) + defer unlock() for id := range remove { - rtest.OK(t, r.Backend().Remove(context.TODO(), backend.Handle{Type: restic.PackFile, Name: id.String()})) + rtest.OK(t, r.Backend().Remove(ctx, backend.Handle{Type: restic.PackFile, Name: id.String()})) } } func removePacksExcept(gopts GlobalOptions, t testing.TB, keep restic.IDSet, removeTreePacks bool) { - r, err := OpenRepository(context.TODO(), gopts) + ctx, r, unlock, err := openWithExclusiveLock(context.TODO(), 
gopts, false) rtest.OK(t, err) + defer unlock() // Get all tree packs - rtest.OK(t, r.LoadIndex(context.TODO(), nil)) + rtest.OK(t, r.LoadIndex(ctx, nil)) treePacks := restic.NewIDSet() - r.Index().Each(context.TODO(), func(pb restic.PackedBlob) { + r.Index().Each(ctx, func(pb restic.PackedBlob) { if pb.Type == restic.TreeBlob { treePacks.Insert(pb.PackID) } }) // remove all packs containing data blobs - rtest.OK(t, r.List(context.TODO(), restic.PackFile, func(id restic.ID, size int64) error { + rtest.OK(t, r.List(ctx, restic.PackFile, func(id restic.ID, size int64) error { if treePacks.Has(id) != removeTreePacks || keep.Has(id) { return nil } - return r.Backend().Remove(context.TODO(), backend.Handle{Type: restic.PackFile, Name: id.String()}) + return r.Backend().Remove(ctx, backend.Handle{Type: restic.PackFile, Name: id.String()}) })) } diff --git a/cmd/restic/integration_test.go b/cmd/restic/integration_test.go index 7cf8396a3..21be571e2 100644 --- a/cmd/restic/integration_test.go +++ b/cmd/restic/integration_test.go @@ -154,12 +154,13 @@ func TestFindListOnce(t *testing.T) { testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "3")}, opts, env.gopts) thirdSnapshot := restic.NewIDSet(testListSnapshots(t, env.gopts, 3)...) - repo, err := OpenRepository(context.TODO(), env.gopts) + ctx, repo, unlock, err := openWithReadLock(context.TODO(), env.gopts, false) rtest.OK(t, err) + defer unlock() snapshotIDs := restic.NewIDSet() // specify the two oldest snapshots explicitly and use "latest" to reference the newest one - for sn := range FindFilteredSnapshots(context.TODO(), repo, repo, &restic.SnapshotFilter{}, []string{ + for sn := range FindFilteredSnapshots(ctx, repo, repo, &restic.SnapshotFilter{}, []string{ secondSnapshot[0].String(), secondSnapshot[1].String()[:8], "latest", From 5e98f1e2eb49ef9dd84030611d6bc557b646d844 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Thu, 28 Mar 2024 23:14:32 +0100 Subject: [PATCH 09/10] repository: fix test setup race conditions --- internal/repository/key.go | 16 ++++++++-------- internal/repository/testing.go | 19 ++++++++++++------- internal/restic/config.go | 6 +++++- 3 files changed, 25 insertions(+), 16 deletions(-) diff --git a/internal/repository/key.go b/internal/repository/key.go index 4d597da4d..0604b44df 100644 --- a/internal/repository/key.go +++ b/internal/repository/key.go @@ -43,9 +43,9 @@ type Key struct { id restic.ID } -// Params tracks the parameters used for the KDF. If not set, it will be +// params tracks the parameters used for the KDF. If not set, it will be // calibrated on the first run of AddKey(). -var Params *crypto.Params +var params *crypto.Params const ( // KDFTimeout specifies the maximum runtime for the KDF. @@ -196,13 +196,13 @@ func LoadKey(ctx context.Context, s *Repository, id restic.ID) (k *Key, err erro // AddKey adds a new key to an already existing repository. 
func AddKey(ctx context.Context, s *Repository, password, username, hostname string, template *crypto.Key) (*Key, error) { // make sure we have valid KDF parameters - if Params == nil { + if params == nil { p, err := crypto.Calibrate(KDFTimeout, KDFMemory) if err != nil { return nil, errors.Wrap(err, "Calibrate") } - Params = &p + params = &p debug.Log("calibrated KDF parameters are %v", p) } @@ -213,9 +213,9 @@ func AddKey(ctx context.Context, s *Repository, password, username, hostname str Hostname: hostname, KDF: "scrypt", - N: Params.N, - R: Params.R, - P: Params.P, + N: params.N, + R: params.R, + P: params.P, } if newkey.Hostname == "" { @@ -237,7 +237,7 @@ func AddKey(ctx context.Context, s *Repository, password, username, hostname str } // call KDF to derive user key - newkey.user, err = crypto.KDF(*Params, newkey.Salt, password) + newkey.user, err = crypto.KDF(*params, newkey.Salt, password) if err != nil { return nil, err } diff --git a/internal/repository/testing.go b/internal/repository/testing.go index 3a566565f..874d179ce 100644 --- a/internal/repository/testing.go +++ b/internal/repository/testing.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "os" + "sync" "testing" "github.com/restic/restic/internal/backend" @@ -21,14 +22,18 @@ type logger interface { Logf(format string, args ...interface{}) } +var paramsOnce sync.Once + // TestUseLowSecurityKDFParameters configures low-security KDF parameters for testing. func TestUseLowSecurityKDFParameters(t logger) { t.Logf("using low-security KDF parameters for test") - Params = &crypto.Params{ - N: 128, - R: 1, - P: 1, - } + paramsOnce.Do(func() { + params = &crypto.Params{ + N: 128, + R: 1, + P: 1, + } + }) } // TestBackend returns a fully configured in-memory backend. @@ -36,7 +41,7 @@ func TestBackend(_ testing.TB) backend.Backend { return mem.New() } -const TestChunkerPol = chunker.Pol(0x3DA3358B4DC173) +const testChunkerPol = chunker.Pol(0x3DA3358B4DC173) // TestRepositoryWithBackend returns a repository initialized with a test // password. If be is nil, an in-memory backend is used. A constant polynomial @@ -55,7 +60,7 @@ func TestRepositoryWithBackend(t testing.TB, be backend.Backend, version uint, o t.Fatalf("TestRepository(): new repo failed: %v", err) } - cfg := restic.TestCreateConfig(t, TestChunkerPol, version) + cfg := restic.TestCreateConfig(t, testChunkerPol, version) err = repo.init(context.TODO(), test.TestPassword, cfg) if err != nil { t.Fatalf("TestRepository(): initialize repo failed: %v", err) diff --git a/internal/restic/config.go b/internal/restic/config.go index 67ee190bc..67af259ba 100644 --- a/internal/restic/config.go +++ b/internal/restic/config.go @@ -2,6 +2,7 @@ package restic import ( "context" + "sync" "testing" "github.com/restic/restic/internal/errors" @@ -67,12 +68,15 @@ func TestCreateConfig(t testing.TB, pol chunker.Pol, version uint) (cfg Config) } var checkPolynomial = true +var checkPolynomialOnce sync.Once // TestDisableCheckPolynomial disables the check that the polynomial used for // the chunker. func TestDisableCheckPolynomial(t testing.TB) { t.Logf("disabling check of the chunker polynomial") - checkPolynomial = false + checkPolynomialOnce.Do(func() { + checkPolynomial = false + }) } // LoadConfig returns loads, checks and returns the config for a repository. 
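Patch 09 closes data races in test setup rather than changing behavior: once tests run with t.Parallel(), several of them may invoke the setup helpers concurrently, and sync.Once serializes the single write while turning every later call into a no-op. A minimal sketch of the situation this guards against, assuming two hypothetical tests in internal/repository (TestA and TestB are not part of the series):

func TestA(t *testing.T) {
	t.Parallel()
	// Without the sync.Once guard, this assignment to the shared KDF
	// parameters could race with the identical call in TestB.
	TestUseLowSecurityKDFParameters(t)
}

func TestB(t *testing.T) {
	t.Parallel()
	TestUseLowSecurityKDFParameters(t) // a no-op after the first call
}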
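Stepping back, patches 02 through 04 replace the global lock registry with a handle-based API. The sketch below is illustrative only: withExclusiveLock and doWork are hypothetical names, not code from this series, and the function assumes the repository and restic packages are imported. It exercises repository.Lock and Unlocker exactly as defined above: the wrapped context is cancelled once the lock can no longer be refreshed, and Unlock stops the refresh goroutines and waits for them to exit.

// withExclusiveLock is a hypothetical caller of the refactored locking API.
func withExclusiveLock(ctx context.Context, repo restic.Repository,
	doWork func(context.Context, restic.Repository) error) error {

	lock, ctx, err := repository.Lock(ctx, repo, true, 0,
		func(msg string) {}, // retry progress callback
		func(format string, args ...interface{}) {}) // warning logger
	if err != nil {
		return err
	}
	// Unlock cancels the refresh goroutines and waits for them to finish.
	defer lock.Unlock()

	// The wrapped ctx is cancelled automatically once the lock can no longer
	// be refreshed, so doWork is interrupted before the lock goes stale.
	return doWork(ctx, repo)
}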
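The same handle-based design pays off in the tests: patch 04 turns the package-level refreshInterval and refreshabilityTimeout variables into fields of an injectable locker instance, so each test can shorten the intervals without mutating state shared with concurrently running tests. Condensed into a single hypothetical test (as if added to internal/repository/lock_test.go; TestLockFastRefresh is not part of the series):

func TestLockFastRefresh(t *testing.T) {
	t.Parallel()
	repo := openLockTestRepo(t, nil)

	// Private locker instance: the shortened intervals are invisible to
	// other tests, unlike the former global variables.
	li := &locker{
		retrySleepStart:       lockerInst.retrySleepStart,
		retrySleepMax:         lockerInst.retrySleepMax,
		refreshInterval:       20 * time.Millisecond,
		refreshabilityTimeout: 100 * time.Millisecond,
	}

	lock, wrappedCtx, err := li.Lock(context.Background(), repo, false, 0,
		func(msg string) {}, func(format string, args ...interface{}) {})
	test.OK(t, err)
	test.OK(t, wrappedCtx.Err())
	lock.Unlock()
}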
From 07eb6c315b34bac8d698be41fecd0c26d542bb49 Mon Sep 17 00:00:00 2001
From: Michael Eischer
Date: Thu, 28 Mar 2024 23:46:58 +0100
Subject: [PATCH 10/10] add changelog for locking refactor

---
 changelog/unreleased/pull-4709 | 10 ++++++++++
 1 file changed, 10 insertions(+)
 create mode 100644 changelog/unreleased/pull-4709

diff --git a/changelog/unreleased/pull-4709 b/changelog/unreleased/pull-4709
new file mode 100644
index 000000000..5ffb2a6a6
--- /dev/null
+++ b/changelog/unreleased/pull-4709
@@ -0,0 +1,10 @@
+Bugfix: Correct `--no-lock` handling of the `ls` and `tag` commands
+
+The `ls` command never locked the repository. This has been fixed. The old
+behavior is still supported using `ls --no-lock`. The latter invocation also
+works with older restic versions.
+
+The `tag` command erroneously accepted the `--no-lock` flag. The command
+now always requires an exclusive lock.
+
+https://github.com/restic/restic/pull/4709
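The `ls` fix described in the changelog reduces, like every reading command converted in patches 07 and 08, to one idiom: open the repository and take a non-exclusive lock in a single call, then release it via the returned function. A minimal sketch of that shape (runExample is a hypothetical command body, not code from this series):

func runExample(ctx context.Context, gopts GlobalOptions) error {
	// Acquires a read lock unless --no-lock was given; the returned ctx is
	// cancelled if the lock is lost, and unlock is always safe to defer.
	ctx, repo, unlock, err := openWithReadLock(ctx, gopts, gopts.NoLock)
	if err != nil {
		return err
	}
	defer unlock()

	// Read-only work goes here, e.g. listing snapshot files as the
	// integration-test helpers above do for pack files.
	return repo.List(ctx, restic.SnapshotFile, func(id restic.ID, size int64) error {
		return nil
	})
}

With this shape, `--no-lock` simply skips lock creation while leaving the rest of the command unchanged, which is why `ls --no-lock` reproduces the old, lock-free behavior.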