Improve command shutdown on context cancellation

This commit is contained in:
Michael Eischer 2024-03-30 00:19:58 +01:00
parent 910927670f
commit 31624aeffd
17 changed files with 80 additions and 6 deletions

View File

@@ -219,6 +219,9 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args
Verbosef("load indexes\n") Verbosef("load indexes\n")
bar := newIndexProgress(gopts.Quiet, gopts.JSON) bar := newIndexProgress(gopts.Quiet, gopts.JSON)
hints, errs := chkr.LoadIndex(ctx, bar) hints, errs := chkr.LoadIndex(ctx, bar)
if ctx.Err() != nil {
return ctx.Err()
}
errorsFound := false errorsFound := false
suggestIndexRebuild := false suggestIndexRebuild := false
@@ -280,6 +283,9 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args
if orphanedPacks > 0 { if orphanedPacks > 0 {
Verbosef("%d additional files were found in the repo, which likely contain duplicate data.\nThis is non-critical, you can run `restic prune` to correct this.\n", orphanedPacks) Verbosef("%d additional files were found in the repo, which likely contain duplicate data.\nThis is non-critical, you can run `restic prune` to correct this.\n", orphanedPacks)
} }
if ctx.Err() != nil {
return ctx.Err()
}
Verbosef("check snapshots, trees and blobs\n") Verbosef("check snapshots, trees and blobs\n")
errChan = make(chan error) errChan = make(chan error)
@@ -313,6 +319,9 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args
// Must happen after `errChan` is read from in the above loop to avoid // Must happen after `errChan` is read from in the above loop to avoid
// deadlocking in the case of errors. // deadlocking in the case of errors.
wg.Wait() wg.Wait()
if ctx.Err() != nil {
return ctx.Err()
}
if opts.CheckUnused { if opts.CheckUnused {
for _, id := range chkr.UnusedBlobs(ctx) { for _, id := range chkr.UnusedBlobs(ctx) {
@@ -392,10 +401,13 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args
doReadData(packs) doReadData(packs)
} }
if ctx.Err() != nil {
return ctx.Err()
}
if errorsFound { if errorsFound {
return errors.Fatal("repository contains errors") return errors.Fatal("repository contains errors")
} }
Verbosef("no errors were found\n") Verbosef("no errors were found\n")
return nil return nil

View File

@@ -103,6 +103,9 @@ func runCopy(ctx context.Context, opts CopyOptions, gopts GlobalOptions, args []
// also consider identical snapshot copies // also consider identical snapshot copies
dstSnapshotByOriginal[*sn.ID()] = append(dstSnapshotByOriginal[*sn.ID()], sn) dstSnapshotByOriginal[*sn.ID()] = append(dstSnapshotByOriginal[*sn.ID()], sn)
} }
if ctx.Err() != nil {
return ctx.Err()
}
// remember already processed trees across all snapshots // remember already processed trees across all snapshots
visitedTrees := restic.NewIDSet() visitedTrees := restic.NewIDSet()
@@ -147,7 +150,7 @@ func runCopy(ctx context.Context, opts CopyOptions, gopts GlobalOptions, args []
} }
Verbosef("snapshot %s saved\n", newID.Str()) Verbosef("snapshot %s saved\n", newID.Str())
} }
return nil return ctx.Err()
} }
func similarSnapshots(sna *restic.Snapshot, snb *restic.Snapshot) bool { func similarSnapshots(sna *restic.Snapshot, snb *restic.Snapshot) bool {

View File

@@ -608,6 +608,9 @@ func runFind(ctx context.Context, opts FindOptions, gopts GlobalOptions, args []
for sn := range FindFilteredSnapshots(ctx, snapshotLister, repo, &opts.SnapshotFilter, opts.Snapshots) { for sn := range FindFilteredSnapshots(ctx, snapshotLister, repo, &opts.SnapshotFilter, opts.Snapshots) {
filteredSnapshots = append(filteredSnapshots, sn) filteredSnapshots = append(filteredSnapshots, sn)
} }
if ctx.Err() != nil {
return ctx.Err()
}
sort.Slice(filteredSnapshots, func(i, j int) bool { sort.Slice(filteredSnapshots, func(i, j int) bool {
return filteredSnapshots[i].Time.Before(filteredSnapshots[j].Time) return filteredSnapshots[i].Time.Before(filteredSnapshots[j].Time)

View File

@@ -188,6 +188,9 @@ func runForget(ctx context.Context, opts ForgetOptions, pruneOptions PruneOption
for sn := range FindFilteredSnapshots(ctx, repo, repo, &opts.SnapshotFilter, args) { for sn := range FindFilteredSnapshots(ctx, repo, repo, &opts.SnapshotFilter, args) {
snapshots = append(snapshots, sn) snapshots = append(snapshots, sn)
} }
if ctx.Err() != nil {
return ctx.Err()
}
var jsonGroups []*ForgetGroup var jsonGroups []*ForgetGroup
@@ -270,6 +273,10 @@ func runForget(ctx context.Context, opts ForgetOptions, pruneOptions PruneOption
} }
} }
if ctx.Err() != nil {
return ctx.Err()
}
if len(removeSnIDs) > 0 { if len(removeSnIDs) > 0 {
if !opts.DryRun { if !opts.DryRun {
bar := printer.NewCounter("files deleted") bar := printer.NewCounter("files deleted")

View File

@@ -197,6 +197,9 @@ func runPruneWithRepo(ctx context.Context, opts PruneOptions, gopts GlobalOption
if err != nil { if err != nil {
return err return err
} }
if ctx.Err() != nil {
return ctx.Err()
}
if popts.DryRun { if popts.DryRun {
printer.P("\nWould have made the following changes:") printer.P("\nWould have made the following changes:")

View File

@@ -66,11 +66,17 @@ func runRecover(ctx context.Context, gopts GlobalOptions) error {
trees[blob.Blob.ID] = false trees[blob.Blob.ID] = false
} }
}) })
if ctx.Err() != nil {
return ctx.Err()
}
Verbosef("load %d trees\n", len(trees)) Verbosef("load %d trees\n", len(trees))
bar = newProgressMax(!gopts.Quiet, uint64(len(trees)), "trees loaded") bar = newProgressMax(!gopts.Quiet, uint64(len(trees)), "trees loaded")
for id := range trees { for id := range trees {
tree, err := restic.LoadTree(ctx, repo, id) tree, err := restic.LoadTree(ctx, repo, id)
if ctx.Err() != nil {
return ctx.Err()
}
if err != nil { if err != nil {
Warnf("unable to load tree %v: %v\n", id.Str(), err) Warnf("unable to load tree %v: %v\n", id.Str(), err)
continue continue

View File

@@ -145,6 +145,9 @@ func runRepairSnapshots(ctx context.Context, gopts GlobalOptions, opts RepairOpt
changedCount++ changedCount++
} }
} }
if ctx.Err() != nil {
return ctx.Err()
}
Verbosef("\n") Verbosef("\n")
if changedCount == 0 { if changedCount == 0 {

View File

@@ -294,6 +294,9 @@ func runRewrite(ctx context.Context, opts RewriteOptions, gopts GlobalOptions, a
changedCount++ changedCount++
} }
} }
if ctx.Err() != nil {
return ctx.Err()
}
Verbosef("\n") Verbosef("\n")
if changedCount == 0 { if changedCount == 0 {

View File

@@ -69,6 +69,9 @@ func runSnapshots(ctx context.Context, opts SnapshotOptions, gopts GlobalOptions
for sn := range FindFilteredSnapshots(ctx, repo, repo, &opts.SnapshotFilter, args) { for sn := range FindFilteredSnapshots(ctx, repo, repo, &opts.SnapshotFilter, args) {
snapshots = append(snapshots, sn) snapshots = append(snapshots, sn)
} }
if ctx.Err() != nil {
return ctx.Err()
}
snapshotGroups, grouped, err := restic.GroupSnapshots(snapshots, opts.GroupBy) snapshotGroups, grouped, err := restic.GroupSnapshots(snapshots, opts.GroupBy)
if err != nil { if err != nil {
return err return err

View File

@@ -117,9 +117,8 @@ func runStats(ctx context.Context, opts StatsOptions, gopts GlobalOptions, args
return fmt.Errorf("error walking snapshot: %v", err) return fmt.Errorf("error walking snapshot: %v", err)
} }
} }
if ctx.Err() != nil {
if err != nil { return ctx.Err()
return err
} }
if opts.countMode == countModeRawData { if opts.countMode == countModeRawData {

View File

@@ -122,6 +122,9 @@ func runTag(ctx context.Context, opts TagOptions, gopts GlobalOptions, args []st
changeCnt++ changeCnt++
} }
} }
if ctx.Err() != nil {
return ctx.Err()
}
if changeCnt == 0 { if changeCnt == 0 {
Verbosef("no snapshots were modified\n") Verbosef("no snapshots were modified\n")
} else { } else {

View File

@@ -380,6 +380,7 @@ func (fn *FutureNode) take(ctx context.Context) futureNodeResult {
return res return res
} }
case <-ctx.Done(): case <-ctx.Done():
return futureNodeResult{err: ctx.Err()}
} }
return futureNodeResult{err: errors.Errorf("no result")} return futureNodeResult{err: errors.Errorf("no result")}
} }

View File

@@ -90,6 +90,10 @@ func (s *TreeSaver) save(ctx context.Context, job *saveTreeJob) (*restic.Node, I
// return the error if it wasn't ignored // return the error if it wasn't ignored
if fnr.err != nil { if fnr.err != nil {
debug.Log("err for %v: %v", fnr.snPath, fnr.err) debug.Log("err for %v: %v", fnr.snPath, fnr.err)
if fnr.err == context.Canceled {
return nil, stats, fnr.err
}
fnr.err = s.errFn(fnr.target, fnr.err) fnr.err = s.errFn(fnr.target, fnr.err)
if fnr.err == nil { if fnr.err == nil {
// ignore error // ignore error

View File

@@ -320,6 +320,9 @@ func (mi *MasterIndex) Save(ctx context.Context, repo restic.Repository, exclude
newIndex = NewIndex() newIndex = NewIndex()
} }
} }
if wgCtx.Err() != nil {
return wgCtx.Err()
}
} }
err := newIndex.AddToSupersedes(extraObsolete...) err := newIndex.AddToSupersedes(extraObsolete...)

View File

@@ -130,6 +130,9 @@ func PlanPrune(ctx context.Context, opts PruneOptions, repo restic.Repository, g
} }
keepBlobs.Delete(blob.BlobHandle) keepBlobs.Delete(blob.BlobHandle)
}) })
if ctx.Err() != nil {
return nil, ctx.Err()
}
if keepBlobs.Len() < blobCount/2 { if keepBlobs.Len() < blobCount/2 {
// replace with copy to shrink map to necessary size if there's a chance to benefit // replace with copy to shrink map to necessary size if there's a chance to benefit
@@ -166,6 +169,9 @@ func packInfoFromIndex(ctx context.Context, idx restic.MasterIndex, usedBlobs re
usedBlobs[bh] = count usedBlobs[bh] = count
} }
}) })
if ctx.Err() != nil {
return nil, nil, ctx.Err()
}
// Check if all used blobs have been found in index // Check if all used blobs have been found in index
missingBlobs := restic.NewBlobSet() missingBlobs := restic.NewBlobSet()
@@ -240,6 +246,9 @@ func packInfoFromIndex(ctx context.Context, idx restic.MasterIndex, usedBlobs re
// update indexPack // update indexPack
indexPack[blob.PackID] = ip indexPack[blob.PackID] = ip
}) })
if ctx.Err() != nil {
return nil, nil, ctx.Err()
}
// if duplicate blobs exist, those will be set to either "used" or "unused": // if duplicate blobs exist, those will be set to either "used" or "unused":
// - mark only one occurrence of duplicate blobs as used // - mark only one occurrence of duplicate blobs as used
@@ -286,6 +295,9 @@ func packInfoFromIndex(ctx context.Context, idx restic.MasterIndex, usedBlobs re
indexPack[blob.PackID] = ip indexPack[blob.PackID] = ip
}) })
} }
if ctx.Err() != nil {
return nil, nil, ctx.Err()
}
// Sanity check. If no duplicates exist, all blobs have value 1. After handling // Sanity check. If no duplicates exist, all blobs have value 1. After handling
// duplicates, this also applies to duplicates. // duplicates, this also applies to duplicates.
@@ -528,6 +540,9 @@ func (plan *PrunePlan) Execute(ctx context.Context, printer progress.Printer) (e
printer.P("deleting unreferenced packs\n") printer.P("deleting unreferenced packs\n")
_ = deleteFiles(ctx, true, repo, plan.removePacksFirst, restic.PackFile, printer) _ = deleteFiles(ctx, true, repo, plan.removePacksFirst, restic.PackFile, printer)
} }
if ctx.Err() != nil {
return ctx.Err()
}
if len(plan.repackPacks) != 0 { if len(plan.repackPacks) != 0 {
printer.P("repacking packs\n") printer.P("repacking packs\n")
@@ -578,6 +593,9 @@ func (plan *PrunePlan) Execute(ctx context.Context, printer progress.Printer) (e
printer.P("removing %d old packs\n", len(plan.removePacks)) printer.P("removing %d old packs\n", len(plan.removePacks))
_ = deleteFiles(ctx, true, repo, plan.removePacks, restic.PackFile, printer) _ = deleteFiles(ctx, true, repo, plan.removePacks, restic.PackFile, printer)
} }
if ctx.Err() != nil {
return ctx.Err()
}
if plan.opts.UnsafeRecovery { if plan.opts.UnsafeRecovery {
err = rebuildIndexFiles(ctx, repo, plan.ignorePacks, nil, true, printer) err = rebuildIndexFiles(ctx, repo, plan.ignorePacks, nil, true, printer)

View File

@@ -72,7 +72,7 @@ func repack(ctx context.Context, repo restic.Repository, dstRepo restic.Reposito
return wgCtx.Err() return wgCtx.Err()
} }
} }
return nil return wgCtx.Err()
}) })
worker := func() error { worker := func() error {

View File

@@ -713,6 +713,9 @@ func (r *Repository) LoadIndex(ctx context.Context, p *progress.Counter) error {
return errors.New("index uses feature not supported by repository version 1") return errors.New("index uses feature not supported by repository version 1")
} }
} }
if ctx.Err() != nil {
return ctx.Err()
}
// remove index files from the cache which have been removed in the repo // remove index files from the cache which have been removed in the repo
return r.prepareCache() return r.prepareCache()