
Change backup policy to be inclusive

All given policies are evaluated for each snapshot, making sure that
each keep-* option is able to retain its most recent snapshot. This
ensures that weeklies keep Sundays around and monthlies keep the last
day of the month around.

Added a test case to make sure that when multiple --keep-tags are
given, ALL of them need to match.
Pauline Middelink 2017-05-11 22:26:44 +02:00
parent ce3acbd30a
commit 929f90344e
13 changed files with 154 additions and 239 deletions
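
Before the diff itself, a minimal, self-contained Go sketch of what "inclusive" evaluation means. The bucket type, its fields and the key functions below are made-up illustrations, not the restic types touched by this commit, but the control flow mirrors the new ApplyPolicy loop: every keep-* counter inspects every snapshot, newest first, and keeps the newest snapshot in each of its periods.

package main

import (
    "fmt"
    "time"
)

// bucket tracks how many snapshots a keep-* option may still retain and
// remembers the key of the last period for which it kept something.
type bucket struct {
    count int
    key   func(t time.Time) string
    last  string
}

func main() {
    // One snapshot per day for two weeks, newest first.
    var snapshots []time.Time
    for i := 0; i < 14; i++ {
        snapshots = append(snapshots, time.Date(2017, 5, 14-i, 12, 0, 0, 0, time.UTC))
    }

    // Roughly equivalent to --keep-daily 3 --keep-weekly 2 (illustrative only).
    buckets := []bucket{
        {count: 3, last: "none", key: func(t time.Time) string { return t.Format("2006-01-02") }},
        {count: 2, last: "none", key: func(t time.Time) string {
            y, w := t.ISOWeek()
            return fmt.Sprintf("%d-%d", y, w)
        }},
    }

    for _, sn := range snapshots {
        keep := false
        // Inclusive: every bucket sees every snapshot, so each keep-* option
        // independently retains the newest snapshot of each of its periods.
        for i := range buckets {
            b := &buckets[i]
            if b.count > 0 {
                if k := b.key(sn); k != b.last {
                    keep = true
                    b.last = k
                    b.count--
                }
            }
        }
        fmt.Printf("%s keep=%v\n", sn.Format("2006-01-02"), keep)
    }
}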

View File

@@ -788,6 +788,9 @@ All the ``--keep-*`` options above only count
 hours/days/weeks/months/years which have a snapshot, so those without a
 snapshot are ignored.
+All snapshots are evaluated and counted against every matching keep-* count. A
+single snapshot made on 30-09-2017 (a Saturday) will count as a daily, a weekly
+and a monthly.
 Let's explain this with an example: Suppose you have only made a backup
 on each Sunday for 12 weeks. Then ``forget --keep-daily 4`` will keep
 the last four snapshots for the last four Sundays, but remove the rest.
@@ -796,6 +799,14 @@ is a safety feature: it prevents restic from removing many snapshots
 when no new ones are created. If it was implemented otherwise, running
 ``forget --keep-daily 4`` on a Friday would remove all snapshots!
+Another example: Suppose you make daily backups for 100 years. Then
+``forget --keep-daily 7 --keep-weekly 5 --keep-monthly 12 --keep-yearly 75``
+will keep the most recent 7 daily snapshots, 4 last-day-of-the-week snapshots
+(remember, the 7 dailies already cover a week!), 11 or 12 last-day-of-the-month
+snapshots (depending on whether the 5 weeklies cross a month boundary), and of
+course 75 last-day-of-the-year snapshots. All other snapshots are removed.
 Autocompletion
 --------------
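
To back up the new paragraph's claim that one snapshot can count as a daily, a weekly and a monthly at once, here is a small standalone sketch of how that date maps to three bucket keys. The arithmetic follows the same YYYYMMDD / ISO-week / YYYYMM pattern as the ymdh helper visible in the policy diff below; the exact restic helpers (ymd, yw, ym) may be formulated slightly differently.

package main

import (
    "fmt"
    "time"
)

func main() {
    // Last day of September 2017, a Saturday.
    d := time.Date(2017, 9, 30, 23, 0, 0, 0, time.UTC)

    ymd := d.Year()*10000 + int(d.Month())*100 + d.Day() // daily bucket key
    year, week := d.ISOWeek()
    yw := year*100 + week                // weekly bucket key
    ym := d.Year()*100 + int(d.Month())  // monthly bucket key

    // The same snapshot occupies one slot in each bucket, so it can be the
    // newest daily, the newest weekly and the newest monthly at the same time.
    fmt.Println("daily bucket:  ", ymd) // 20170930
    fmt.Println("weekly bucket: ", yw)  // 201739
    fmt.Println("monthly bucket:", ym)  // 201709
}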

View File

@ -1,7 +1,6 @@
@@ -1,7 +1,6 @@
 package restic

 import (
-    "fmt"
     "reflect"
     "sort"
     "time"
@@ -52,18 +51,6 @@ func (e ExpirePolicy) Empty() bool {
     return reflect.DeepEqual(e, empty)
 }

-// filter is used to split a list of snapshots into those to keep and those to
-// remove according to a policy.
-type filter struct {
-    Unprocessed Snapshots
-    Remove      Snapshots
-    Keep        Snapshots
-}
-
-func (f filter) String() string {
-    return fmt.Sprintf("<filter %d todo, %d keep, %d remove>", len(f.Unprocessed), len(f.Keep), len(f.Remove))
-}
-
 // ymdh returns an integer in the form YYYYMMDDHH.
 func ymdh(d time.Time) int {
     return d.Year()*1000000 + int(d.Month())*10000 + d.Day()*100 + d.Hour()
@@ -90,84 +77,16 @@ func y(d time.Time) int {
     return d.Year()
 }

-// apply moves snapshots from Unprocess to either Keep or Remove. It sorts the
-// snapshots into buckets according to the return value of fn, and then moves
-// the newest snapshot in each bucket to Keep and all others to Remove. When
-// max snapshots were found, processing stops.
-func (f *filter) apply(fn func(time.Time) int, max int) {
-    if max == 0 || len(f.Unprocessed) == 0 {
-        return
-    }
-
-    sameBucket := Snapshots{}
-    lastBucket := fn(f.Unprocessed[0].Time)
-
-    for len(f.Unprocessed) > 0 {
-        cur := f.Unprocessed[0]
-        bucket := fn(cur.Time)
-
-        // if the snapshots are from a new bucket, forget all but the first
-        // (=last in time) snapshot from the previous bucket.
-        if bucket != lastBucket {
-            f.Keep = append(f.Keep, sameBucket[0])
-            f.Remove = append(f.Remove, sameBucket[1:]...)
-
-            sameBucket = Snapshots{}
-            lastBucket = bucket
-            max--
-
-            if max == 0 {
-                return
-            }
-        }
-
-        // collect all snapshots for the current bucket
-        sameBucket = append(sameBucket, cur)
-        f.Unprocessed = f.Unprocessed[1:]
-    }
-
-    // if we have leftovers, process them too.
-    if len(sameBucket) > 0 {
-        f.Keep = append(f.Keep, sameBucket[0])
-        f.Remove = append(f.Remove, sameBucket[1:]...)
-    }
-}
-
-// keepTags marks the snapshots which have all tags as to be kept.
-func (f *filter) keepTags(tags []string) {
-    if len(tags) == 0 {
-        return
-    }
-
-    unprocessed := f.Unprocessed[:0]
-    for _, sn := range f.Unprocessed {
-        if sn.HasTags(tags) {
-            f.Keep = append(f.Keep, sn)
-            continue
-        }
-        unprocessed = append(unprocessed, sn)
-    }
-    f.Unprocessed = unprocessed
-}
-
-// keepLast marks the last n snapshots as to be kept.
-func (f *filter) keepLast(n int) {
-    if n > len(f.Unprocessed) {
-        n = len(f.Unprocessed)
-    }
-
-    f.Keep = append(f.Keep, f.Unprocessed[:n]...)
-    f.Unprocessed = f.Unprocessed[n:]
-}
-
-// finish moves all remaining snapshots to remove.
-func (f *filter) finish() {
-    f.Remove = append(f.Remove, f.Unprocessed...)
-}
-
-// ApplyPolicy runs returns the snapshots from s that are to be deleted according
-// to the policy p. s is sorted in the process.
+var a int
+
+// always returns a unique number for d.
+func always(d time.Time) int {
+    a++
+    return a
+}
+
+// ApplyPolicy returns the snapshots from list that are to be kept and removed
+// according to the policy p. list is sorted in the process.
 func ApplyPolicy(list Snapshots, p ExpirePolicy) (keep, remove Snapshots) {
     sort.Sort(list)
@@ -179,20 +98,46 @@ func ApplyPolicy(list Snapshots, p ExpirePolicy) (keep, remove Snapshots) {
         return list, remove
     }

-    f := filter{
-        Unprocessed: list,
-        Remove:      Snapshots{},
-        Keep:        Snapshots{},
-    }
-
-    f.keepTags(p.Tags)
-    f.keepLast(p.Last)
-    f.apply(ymdh, p.Hourly)
-    f.apply(ymd, p.Daily)
-    f.apply(yw, p.Weekly)
-    f.apply(ym, p.Monthly)
-    f.apply(y, p.Yearly)
-    f.finish()
-
-    return f.Keep, f.Remove
+    var buckets = [6]struct {
+        Count  int
+        bucker func(d time.Time) int
+        Last   int
+    }{
+        {p.Last, always, -1},
+        {p.Hourly, ymdh, -1},
+        {p.Daily, ymd, -1},
+        {p.Weekly, yw, -1},
+        {p.Monthly, ym, -1},
+        {p.Yearly, y, -1},
+    }
+
+    for _, cur := range list {
+        var keep_snap bool
+
+        // Tags are handled specially as they are not counted.
+        if len(p.Tags) > 0 {
+            if cur.HasTags(p.Tags) {
+                keep_snap = true
+            }
+        }
+
+        // Now update the other buckets and see if they have some counts left.
+        for i, b := range buckets {
+            if b.Count > 0 {
+                val := b.bucker(cur.Time)
+                if val != b.Last {
+                    keep_snap = true
+                    buckets[i].Last = val
+                    buckets[i].Count--
+                }
+            }
+        }
+
+        if keep_snap {
+            keep = append(keep, cur)
+        } else {
+            remove = append(remove, cur)
+        }
+    }
+    return keep, remove
 }
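
A hedged usage sketch of the reworked ApplyPolicy. The snapshot literal style is borrowed from testExpireSnapshots in the test file below; the import path and the exact shape of the Snapshot struct are assumptions, so read this as an illustration of the keep/remove contract rather than a drop-in program.

package main

import (
    "fmt"
    "time"

    "restic" // assumed import path; adjust to where the package lives in the repo layout
)

func main() {
    mustParse := func(s string) time.Time {
        t, err := time.Parse("2006-01-02 15:04:05", s)
        if err != nil {
            panic(err)
        }
        return t
    }

    // ApplyPolicy sorts the list itself, so the order here does not matter.
    list := restic.Snapshots{
        {Time: mustParse("2017-05-07 10:00:00")},
        {Time: mustParse("2017-05-08 10:00:00")},
        {Time: mustParse("2017-05-09 10:00:00")},
    }

    // Keep the two most recent dailies; everything else ends up in remove.
    p := restic.ExpirePolicy{Daily: 2}

    keep, remove := restic.ApplyPolicy(list, p)
    fmt.Println("keep:", len(keep), "remove:", len(remove)) // keep: 2 remove: 1
}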

View File

@@ -77,8 +77,8 @@ var testExpireSnapshots = restic.Snapshots{
     {Time: parseTimeUTC("2014-11-10 10:20:30"), Tags: []string{"foo"}},
     {Time: parseTimeUTC("2014-11-12 10:20:30"), Tags: []string{"foo"}},
     {Time: parseTimeUTC("2014-11-13 10:20:30"), Tags: []string{"foo"}},
-    {Time: parseTimeUTC("2014-11-13 10:20:30")},
-    {Time: parseTimeUTC("2014-11-15 10:20:30")},
+    {Time: parseTimeUTC("2014-11-13 10:20:30"), Tags: []string{"bar"}},
+    {Time: parseTimeUTC("2014-11-15 10:20:30"), Tags: []string{"foo", "bar"}},
     {Time: parseTimeUTC("2014-11-18 10:20:30")},
     {Time: parseTimeUTC("2014-11-20 10:20:30")},
     {Time: parseTimeUTC("2014-11-21 10:20:30")},
@@ -164,57 +164,58 @@ var expireTests = []restic.ExpirePolicy{
     {Yearly: 10},
     {Daily: 7, Weekly: 2, Monthly: 3, Yearly: 10},
     {Tags: []string{"foo"}},
+    {Tags: []string{"foo", "bar"}},
 }

 func TestApplyPolicy(t *testing.T) {
     for i, p := range expireTests {
-        keep, remove := restic.ApplyPolicy(testExpireSnapshots, p)
+        t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
+            keep, remove := restic.ApplyPolicy(testExpireSnapshots, p)

             t.Logf("test %d: returned keep %v, remove %v (of %v) expired snapshots for policy %v",
                 i, len(keep), len(remove), len(testExpireSnapshots), p)

             if len(keep)+len(remove) != len(testExpireSnapshots) {
                 t.Errorf("test %d: len(keep)+len(remove) = %d != len(testExpireSnapshots) = %d",
                     i, len(keep)+len(remove), len(testExpireSnapshots))
             }

             if p.Sum() > 0 && len(keep) > p.Sum() {
                 t.Errorf("not enough snapshots removed: policy allows %v snapshots to remain, but ended up with %v",
                     p.Sum(), len(keep))
             }

             for _, sn := range keep {
                 t.Logf("test %d: keep snapshot at %v %s\n", i, sn.Time, sn.Tags)
             }
             for _, sn := range remove {
                 t.Logf("test %d: forget snapshot at %v %s\n", i, sn.Time, sn.Tags)
             }

             goldenFilename := filepath.Join("testdata", fmt.Sprintf("policy_keep_snapshots_%d", i))

             if *updateGoldenFiles {
                 buf, err := json.MarshalIndent(keep, "", " ")
                 if err != nil {
                     t.Fatalf("error marshaling result: %v", err)
                 }
                 if err = ioutil.WriteFile(goldenFilename, buf, 0644); err != nil {
                     t.Fatalf("unable to update golden file: %v", err)
                 }
             }

-        buf, err := ioutil.ReadFile(goldenFilename)
-        if err != nil {
-            t.Errorf("error loading golden file %v: %v", goldenFilename, err)
-            continue
-        }
-
-        var want restic.Snapshots
-        err = json.Unmarshal(buf, &want)
-
-        if !reflect.DeepEqual(keep, want) {
-            t.Errorf("test %v: wrong result, want:\n %v\ngot:\n %v", i, want, keep)
-            continue
-        }
+            buf, err := ioutil.ReadFile(goldenFilename)
+            if err != nil {
+                t.Fatalf("error loading golden file %v: %v", goldenFilename, err)
+            }
+
+            var want restic.Snapshots
+            err = json.Unmarshal(buf, &want)
+
+            if !reflect.DeepEqual(keep, want) {
+                t.Fatalf("test %v: wrong result, want:\n %v\ngot:\n %v", i, want, keep)
+            }
+        })
     }
 }

View File

@@ -317,12 +317,19 @@
   {
     "time": "2014-11-15T10:20:30Z",
     "tree": null,
-    "paths": null
+    "paths": null,
+    "tags": [
+      "foo",
+      "bar"
+    ]
   },
   {
     "time": "2014-11-13T10:20:30Z",
     "tree": null,
-    "paths": null
+    "paths": null,
+    "tags": [
+      "bar"
+    ]
   },
   {
     "time": "2014-11-13T10:20:30Z",
@@ -536,4 +543,4 @@
     "tree": null,
     "paths": null
   }
 ]

View File

@@ -9,11 +9,6 @@
     "tree": null,
     "paths": null
   },
-  {
-    "time": "2016-01-12T21:02:03Z",
-    "tree": null,
-    "paths": null
-  },
   {
     "time": "2016-01-09T21:02:03Z",
     "tree": null,
@@ -53,10 +48,5 @@
     "time": "2016-01-01T07:08:03Z",
     "tree": null,
     "paths": null
-  },
-  {
-    "time": "2015-11-22T10:20:30Z",
-    "tree": null,
-    "paths": null
   }
 ]

View File

@@ -14,24 +14,9 @@
     "tree": null,
     "paths": null
   },
-  {
-    "time": "2016-01-08T20:02:03Z",
-    "tree": null,
-    "paths": null
-  },
   {
     "time": "2016-01-03T07:02:03Z",
     "tree": null,
     "paths": null
-  },
-  {
-    "time": "2015-11-22T10:20:30Z",
-    "tree": null,
-    "paths": null
-  },
-  {
-    "time": "2015-11-15T10:20:30Z",
-    "tree": null,
-    "paths": null
   }
 ]

View File

@@ -9,16 +9,6 @@
     "tree": null,
     "paths": null
   },
-  {
-    "time": "2016-01-09T21:02:03Z",
-    "tree": null,
-    "paths": null
-  },
-  {
-    "time": "2016-01-03T07:02:03Z",
-    "tree": null,
-    "paths": null
-  },
   {
     "time": "2015-11-22T10:20:30Z",
     "tree": null,
@@ -43,13 +33,5 @@
     "time": "2014-11-22T10:20:30Z",
     "tree": null,
     "paths": null
-  },
-  {
-    "time": "2014-10-22T10:20:30Z",
-    "tree": null,
-    "paths": null,
-    "tags": [
-      "foo"
-    ]
   }
 ]

View File

@@ -34,16 +34,6 @@
     "tree": null,
     "paths": null
   },
-  {
-    "time": "2016-01-04T16:23:03Z",
-    "tree": null,
-    "paths": null
-  },
-  {
-    "time": "2016-01-03T07:02:03Z",
-    "tree": null,
-    "paths": null
-  },
   {
     "time": "2015-11-22T10:20:30Z",
     "tree": null,
@@ -54,19 +44,9 @@
     "tree": null,
     "paths": null
   },
-  {
-    "time": "2015-09-22T10:20:30Z",
-    "tree": null,
-    "paths": null
-  },
-  {
-    "time": "2015-08-22T10:20:30Z",
-    "tree": null,
-    "paths": null
-  },
   {
     "time": "2014-11-22T10:20:30Z",
     "tree": null,
     "paths": null
   }
 ]

View File

@@ -1,4 +1,13 @@
 [
+  {
+    "time": "2014-11-15T10:20:30Z",
+    "tree": null,
+    "paths": null,
+    "tags": [
+      "foo",
+      "bar"
+    ]
+  },
   {
     "time": "2014-11-13T10:20:30Z",
     "tree": null,
@@ -111,4 +120,4 @@
       "foo"
     ]
   }
 ]

View File

@@ -0,0 +1,11 @@
+[
+  {
+    "time": "2014-11-15T10:20:30Z",
+    "tree": null,
+    "paths": null,
+    "tags": [
+      "foo",
+      "bar"
+    ]
+  }
+]

View File

@@ -317,12 +317,19 @@
   {
     "time": "2014-11-15T10:20:30Z",
     "tree": null,
-    "paths": null
+    "paths": null,
+    "tags": [
+      "foo",
+      "bar"
+    ]
   },
   {
     "time": "2014-11-13T10:20:30Z",
     "tree": null,
-    "paths": null
+    "paths": null,
+    "tags": [
+      "bar"
+    ]
   },
   {
     "time": "2014-11-13T10:20:30Z",
@@ -536,4 +543,4 @@
     "tree": null,
     "paths": null
   }
 ]

View File

@@ -317,12 +317,19 @@
   {
     "time": "2014-11-15T10:20:30Z",
     "tree": null,
-    "paths": null
+    "paths": null,
+    "tags": [
+      "foo",
+      "bar"
+    ]
  },
   {
     "time": "2014-11-13T10:20:30Z",
     "tree": null,
-    "paths": null
+    "paths": null,
+    "tags": [
+      "bar"
+    ]
   },
   {
     "time": "2014-11-13T10:20:30Z",
@@ -536,4 +543,4 @@
     "tree": null,
     "paths": null
   }
 ]

View File

@@ -28,25 +28,5 @@
     "time": "2016-01-07T10:02:03Z",
     "tree": null,
     "paths": null
-  },
-  {
-    "time": "2016-01-06T08:02:03Z",
-    "tree": null,
-    "paths": null
-  },
-  {
-    "time": "2016-01-05T09:02:03Z",
-    "tree": null,
-    "paths": null
-  },
-  {
-    "time": "2016-01-04T16:23:03Z",
-    "tree": null,
-    "paths": null
-  },
-  {
-    "time": "2016-01-03T07:02:03Z",
-    "tree": null,
-    "paths": null
   }
 ]