Merge pull request #2606 from middelink/fix-323

Add copy functionality.
MichaelEischer 2020-08-30 10:18:24 +02:00 committed by GitHub
commit ea81a0e282
10 changed files with 503 additions and 116 deletions


@@ -0,0 +1,13 @@
Enhancement: Add command for copying snapshots between repositories
We've added a copy command, allowing you to copy snapshots from one
repository to another.
Note that this process will have to read (download) and write (upload) the
entire snapshot(s) due to the different encryption keys used on the source
and destination repository. Also, the transferred files are not re-chunked,
which may break deduplication between files already stored in the
destination repo and files copied there using this command.
https://github.com/restic/restic/issues/323
https://github.com/restic/restic/pull/2606
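To make the deduplication caveat concrete, here is a minimal, self-contained Go sketch (not part of this change and not restic's code): blob IDs in restic are the SHA-256 of the plaintext content, so copied blobs keep their IDs and can be skipped when the destination already stores them, while the same data split at different chunk boundaries gets new IDs and is stored again.

```go
// Conceptual sketch only: shows why skipping re-chunking preserves blob IDs
// (and thus dedup for identical chunks) but not dedup across differently
// chunked copies of the same data.
package main

import (
	"crypto/sha256"
	"fmt"
)

func blobID(data []byte) [32]byte { return sha256.Sum256(data) }

func main() {
	data := []byte("the quick brown fox jumps over the lazy dog")

	// Same chunk boundaries in source and destination: identical IDs,
	// so a copy can skip blobs the destination already has.
	src := [][]byte{data[:20], data[20:]}
	dst := [][]byte{data[:20], data[20:]}
	fmt.Println("same chunking, IDs equal:",
		blobID(src[0]) == blobID(dst[0]) && blobID(src[1]) == blobID(dst[1]))

	// Different chunk boundaries for the same data: different IDs, so
	// deduplication against existing destination blobs is lost.
	other := [][]byte{data[:25], data[25:]}
	fmt.Println("different chunking, first IDs equal:",
		blobID(src[0]) == blobID(other[0]))
}
```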

cmd/restic/cmd_copy.go (new file, 259 lines)

@@ -0,0 +1,259 @@
package main
import (
"context"
"fmt"
"os"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/restic"
"github.com/spf13/cobra"
)
var cmdCopy = &cobra.Command{
Use: "copy [flags] [snapshotID ...]",
Short: "Copy snapshots from one repository to another",
Long: `
The "copy" command copies one or more snapshots from one repository to another
repository. Note that this will have to read (download) and write (upload) the
entire snapshot(s) due to the different encryption keys on the source and
destination, and that transferred files are not re-chunked, which may break
their deduplication.
`,
RunE: func(cmd *cobra.Command, args []string) error {
return runCopy(copyOptions, globalOptions, args)
},
}
// CopyOptions bundles all options for the copy command.
type CopyOptions struct {
Repo string
password string
PasswordFile string
PasswordCommand string
KeyHint string
Hosts []string
Tags restic.TagLists
Paths []string
}
var copyOptions CopyOptions
func init() {
cmdRoot.AddCommand(cmdCopy)
f := cmdCopy.Flags()
f.StringVarP(&copyOptions.Repo, "repo2", "", os.Getenv("RESTIC_REPOSITORY2"), "destination repository to copy snapshots to (default: $RESTIC_REPOSITORY2)")
f.StringVarP(&copyOptions.PasswordFile, "password-file2", "", os.Getenv("RESTIC_PASSWORD_FILE2"), "read the destination repository password from a file (default: $RESTIC_PASSWORD_FILE2)")
f.StringVarP(&copyOptions.KeyHint, "key-hint2", "", os.Getenv("RESTIC_KEY_HINT2"), "key ID of key to try decrypting the destination repository first (default: $RESTIC_KEY_HINT2)")
f.StringVarP(&copyOptions.PasswordCommand, "password-command2", "", os.Getenv("RESTIC_PASSWORD_COMMAND2"), "specify a shell command to obtain a password for the destination repository (default: $RESTIC_PASSWORD_COMMAND2)")
f.StringArrayVarP(&copyOptions.Hosts, "host", "H", nil, "only consider snapshots for this `host`, when no snapshot ID is given (can be specified multiple times)")
f.Var(&copyOptions.Tags, "tag", "only consider snapshots which include this `taglist`, when no snapshot ID is given")
f.StringArrayVar(&copyOptions.Paths, "path", nil, "only consider snapshots which include this (absolute) `path`, when no snapshot ID is given")
}
func runCopy(opts CopyOptions, gopts GlobalOptions, args []string) error {
if opts.Repo == "" {
return errors.Fatal("Please specify a destination repository location (--repo2)")
}
var err error
dstGopts := gopts
dstGopts.Repo = opts.Repo
dstGopts.PasswordFile = opts.PasswordFile
dstGopts.PasswordCommand = opts.PasswordCommand
dstGopts.KeyHint = opts.KeyHint
if opts.password != "" {
dstGopts.password = opts.password
} else {
dstGopts.password, err = resolvePassword(dstGopts, "RESTIC_PASSWORD2")
if err != nil {
return err
}
}
dstGopts.password, err = ReadPassword(dstGopts, "enter password for destination repository: ")
if err != nil {
return err
}
ctx, cancel := context.WithCancel(gopts.ctx)
defer cancel()
srcRepo, err := OpenRepository(gopts)
if err != nil {
return err
}
dstRepo, err := OpenRepository(dstGopts)
if err != nil {
return err
}
srcLock, err := lockRepo(srcRepo)
defer unlockRepo(srcLock)
if err != nil {
return err
}
dstLock, err := lockRepo(dstRepo)
defer unlockRepo(dstLock)
if err != nil {
return err
}
debug.Log("Loading source index")
if err := srcRepo.LoadIndex(ctx); err != nil {
return err
}
debug.Log("Loading destination index")
if err := dstRepo.LoadIndex(ctx); err != nil {
return err
}
dstSnapshotByOriginal := make(map[restic.ID][]*restic.Snapshot)
for sn := range FindFilteredSnapshots(ctx, dstRepo, opts.Hosts, opts.Tags, opts.Paths, nil) {
if sn.Original != nil && !sn.Original.IsNull() {
dstSnapshotByOriginal[*sn.Original] = append(dstSnapshotByOriginal[*sn.Original], sn)
}
// also consider identical snapshot copies
dstSnapshotByOriginal[*sn.ID()] = append(dstSnapshotByOriginal[*sn.ID()], sn)
}
cloner := &treeCloner{
srcRepo: srcRepo,
dstRepo: dstRepo,
visitedTrees: restic.NewIDSet(),
buf: nil,
}
for sn := range FindFilteredSnapshots(ctx, srcRepo, opts.Hosts, opts.Tags, opts.Paths, args) {
Verbosef("\nsnapshot %s of %v at %s)\n", sn.ID().Str(), sn.Paths, sn.Time)
// check whether the destination already has a snapshot with the same persistent ID and similar snapshot fields
srcOriginal := *sn.ID()
if sn.Original != nil {
srcOriginal = *sn.Original
}
if originalSns, ok := dstSnapshotByOriginal[srcOriginal]; ok {
isCopy := false
for _, originalSn := range originalSns {
if similarSnapshots(originalSn, sn) {
Verbosef("skipping source snapshot %s, was already copied to snapshot %s\n", sn.ID().Str(), originalSn.ID().Str())
isCopy = true
break
}
}
if isCopy {
continue
}
}
Verbosef(" copy started, this may take a while...\n")
if err := cloner.copyTree(ctx, *sn.Tree); err != nil {
return err
}
debug.Log("tree copied")
if err = dstRepo.Flush(ctx); err != nil {
return err
}
debug.Log("flushed packs and saved index")
// save snapshot
sn.Parent = nil // Parent does not have relevance in the new repo.
// Use Original as a persistent snapshot ID
if sn.Original == nil {
sn.Original = sn.ID()
}
newID, err := dstRepo.SaveJSONUnpacked(ctx, restic.SnapshotFile, sn)
if err != nil {
return err
}
Verbosef("snapshot %s saved\n", newID.Str())
}
return nil
}
func similarSnapshots(sna *restic.Snapshot, snb *restic.Snapshot) bool {
// everything except Parent and Original must match
if !sna.Time.Equal(snb.Time) || !sna.Tree.Equal(*snb.Tree) || sna.Hostname != snb.Hostname ||
sna.Username != snb.Username || sna.UID != snb.UID || sna.GID != snb.GID ||
len(sna.Paths) != len(snb.Paths) || len(sna.Excludes) != len(snb.Excludes) ||
len(sna.Tags) != len(snb.Tags) {
return false
}
if !sna.HasPaths(snb.Paths) || !sna.HasTags(snb.Tags) {
return false
}
for i, a := range sna.Excludes {
if a != snb.Excludes[i] {
return false
}
}
return true
}
type treeCloner struct {
srcRepo restic.Repository
dstRepo restic.Repository
visitedTrees restic.IDSet
buf []byte
}
func (t *treeCloner) copyTree(ctx context.Context, treeID restic.ID) error {
// We have already processed this tree
if t.visitedTrees.Has(treeID) {
return nil
}
tree, err := t.srcRepo.LoadTree(ctx, treeID)
if err != nil {
return fmt.Errorf("LoadTree(%v) returned error %v", treeID.Str(), err)
}
t.visitedTrees.Insert(treeID)
// Do we already have this tree blob?
if !t.dstRepo.Index().Has(treeID, restic.TreeBlob) {
newTreeID, err := t.dstRepo.SaveTree(ctx, tree)
if err != nil {
return fmt.Errorf("SaveTree(%v) returned error %v", treeID.Str(), err)
}
// Assurance only.
if newTreeID != treeID {
return fmt.Errorf("SaveTree(%v) returned unexpected id %s", treeID.Str(), newTreeID.Str())
}
}
// TODO: parallelize this; likely only needed inside a tree.
for _, entry := range tree.Nodes {
// If it is a directory, recurse
if entry.Type == "dir" && entry.Subtree != nil {
if err := t.copyTree(ctx, *entry.Subtree); err != nil {
return err
}
}
// Copy the blobs for this file.
for _, blobID := range entry.Content {
// Do we already have this data blob?
if t.dstRepo.Index().Has(blobID, restic.DataBlob) {
continue
}
debug.Log("Copying blob %s\n", blobID.Str())
t.buf, err = t.srcRepo.LoadBlob(ctx, restic.DataBlob, blobID, t.buf)
if err != nil {
return fmt.Errorf("LoadBlob(%v) returned error %v", blobID, err)
}
_, _, err = t.dstRepo.SaveBlob(ctx, restic.DataBlob, t.buf, blobID, false)
if err != nil {
return fmt.Errorf("SaveBlob(%v) returned error %v", blobID, err)
}
}
}
return nil
}


@@ -274,7 +274,7 @@ func Exitf(exitcode int, format string, args ...interface{}) {
}
// resolvePassword determines the password to be used for opening the repository.
func resolvePassword(opts GlobalOptions) (string, error) {
func resolvePassword(opts GlobalOptions, envStr string) (string, error) {
if opts.PasswordFile != "" && opts.PasswordCommand != "" {
return "", errors.Fatalf("Password file and command are mutually exclusive options")
}
@@ -299,7 +299,7 @@ func resolvePassword(opts GlobalOptions) (string, error) {
return strings.TrimSpace(string(s)), errors.Wrap(err, "Readfile")
}
if pwd := os.Getenv("RESTIC_PASSWORD"); pwd != "" {
if pwd := os.Getenv(envStr); pwd != "" {
return pwd, nil
}
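The signature change above lets the source and destination repository share one password-resolution path, differing only in which environment variable is consulted ("RESTIC_PASSWORD" for the main repository, "RESTIC_PASSWORD2" for ``--repo2``). Below is a simplified, hypothetical stand-in (not restic's actual helper) showing that shape.

```go
package main

import (
	"fmt"
	"os"
	"strings"
)

// resolveRepoPassword is a hypothetical, simplified sketch of the idea:
// the caller names the environment variable, so the same code can serve
// both the source repository and the copy destination.
func resolveRepoPassword(passwordFile, envVar string) (string, error) {
	if passwordFile != "" {
		b, err := os.ReadFile(passwordFile)
		if err != nil {
			return "", err
		}
		return strings.TrimSpace(string(b)), nil
	}
	if pwd := os.Getenv(envVar); pwd != "" {
		return pwd, nil
	}
	return "", fmt.Errorf("no password found in %s", envVar)
}

func main() {
	os.Setenv("RESTIC_PASSWORD2", "example")
	pwd, err := resolveRepoPassword("", "RESTIC_PASSWORD2")
	fmt.Println(pwd, err)
}
```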


@@ -1,6 +1,7 @@
package main
import (
"bytes"
"context"
"fmt"
"io/ioutil"
@@ -75,14 +76,13 @@ func sameModTime(fi1, fi2 os.FileInfo) bool {
return fi1.ModTime().Equal(fi2.ModTime())
}
// directoriesEqualContents checks if both directories contain exactly the same
// contents.
func directoriesEqualContents(dir1, dir2 string) bool {
// directoriesContentsDiff returns a diff between both directories. If these
// contain exactly the same contents, then the diff is an empty string.
func directoriesContentsDiff(dir1, dir2 string) string {
var out bytes.Buffer
ch1 := walkDir(dir1)
ch2 := walkDir(dir2)
changes := false
var a, b *dirEntry
for {
var ok bool
@@ -106,36 +106,27 @@ func directoriesEqualContents(dir1, dir2 string) bool {
}
if ch1 == nil {
fmt.Printf("+%v\n", b.path)
changes = true
fmt.Fprintf(&out, "+%v\n", b.path)
} else if ch2 == nil {
fmt.Printf("-%v\n", a.path)
changes = true
} else if !a.equals(b) {
fmt.Fprintf(&out, "-%v\n", a.path)
} else if !a.equals(&out, b) {
if a.path < b.path {
fmt.Printf("-%v\n", a.path)
changes = true
fmt.Fprintf(&out, "-%v\n", a.path)
a = nil
continue
} else if a.path > b.path {
fmt.Printf("+%v\n", b.path)
changes = true
fmt.Fprintf(&out, "+%v\n", b.path)
b = nil
continue
} else {
fmt.Printf("%%%v\n", a.path)
changes = true
fmt.Fprintf(&out, "%%%v\n", a.path)
}
}
a, b = nil, nil
}
if changes {
return false
}
return true
return out.String()
}
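The switch from a boolean result to a returned diff string lets failing tests report what actually differed. A minimal, self-contained sketch of the same pattern follows (hypothetical names and plain string slices instead of directory walks, not the test helper itself).

```go
package main

import (
	"fmt"
	"strings"
)

// contentsDiff compares two sorted name lists and returns a textual diff;
// an empty string means the sets are identical.
func contentsDiff(a, b []string) string {
	var out strings.Builder
	i, j := 0, 0
	for i < len(a) || j < len(b) {
		switch {
		case j >= len(b) || (i < len(a) && a[i] < b[j]):
			fmt.Fprintf(&out, "-%v\n", a[i]) // only in the first set
			i++
		case i >= len(a) || a[i] > b[j]:
			fmt.Fprintf(&out, "+%v\n", b[j]) // only in the second set
			j++
		default:
			i++
			j++
		}
	}
	return out.String()
}

func main() {
	diff := contentsDiff([]string{"a", "b", "d"}, []string{"a", "c", "d"})
	if diff != "" {
		fmt.Printf("directories are not equal:\n%s", diff)
	}
}
```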
type dirStat struct {


@@ -4,25 +4,26 @@ package main
import (
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"syscall"
)
func (e *dirEntry) equals(other *dirEntry) bool {
func (e *dirEntry) equals(out io.Writer, other *dirEntry) bool {
if e.path != other.path {
fmt.Fprintf(os.Stderr, "%v: path does not match (%v != %v)\n", e.path, e.path, other.path)
fmt.Fprintf(out, "%v: path does not match (%v != %v)\n", e.path, e.path, other.path)
return false
}
if e.fi.Mode() != other.fi.Mode() {
fmt.Fprintf(os.Stderr, "%v: mode does not match (%v != %v)\n", e.path, e.fi.Mode(), other.fi.Mode())
fmt.Fprintf(out, "%v: mode does not match (%v != %v)\n", e.path, e.fi.Mode(), other.fi.Mode())
return false
}
if !sameModTime(e.fi, other.fi) {
fmt.Fprintf(os.Stderr, "%v: ModTime does not match (%v != %v)\n", e.path, e.fi.ModTime(), other.fi.ModTime())
fmt.Fprintf(out, "%v: ModTime does not match (%v != %v)\n", e.path, e.fi.ModTime(), other.fi.ModTime())
return false
}
@@ -30,17 +31,17 @@ func (e *dirEntry) equals(other *dirEntry) bool {
stat2, _ := other.fi.Sys().(*syscall.Stat_t)
if stat.Uid != stat2.Uid {
fmt.Fprintf(os.Stderr, "%v: UID does not match (%v != %v)\n", e.path, stat.Uid, stat2.Uid)
fmt.Fprintf(out, "%v: UID does not match (%v != %v)\n", e.path, stat.Uid, stat2.Uid)
return false
}
if stat.Gid != stat2.Gid {
fmt.Fprintf(os.Stderr, "%v: GID does not match (%v != %v)\n", e.path, stat.Gid, stat2.Gid)
fmt.Fprintf(out, "%v: GID does not match (%v != %v)\n", e.path, stat.Gid, stat2.Gid)
return false
}
if stat.Nlink != stat2.Nlink {
fmt.Fprintf(os.Stderr, "%v: Number of links do not match (%v != %v)\n", e.path, stat.Nlink, stat2.Nlink)
fmt.Fprintf(out, "%v: Number of links do not match (%v != %v)\n", e.path, stat.Nlink, stat2.Nlink)
return false
}


@@ -4,23 +4,24 @@ package main
import (
"fmt"
"io"
"io/ioutil"
"os"
)
func (e *dirEntry) equals(other *dirEntry) bool {
func (e *dirEntry) equals(out io.Writer, other *dirEntry) bool {
if e.path != other.path {
fmt.Fprintf(os.Stderr, "%v: path does not match (%v != %v)\n", e.path, e.path, other.path)
fmt.Fprintf(out, "%v: path does not match (%v != %v)\n", e.path, e.path, other.path)
return false
}
if e.fi.Mode() != other.fi.Mode() {
fmt.Fprintf(os.Stderr, "%v: mode does not match (%v != %v)\n", e.path, e.fi.Mode(), other.fi.Mode())
fmt.Fprintf(out, "%v: mode does not match (%v != %v)\n", e.path, e.fi.Mode(), other.fi.Mode())
return false
}
if !sameModTime(e.fi, other.fi) {
fmt.Fprintf(os.Stderr, "%v: ModTime does not match (%v != %v)\n", e.path, e.fi.ModTime(), other.fi.ModTime())
fmt.Fprintf(out, "%v: ModTime does not match (%v != %v)\n", e.path, e.fi.ModTime(), other.fi.ModTime())
return false
}


@@ -260,22 +260,18 @@ func testRunPrune(t testing.TB, gopts GlobalOptions) {
rtest.OK(t, runPrune(gopts))
}
func testSetupBackupData(t testing.TB, env *testEnvironment) string {
datafile := filepath.Join("testdata", "backup-data.tar.gz")
testRunInit(t, env.gopts)
rtest.SetupTarTestFixture(t, env.testdata, datafile)
return datafile
}
func TestBackup(t *testing.T) {
env, cleanup := withTestEnvironment(t)
defer cleanup()
datafile := filepath.Join("testdata", "backup-data.tar.gz")
fd, err := os.Open(datafile)
if os.IsNotExist(errors.Cause(err)) {
t.Skipf("unable to find data file %q, skipping", datafile)
return
}
rtest.OK(t, err)
rtest.OK(t, fd.Close())
testRunInit(t, env.gopts)
rtest.SetupTarTestFixture(t, env.testdata, datafile)
testSetupBackupData(t, env)
opts := BackupOptions{}
// first backup
@@ -317,9 +313,9 @@ func TestBackup(t *testing.T) {
for i, snapshotID := range snapshotIDs {
restoredir := filepath.Join(env.base, fmt.Sprintf("restore%d", i))
t.Logf("restoring snapshot %v to %v", snapshotID.Str(), restoredir)
testRunRestore(t, env.gopts, restoredir, snapshotIDs[0])
rtest.Assert(t, directoriesEqualContents(env.testdata, filepath.Join(restoredir, "testdata")),
"directories are not equal")
testRunRestore(t, env.gopts, restoredir, snapshotID)
diff := directoriesContentsDiff(env.testdata, filepath.Join(restoredir, "testdata"))
rtest.Assert(t, diff == "", "directories are not equal: %v", diff)
}
testRunCheck(t, env.gopts)
@@ -329,18 +325,7 @@ func TestBackupNonExistingFile(t *testing.T) {
env, cleanup := withTestEnvironment(t)
defer cleanup()
datafile := filepath.Join("testdata", "backup-data.tar.gz")
fd, err := os.Open(datafile)
if os.IsNotExist(errors.Cause(err)) {
t.Skipf("unable to find data file %q, skipping", datafile)
return
}
rtest.OK(t, err)
rtest.OK(t, fd.Close())
rtest.SetupTarTestFixture(t, env.testdata, datafile)
testRunInit(t, env.gopts)
testSetupBackupData(t, env)
globalOptions.stderr = ioutil.Discard
defer func() {
globalOptions.stderr = os.Stderr
@@ -503,11 +488,7 @@ func TestBackupErrors(t *testing.T) {
env, cleanup := withTestEnvironment(t)
defer cleanup()
datafile := filepath.Join("testdata", "backup-data.tar.gz")
rtest.SetupTarTestFixture(t, env.testdata, datafile)
testRunInit(t, env.gopts)
testSetupBackupData(t, env)
// Assume failure
inaccessibleFile := filepath.Join(env.testdata, "0", "0", "9", "0")
@@ -596,10 +577,7 @@ func TestBackupTags(t *testing.T) {
env, cleanup := withTestEnvironment(t)
defer cleanup()
datafile := filepath.Join("testdata", "backup-data.tar.gz")
testRunInit(t, env.gopts)
rtest.SetupTarTestFixture(t, env.testdata, datafile)
testSetupBackupData(t, env)
opts := BackupOptions{}
testRunBackup(t, "", []string{env.testdata}, opts, env.gopts)
@@ -622,6 +600,121 @@ func TestBackupTags(t *testing.T) {
"expected parent to be %v, got %v", parent.ID, newest.Parent)
}
func testRunCopy(t testing.TB, srcGopts GlobalOptions, dstGopts GlobalOptions) {
copyOpts := CopyOptions{
Repo: dstGopts.Repo,
password: dstGopts.password,
}
rtest.OK(t, runCopy(copyOpts, srcGopts, nil))
}
func TestCopy(t *testing.T) {
env, cleanup := withTestEnvironment(t)
defer cleanup()
env2, cleanup2 := withTestEnvironment(t)
defer cleanup2()
testSetupBackupData(t, env)
opts := BackupOptions{}
testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9")}, opts, env.gopts)
testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "2")}, opts, env.gopts)
testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "3")}, opts, env.gopts)
testRunCheck(t, env.gopts)
testRunInit(t, env2.gopts)
testRunCopy(t, env.gopts, env2.gopts)
snapshotIDs := testRunList(t, "snapshots", env.gopts)
copiedSnapshotIDs := testRunList(t, "snapshots", env2.gopts)
// Check that the copy's size seems reasonable
rtest.Assert(t, len(snapshotIDs) == len(copiedSnapshotIDs), "expected %v snapshots, found %v",
len(snapshotIDs), len(copiedSnapshotIDs))
stat := dirStats(env.repo)
stat2 := dirStats(env2.repo)
sizeDiff := int64(stat.size) - int64(stat2.size)
if sizeDiff < 0 {
sizeDiff = -sizeDiff
}
rtest.Assert(t, sizeDiff < int64(stat.size)/50, "expected less than 2%% size difference: %v vs. %v",
stat.size, stat2.size)
// Check integrity of the copy
testRunCheck(t, env2.gopts)
// Check that the copied snapshots have the same tree contents as the old ones (= identical tree hash)
origRestores := make(map[string]struct{})
for i, snapshotID := range snapshotIDs {
restoredir := filepath.Join(env.base, fmt.Sprintf("restore%d", i))
origRestores[restoredir] = struct{}{}
testRunRestore(t, env.gopts, restoredir, snapshotID)
}
for i, snapshotID := range copiedSnapshotIDs {
restoredir := filepath.Join(env2.base, fmt.Sprintf("restore%d", i))
testRunRestore(t, env2.gopts, restoredir, snapshotID)
foundMatch := false
for cmpdir := range origRestores {
diff := directoriesContentsDiff(restoredir, cmpdir)
if diff == "" {
delete(origRestores, cmpdir)
foundMatch = true
}
}
rtest.Assert(t, foundMatch, "found no counterpart for snapshot %v", snapshotID)
}
rtest.Assert(t, len(origRestores) == 0, "found snapshots that were not copied")
}
func TestCopyIncremental(t *testing.T) {
env, cleanup := withTestEnvironment(t)
defer cleanup()
env2, cleanup2 := withTestEnvironment(t)
defer cleanup2()
testSetupBackupData(t, env)
opts := BackupOptions{}
testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9")}, opts, env.gopts)
testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "2")}, opts, env.gopts)
testRunCheck(t, env.gopts)
testRunInit(t, env2.gopts)
testRunCopy(t, env.gopts, env2.gopts)
snapshotIDs := testRunList(t, "snapshots", env.gopts)
copiedSnapshotIDs := testRunList(t, "snapshots", env2.gopts)
// Check that the copy's size seems reasonable
testRunCheck(t, env2.gopts)
rtest.Assert(t, len(snapshotIDs) == len(copiedSnapshotIDs), "expected %v snapshots, found %v",
len(snapshotIDs), len(copiedSnapshotIDs))
// check that no snapshots are copied, as there are no new ones
testRunCopy(t, env.gopts, env2.gopts)
testRunCheck(t, env2.gopts)
copiedSnapshotIDs = testRunList(t, "snapshots", env2.gopts)
rtest.Assert(t, len(snapshotIDs) == len(copiedSnapshotIDs), "still expected %v snapshots, found %v",
len(snapshotIDs), len(copiedSnapshotIDs))
// check that only new snapshots are copied
testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "3")}, opts, env.gopts)
testRunCopy(t, env.gopts, env2.gopts)
testRunCheck(t, env2.gopts)
snapshotIDs = testRunList(t, "snapshots", env.gopts)
copiedSnapshotIDs = testRunList(t, "snapshots", env2.gopts)
rtest.Assert(t, len(snapshotIDs) == len(copiedSnapshotIDs), "still expected %v snapshots, found %v",
len(snapshotIDs), len(copiedSnapshotIDs))
// also test the reverse direction
testRunCopy(t, env2.gopts, env.gopts)
testRunCheck(t, env.gopts)
snapshotIDs = testRunList(t, "snapshots", env.gopts)
rtest.Assert(t, len(snapshotIDs) == len(copiedSnapshotIDs), "still expected %v snapshots, found %v",
len(copiedSnapshotIDs), len(snapshotIDs))
}
func testRunTag(t testing.TB, opts TagOptions, gopts GlobalOptions) {
rtest.OK(t, runTag(opts, gopts, []string{}))
}
@@ -630,10 +723,7 @@ func TestTag(t *testing.T) {
env, cleanup := withTestEnvironment(t)
defer cleanup()
datafile := filepath.Join("testdata", "backup-data.tar.gz")
testRunInit(t, env.gopts)
rtest.SetupTarTestFixture(t, env.testdata, datafile)
testSetupBackupData(t, env)
testRunBackup(t, "", []string{env.testdata}, BackupOptions{}, env.gopts)
testRunCheck(t, env.gopts)
newest, _ := testRunSnapshots(t, env.gopts)
@@ -881,8 +971,8 @@ func TestRestore(t *testing.T) {
restoredir := filepath.Join(env.base, "restore")
testRunRestoreLatest(t, env.gopts, restoredir, nil, nil)
rtest.Assert(t, directoriesEqualContents(env.testdata, filepath.Join(restoredir, filepath.Base(env.testdata))),
"directories are not equal")
diff := directoriesContentsDiff(env.testdata, filepath.Join(restoredir, filepath.Base(env.testdata)))
rtest.Assert(t, diff == "", "directories are not equal %v", diff)
}
func TestRestoreLatest(t *testing.T) {
@@ -1033,10 +1123,7 @@ func TestFind(t *testing.T) {
env, cleanup := withTestEnvironment(t)
defer cleanup()
datafile := filepath.Join("testdata", "backup-data.tar.gz")
testRunInit(t, env.gopts)
rtest.SetupTarTestFixture(t, env.testdata, datafile)
datafile := testSetupBackupData(t, env)
opts := BackupOptions{}
testRunBackup(t, "", []string{env.testdata}, opts, env.gopts)
@@ -1073,10 +1160,7 @@ func TestFindJSON(t *testing.T) {
env, cleanup := withTestEnvironment(t)
defer cleanup()
datafile := filepath.Join("testdata", "backup-data.tar.gz")
testRunInit(t, env.gopts)
rtest.SetupTarTestFixture(t, env.testdata, datafile)
datafile := testSetupBackupData(t, env)
opts := BackupOptions{}
testRunBackup(t, "", []string{env.testdata}, opts, env.gopts)
@@ -1199,18 +1283,7 @@ func TestPrune(t *testing.T) {
env, cleanup := withTestEnvironment(t)
defer cleanup()
datafile := filepath.Join("testdata", "backup-data.tar.gz")
fd, err := os.Open(datafile)
if os.IsNotExist(errors.Cause(err)) {
t.Skipf("unable to find data file %q, skipping", datafile)
return
}
rtest.OK(t, err)
rtest.OK(t, fd.Close())
testRunInit(t, env.gopts)
rtest.SetupTarTestFixture(t, env.testdata, datafile)
testSetupBackupData(t, env)
opts := BackupOptions{}
testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9")}, opts, env.gopts)
@@ -1317,9 +1390,9 @@ func TestHardLink(t *testing.T) {
for i, snapshotID := range snapshotIDs {
restoredir := filepath.Join(env.base, fmt.Sprintf("restore%d", i))
t.Logf("restoring snapshot %v to %v", snapshotID.Str(), restoredir)
testRunRestore(t, env.gopts, restoredir, snapshotIDs[0])
rtest.Assert(t, directoriesEqualContents(env.testdata, filepath.Join(restoredir, "testdata")),
"directories are not equal")
testRunRestore(t, env.gopts, restoredir, snapshotID)
diff := directoriesContentsDiff(env.testdata, filepath.Join(restoredir, "testdata"))
rtest.Assert(t, diff == "", "directories are not equal %v", diff)
linkResults := createFileSetPerHardlink(filepath.Join(restoredir, "testdata"))
rtest.Assert(t, linksEqual(linkTests, linkResults),
@@ -1385,18 +1458,7 @@ func TestQuietBackup(t *testing.T) {
env, cleanup := withTestEnvironment(t)
defer cleanup()
datafile := filepath.Join("testdata", "backup-data.tar.gz")
fd, err := os.Open(datafile)
if os.IsNotExist(errors.Cause(err)) {
t.Skipf("unable to find data file %q, skipping", datafile)
return
}
rtest.OK(t, err)
rtest.OK(t, fd.Close())
testRunInit(t, env.gopts)
rtest.SetupTarTestFixture(t, env.testdata, datafile)
testSetupBackupData(t, env)
opts := BackupOptions{}
env.gopts.Quiet = false


@@ -54,7 +54,7 @@ directories in an encrypted repository stored on different backends.
if c.Name() == "version" {
return nil
}
pwd, err := resolvePassword(globalOptions)
pwd, err := resolvePassword(globalOptions, "RESTIC_PASSWORD")
if err != nil {
fmt.Fprintf(os.Stderr, "Resolving password failed: %v\n", err)
Exit(1)


@@ -82,6 +82,66 @@ Furthermore you can group the output by the same filters (host, paths, tags):
1 snapshots
Copying snapshots between repositories
======================================
In case you want to transfer snapshots between two repositories, for
example from a local to a remote repository, you can use the ``copy`` command:
.. code-block:: console
$ restic -r /srv/restic-repo copy --repo2 /srv/restic-repo-copy
repository d6504c63 opened successfully, password is correct
repository 3dd0878c opened successfully, password is correct
snapshot 410b18a2 of [/home/user/work] at 2020-06-09 23:15:57.305305 +0200 CEST)
copy started, this may take a while...
snapshot 7a746a07 saved
snapshot 4e5d5487 of [/home/user/work] at 2020-05-01 22:44:07.012113 +0200 CEST)
skipping snapshot 4e5d5487, was already copied to snapshot 50eb62b7
The example command copies all snapshots from the source repository
``/srv/restic-repo`` to the destination repository ``/srv/restic-repo-copy``.
Snapshots which have previously been copied between repositories will
be skipped by later copy runs.
.. note:: This process will have to read (download) and write (upload) the
entire snapshot(s) due to the different encryption keys used in the source and
destination repository. Also, the transferred files are not re-chunked, which
may break deduplication between files already stored in the destination repo
and files copied there using this command.
For the destination repository ``--repo2`` the password can be read from
a file ``--password-file2`` or from a command ``--password-command2``.
Alternatively the environment variables ``$RESTIC_PASSWORD_COMMAND2`` and
``$RESTIC_PASSWORD_FILE2`` can be used. It is also possible to directly
pass the password via ``$RESTIC_PASSWORD2``. The key which should be used
for decryption can be selected by passing its ID via the flag ``--key-hint2``
or the environment variable ``$RESTIC_KEY_HINT2``.
If the source and destination repository use the same backend, the
configuration options and environment variables used to configure the
backend apply to both repositories. For example, it might not be possible
to specify different accounts for the source and destination repository.
You can avoid this limitation by using the rclone backend along with
remotes that are configured in rclone.
The list of snapshots to copy can be filtered by host, path in the backup
and / or a comma-separated tag list:
.. code-block:: console
$ restic -r /srv/restic-repo copy --repo2 /srv/restic-repo-copy --host luigi --path /srv --tag foo,bar
It is also possible to explicitly specify the list of snapshots to copy, in
which case only these instead of all snapshots will be copied:
.. code-block:: console
$ restic -r /srv/restic-repo copy --repo2 /srv/restic-repo-copy 410b18a2 4e5d5487 latest
Checking integrity and consistency
==================================


@@ -20,6 +20,7 @@ Usage help is available:
cache Operate on local cache directories
cat Print internal objects to stdout
check Check the repository for errors
copy Copy snapshots from one repository to another
diff Show differences between two snapshots
dump Print a backed-up file to stdout
find Find a file, a directory or restic IDs
@@ -79,10 +80,9 @@ command:
EXIT STATUS
===========
Exit status is 0 if the command was successful, and non-zero if there was any error.
Note that some issues such as unreadable or deleted files during backup
currently doesn't result in a non-zero error exit status.
Exit status is 0 if the command was successful.
Exit status is 1 if there was a fatal error (no snapshot created).
Exit status is 3 if some source data could not be read (incomplete snapshot created).
Usage:
restic backup [flags] FILE/DIR [FILE/DIR] ...