Diffstat (limited to 'cmd')
-rw-r--r--  cmd/restic/cleanup.go | 7
-rw-r--r--  cmd/restic/cmd_backup.go | 40
-rw-r--r--  cmd/restic/cmd_backup_integration_test.go | 569
-rw-r--r--  cmd/restic/cmd_cache.go | 2
-rw-r--r--  cmd/restic/cmd_cat.go | 33
-rw-r--r--  cmd/restic/cmd_check.go | 11
-rw-r--r--  cmd/restic/cmd_check_integration_test.go | 34
-rw-r--r--  cmd/restic/cmd_copy.go | 10
-rw-r--r--  cmd/restic/cmd_copy_integration_test.go | 136
-rw-r--r--  cmd/restic/cmd_debug.go | 14
-rw-r--r--  cmd/restic/cmd_diff.go | 28
-rw-r--r--  cmd/restic/cmd_diff_integration_test.go | 193
-rw-r--r--  cmd/restic/cmd_dump.go | 9
-rw-r--r--  cmd/restic/cmd_find.go | 40
-rw-r--r--  cmd/restic/cmd_find_integration_test.go | 87
-rw-r--r--  cmd/restic/cmd_forget.go | 107
-rw-r--r--  cmd/restic/cmd_forget_integration_test.go | 13
-rw-r--r--  cmd/restic/cmd_forget_test.go | 94
-rw-r--r--  cmd/restic/cmd_generate.go | 22
-rw-r--r--  cmd/restic/cmd_init.go | 23
-rw-r--r--  cmd/restic/cmd_init_integration_test.go | 49
-rw-r--r--  cmd/restic/cmd_key.go | 8
-rw-r--r--  cmd/restic/cmd_key_integration_test.go | 145
-rw-r--r--  cmd/restic/cmd_list.go | 8
-rw-r--r--  cmd/restic/cmd_list_integration_test.go | 44
-rw-r--r--  cmd/restic/cmd_ls.go | 15
-rw-r--r--  cmd/restic/cmd_ls_integration_test.go | 19
-rw-r--r--  cmd/restic/cmd_migrate.go | 2
-rw-r--r--  cmd/restic/cmd_mount.go | 2
-rw-r--r--  cmd/restic/cmd_mount_integration_test.go (renamed from cmd/restic/integration_fuse_test.go) | 10
-rw-r--r--  cmd/restic/cmd_prune.go | 14
-rw-r--r--  cmd/restic/cmd_prune_integration_test.go | 221
-rw-r--r--  cmd/restic/cmd_recover.go | 2
-rw-r--r--  cmd/restic/cmd_repair.go | 14
-rw-r--r--  cmd/restic/cmd_repair_index.go (renamed from cmd/restic/cmd_rebuild_index.go) | 42
-rw-r--r--  cmd/restic/cmd_repair_index_integration_test.go | 140
-rw-r--r--  cmd/restic/cmd_repair_snapshots.go | 176
-rw-r--r--  cmd/restic/cmd_repair_snapshots_integration_test.go | 135
-rw-r--r--  cmd/restic/cmd_restore.go | 81
-rw-r--r--  cmd/restic/cmd_restore_integration_test.go | 307
-rw-r--r--  cmd/restic/cmd_rewrite.go | 59
-rw-r--r--  cmd/restic/cmd_rewrite_integration_test.go (renamed from cmd/restic/integration_rewrite_test.go) | 0
-rw-r--r--  cmd/restic/cmd_snapshots.go | 12
-rw-r--r--  cmd/restic/cmd_snapshots_integration_test.go | 32
-rw-r--r--  cmd/restic/cmd_stats.go | 192
-rw-r--r--  cmd/restic/cmd_stats_test.go | 62
-rw-r--r--  cmd/restic/cmd_tag.go | 2
-rw-r--r--  cmd/restic/cmd_tag_integration_test.go | 94
-rw-r--r--  cmd/restic/exclude.go | 33
-rw-r--r--  cmd/restic/exclude_test.go | 48
-rw-r--r--  cmd/restic/format.go | 14
-rw-r--r--  cmd/restic/format_test.go | 61
-rw-r--r--  cmd/restic/global.go | 278
-rw-r--r--  cmd/restic/global_test.go | 18
-rw-r--r--  cmd/restic/integration_filter_pattern_test.go | 8
-rw-r--r--  cmd/restic/integration_helpers_test.go | 162
-rw-r--r--  cmd/restic/integration_test.go | 2052
-rw-r--r--  cmd/restic/local_layout_test.go | 41
-rw-r--r--  cmd/restic/lock.go | 152
-rw-r--r--  cmd/restic/lock_test.go | 212
-rw-r--r--  cmd/restic/main.go | 18
61 files changed, 3817 insertions, 2639 deletions
diff --git a/cmd/restic/cleanup.go b/cmd/restic/cleanup.go
index 61af72802..75933fe96 100644
--- a/cmd/restic/cleanup.go
+++ b/cmd/restic/cleanup.go
@@ -62,6 +62,12 @@ func CleanupHandler(c <-chan os.Signal) {
debug.Log("signal %v received, cleaning up", s)
Warnf("%ssignal %v received, cleaning up\n", clearLine(0), s)
+ if val, _ := os.LookupEnv("RESTIC_DEBUG_STACKTRACE_SIGINT"); val != "" {
+ _, _ = os.Stderr.WriteString("\n--- STACKTRACE START ---\n\n")
+ _, _ = os.Stderr.WriteString(debug.DumpStacktrace())
+ _, _ = os.Stderr.WriteString("\n--- STACKTRACE END ---\n")
+ }
+
code := 0
if s == syscall.SIGINT {
@@ -78,5 +84,6 @@ func CleanupHandler(c <-chan os.Signal) {
// given exit code.
func Exit(code int) {
code = RunCleanupHandlers(code)
+ debug.Log("exiting with status code %d", code)
os.Exit(code)
}
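
The hunk above gates a full goroutine stack dump behind the RESTIC_DEBUG_STACKTRACE_SIGINT environment variable, delegating to restic's internal debug package. As a minimal standalone sketch of the same pattern, using runtime.Stack from the standard library in place of debug.DumpStacktrace:

package main

import (
	"os"
	"runtime"
)

// dumpStacktraceIfRequested mirrors the gate added in CleanupHandler:
// it writes the stacks of all goroutines to stderr, but only when
// RESTIC_DEBUG_STACKTRACE_SIGINT is set to a non-empty value.
func dumpStacktraceIfRequested() {
	if val, _ := os.LookupEnv("RESTIC_DEBUG_STACKTRACE_SIGINT"); val == "" {
		return
	}
	buf := make([]byte, 1<<20) // 1 MiB usually covers all goroutines
	n := runtime.Stack(buf, true)
	_, _ = os.Stderr.WriteString("\n--- STACKTRACE START ---\n\n")
	_, _ = os.Stderr.Write(buf[:n])
	_, _ = os.Stderr.WriteString("\n--- STACKTRACE END ---\n")
}

func main() {
	dumpStacktraceIfRequested()
}
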
diff --git a/cmd/restic/cmd_backup.go b/cmd/restic/cmd_backup.go
index ec901828b..6b5706855 100644
--- a/cmd/restic/cmd_backup.go
+++ b/cmd/restic/cmd_backup.go
@@ -89,6 +89,7 @@ type BackupOptions struct {
excludePatternOptions
Parent string
+ GroupBy restic.SnapshotGroupByOptions
Force bool
ExcludeOtherFS bool
ExcludeIfPresent []string
@@ -120,7 +121,9 @@ func init() {
cmdRoot.AddCommand(cmdBackup)
f := cmdBackup.Flags()
- f.StringVar(&backupOptions.Parent, "parent", "", "use this parent `snapshot` (default: last snapshot in the repository that has the same target files/directories, and is not newer than the snapshot time)")
+ f.StringVar(&backupOptions.Parent, "parent", "", "use this parent `snapshot` (default: latest snapshot in the group determined by --group-by and not newer than the timestamp determined by --time)")
+ backupOptions.GroupBy = restic.SnapshotGroupByOptions{Host: true, Path: true}
+ f.VarP(&backupOptions.GroupBy, "group-by", "g", "`group` snapshots by host, paths and/or tags, separated by comma (disable grouping with '')")
f.BoolVarP(&backupOptions.Force, "force", "f", false, `force re-reading the target files/directories (overrides the "parent" flag)`)
initExcludePatternOptions(f, &backupOptions.excludePatternOptions)
@@ -305,7 +308,7 @@ func (opts BackupOptions) Check(gopts GlobalOptions, args []string) error {
// collectRejectByNameFuncs returns a list of all functions which may reject data
// from being saved in a snapshot based on path only
-func collectRejectByNameFuncs(opts BackupOptions, repo *repository.Repository, targets []string) (fs []RejectByNameFunc, err error) {
+func collectRejectByNameFuncs(opts BackupOptions, repo *repository.Repository) (fs []RejectByNameFunc, err error) {
// exclude restic cache
if repo.Cache != nil {
f, err := rejectResticCache(repo)
@@ -340,7 +343,7 @@ func collectRejectByNameFuncs(opts BackupOptions, repo *repository.Repository, t
// collectRejectFuncs returns a list of all functions which may reject data
// from being saved in a snapshot based on path and file info
-func collectRejectFuncs(opts BackupOptions, repo *repository.Repository, targets []string) (fs []RejectFunc, err error) {
+func collectRejectFuncs(opts BackupOptions, targets []string) (fs []RejectFunc, err error) {
// allowed devices
if opts.ExcludeOtherFS && !opts.Stdin {
f, err := rejectByDevice(targets)
@@ -439,13 +442,18 @@ func findParentSnapshot(ctx context.Context, repo restic.Repository, opts Backup
if snName == "" {
snName = "latest"
}
- f := restic.SnapshotFilter{
- Hosts: []string{opts.Host},
- Paths: targets,
- TimestampLimit: timeStampLimit,
+ f := restic.SnapshotFilter{TimestampLimit: timeStampLimit}
+ if opts.GroupBy.Host {
+ f.Hosts = []string{opts.Host}
+ }
+ if opts.GroupBy.Path {
+ f.Paths = targets
+ }
+ if opts.GroupBy.Tag {
+ f.Tags = []restic.TagList{opts.Tags.Flatten()}
}
- sn, err := f.FindLatest(ctx, repo.Backend(), repo, snName)
+ sn, _, err := f.FindLatest(ctx, repo.Backend(), repo, snName)
// Snapshot not found is ok if no explicit parent was set
if opts.Parent == "" && errors.Is(err, restic.ErrNoSnapshotFound) {
err = nil
@@ -498,20 +506,23 @@ func runBackup(ctx context.Context, opts BackupOptions, gopts GlobalOptions, ter
if !gopts.JSON {
progressPrinter.V("lock repository")
}
- lock, ctx, err := lockRepo(ctx, repo)
- defer unlockRepo(lock)
- if err != nil {
- return err
+ if !opts.DryRun {
+ var lock *restic.Lock
+ lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
+ defer unlockRepo(lock)
+ if err != nil {
+ return err
+ }
}
// rejectByNameFuncs collect functions that can reject items from the backup based on path only
- rejectByNameFuncs, err := collectRejectByNameFuncs(opts, repo, targets)
+ rejectByNameFuncs, err := collectRejectByNameFuncs(opts, repo)
if err != nil {
return err
}
// rejectFuncs collect functions that can reject items from the backup based on path and file info
- rejectFuncs, err := collectRejectFuncs(opts, repo, targets)
+ rejectFuncs, err := collectRejectFuncs(opts, targets)
if err != nil {
return err
}
@@ -637,6 +648,7 @@ func runBackup(ctx context.Context, opts BackupOptions, gopts GlobalOptions, ter
Time: timeStamp,
Hostname: opts.Host,
ParentSnapshot: parentSnapshot,
+ ProgramVersion: "restic " + version,
}
if !gopts.JSON {
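
For context on the findParentSnapshot change above: the parent filter now only constrains the snapshot attributes selected via --group-by (host and path by default, as set in init()), instead of always matching host and paths. A simplified, hypothetical sketch of that selection logic — GroupBy and Filter are stand-ins for restic.SnapshotGroupByOptions and restic.SnapshotFilter, not the real types:

package main

import "fmt"

// GroupBy and Filter are simplified stand-ins for the restic types
// used in findParentSnapshot above.
type GroupBy struct{ Host, Path, Tag bool }

type Filter struct {
	Hosts []string
	Paths []string
	Tags  []string
}

// parentFilter builds the filter used to look up the parent snapshot:
// only the attributes enabled via --group-by constrain the search.
func parentFilter(g GroupBy, host string, targets, tags []string) Filter {
	var f Filter
	if g.Host {
		f.Hosts = []string{host}
	}
	if g.Path {
		f.Paths = targets
	}
	if g.Tag {
		f.Tags = tags
	}
	return f
}

func main() {
	// default grouping: host and path
	f := parentFilter(GroupBy{Host: true, Path: true}, "worklaptop", []string{"/home"}, nil)
	fmt.Printf("%+v\n", f) // {Hosts:[worklaptop] Paths:[/home] Tags:[]}
}
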
diff --git a/cmd/restic/cmd_backup_integration_test.go b/cmd/restic/cmd_backup_integration_test.go
new file mode 100644
index 000000000..fb7bef633
--- /dev/null
+++ b/cmd/restic/cmd_backup_integration_test.go
@@ -0,0 +1,569 @@
+package main
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "runtime"
+ "testing"
+
+ "github.com/restic/restic/internal/fs"
+ "github.com/restic/restic/internal/restic"
+ rtest "github.com/restic/restic/internal/test"
+ "github.com/restic/restic/internal/ui/termstatus"
+)
+
+func testRunBackupAssumeFailure(t testing.TB, dir string, target []string, opts BackupOptions, gopts GlobalOptions) error {
+ return withTermStatus(gopts, func(ctx context.Context, term *termstatus.Terminal) error {
+ t.Logf("backing up %v in %v", target, dir)
+ if dir != "" {
+ cleanup := rtest.Chdir(t, dir)
+ defer cleanup()
+ }
+
+ opts.GroupBy = restic.SnapshotGroupByOptions{Host: true, Path: true}
+ return runBackup(ctx, opts, gopts, term, target)
+ })
+}
+
+func testRunBackup(t testing.TB, dir string, target []string, opts BackupOptions, gopts GlobalOptions) {
+ err := testRunBackupAssumeFailure(t, dir, target, opts, gopts)
+ rtest.Assert(t, err == nil, "Error while backing up")
+}
+
+func TestBackup(t *testing.T) {
+ testBackup(t, false)
+}
+
+func TestBackupWithFilesystemSnapshots(t *testing.T) {
+ if runtime.GOOS == "windows" && fs.HasSufficientPrivilegesForVSS() == nil {
+ testBackup(t, true)
+ }
+}
+
+func testBackup(t *testing.T, useFsSnapshot bool) {
+ env, cleanup := withTestEnvironment(t)
+ defer cleanup()
+
+ testSetupBackupData(t, env)
+ opts := BackupOptions{UseFsSnapshot: useFsSnapshot}
+
+ // first backup
+ testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts)
+ testListSnapshots(t, env.gopts, 1)
+
+ testRunCheck(t, env.gopts)
+ stat1 := dirStats(env.repo)
+
+ // second backup, implicit incremental
+ testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts)
+ snapshotIDs := testListSnapshots(t, env.gopts, 2)
+
+ stat2 := dirStats(env.repo)
+ if stat2.size > stat1.size+stat1.size/10 {
+ t.Error("repository size has grown by more than 10 percent")
+ }
+ t.Logf("repository grown by %d bytes", stat2.size-stat1.size)
+
+ testRunCheck(t, env.gopts)
+ // third backup, explicit incremental
+ opts.Parent = snapshotIDs[0].String()
+ testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts)
+ snapshotIDs = testListSnapshots(t, env.gopts, 3)
+
+ stat3 := dirStats(env.repo)
+ if stat3.size > stat1.size+stat1.size/10 {
+ t.Error("repository size has grown by more than 10 percent")
+ }
+ t.Logf("repository grown by %d bytes", stat3.size-stat2.size)
+
+ // restore all backups and compare
+ for i, snapshotID := range snapshotIDs {
+ restoredir := filepath.Join(env.base, fmt.Sprintf("restore%d", i))
+ t.Logf("restoring snapshot %v to %v", snapshotID.Str(), restoredir)
+ testRunRestore(t, env.gopts, restoredir, snapshotID)
+ diff := directoriesContentsDiff(env.testdata, filepath.Join(restoredir, "testdata"))
+ rtest.Assert(t, diff == "", "directories are not equal: %v", diff)
+ }
+
+ testRunCheck(t, env.gopts)
+}
+
+func TestBackupWithRelativePath(t *testing.T) {
+ env, cleanup := withTestEnvironment(t)
+ defer cleanup()
+
+ testSetupBackupData(t, env)
+ opts := BackupOptions{}
+
+ // first backup
+ testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts)
+ firstSnapshotID := testListSnapshots(t, env.gopts, 1)[0]
+
+ // second backup, implicit incremental
+ testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts)
+
+ // test that the correct parent snapshot was used
+ latestSn, _ := testRunSnapshots(t, env.gopts)
+ rtest.Assert(t, latestSn != nil, "missing latest snapshot")
+ rtest.Assert(t, latestSn.Parent != nil && latestSn.Parent.Equal(firstSnapshotID), "second snapshot selected unexpected parent %v instead of %v", latestSn.Parent, firstSnapshotID)
+}
+
+func TestBackupParentSelection(t *testing.T) {
+ env, cleanup := withTestEnvironment(t)
+ defer cleanup()
+
+ testSetupBackupData(t, env)
+ opts := BackupOptions{}
+
+ // first backup
+ testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata/0/0"}, opts, env.gopts)
+ firstSnapshotID := testListSnapshots(t, env.gopts, 1)[0]
+
+ // second backup, sibling path
+ testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata/0/tests"}, opts, env.gopts)
+ testListSnapshots(t, env.gopts, 2)
+
+ // third backup, incremental for the first backup
+ testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata/0/0"}, opts, env.gopts)
+
+ // test that the correct parent snapshot was used
+ latestSn, _ := testRunSnapshots(t, env.gopts)
+ rtest.Assert(t, latestSn != nil, "missing latest snapshot")
+ rtest.Assert(t, latestSn.Parent != nil && latestSn.Parent.Equal(firstSnapshotID), "third snapshot selected unexpected parent %v instead of %v", latestSn.Parent, firstSnapshotID)
+}
+
+func TestDryRunBackup(t *testing.T) {
+ env, cleanup := withTestEnvironment(t)
+ defer cleanup()
+
+ testSetupBackupData(t, env)
+ opts := BackupOptions{}
+ dryOpts := BackupOptions{DryRun: true}
+
+ // dry run before first backup
+ testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, dryOpts, env.gopts)
+ snapshotIDs := testListSnapshots(t, env.gopts, 0)
+ packIDs := testRunList(t, "packs", env.gopts)
+ rtest.Assert(t, len(packIDs) == 0,
+ "expected no data, got %v", snapshotIDs)
+ indexIDs := testRunList(t, "index", env.gopts)
+ rtest.Assert(t, len(indexIDs) == 0,
+ "expected no index, got %v", snapshotIDs)
+
+ // first backup
+ testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts)
+ snapshotIDs = testListSnapshots(t, env.gopts, 1)
+ packIDs = testRunList(t, "packs", env.gopts)
+ indexIDs = testRunList(t, "index", env.gopts)
+
+ // dry run between backups
+ testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, dryOpts, env.gopts)
+ snapshotIDsAfter := testListSnapshots(t, env.gopts, 1)
+ rtest.Equals(t, snapshotIDs, snapshotIDsAfter)
+ dataIDsAfter := testRunList(t, "packs", env.gopts)
+ rtest.Equals(t, packIDs, dataIDsAfter)
+ indexIDsAfter := testRunList(t, "index", env.gopts)
+ rtest.Equals(t, indexIDs, indexIDsAfter)
+
+ // second backup, implicit incremental
+ testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts)
+ snapshotIDs = testListSnapshots(t, env.gopts, 2)
+ packIDs = testRunList(t, "packs", env.gopts)
+ indexIDs = testRunList(t, "index", env.gopts)
+
+ // another dry run
+ testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, dryOpts, env.gopts)
+ snapshotIDsAfter = testListSnapshots(t, env.gopts, 2)
+ rtest.Equals(t, snapshotIDs, snapshotIDsAfter)
+ dataIDsAfter = testRunList(t, "packs", env.gopts)
+ rtest.Equals(t, packIDs, dataIDsAfter)
+ indexIDsAfter = testRunList(t, "index", env.gopts)
+ rtest.Equals(t, indexIDs, indexIDsAfter)
+}
+
+func TestBackupNonExistingFile(t *testing.T) {
+ env, cleanup := withTestEnvironment(t)
+ defer cleanup()
+
+ testSetupBackupData(t, env)
+
+ _ = withRestoreGlobalOptions(func() error {
+ globalOptions.stderr = io.Discard
+
+ p := filepath.Join(env.testdata, "0", "0", "9")
+ dirs := []string{
+ filepath.Join(p, "0"),
+ filepath.Join(p, "1"),
+ filepath.Join(p, "nonexisting"),
+ filepath.Join(p, "5"),
+ }
+
+ opts := BackupOptions{}
+
+ testRunBackup(t, "", dirs, opts, env.gopts)
+ return nil
+ })
+}
+
+func TestBackupSelfHealing(t *testing.T) {
+ env, cleanup := withTestEnvironment(t)
+ defer cleanup()
+
+ testRunInit(t, env.gopts)
+
+ p := filepath.Join(env.testdata, "test/test")
+ rtest.OK(t, os.MkdirAll(filepath.Dir(p), 0755))
+ rtest.OK(t, appendRandomData(p, 5))
+
+ opts := BackupOptions{}
+
+ testRunBackup(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts)
+ testRunCheck(t, env.gopts)
+
+ // remove all data packs
+ removePacksExcept(env.gopts, t, restic.NewIDSet(), false)
+
+ testRunRebuildIndex(t, env.gopts)
+ // now the repo is also missing the data blob in the index; check should report this
+ testRunCheckMustFail(t, env.gopts)
+
+ // second backup should report an error but "heal" this situation
+ err := testRunBackupAssumeFailure(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts)
+ rtest.Assert(t, err != nil,
+ "backup should have reported an error")
+ testRunCheck(t, env.gopts)
+}
+
+func TestBackupTreeLoadError(t *testing.T) {
+ env, cleanup := withTestEnvironment(t)
+ defer cleanup()
+
+ testRunInit(t, env.gopts)
+ p := filepath.Join(env.testdata, "test/test")
+ rtest.OK(t, os.MkdirAll(filepath.Dir(p), 0755))
+ rtest.OK(t, appendRandomData(p, 5))
+
+ opts := BackupOptions{}
+ // Backup a subdirectory first, such that we can remove the tree pack for the subdirectory
+ testRunBackup(t, env.testdata, []string{"test"}, opts, env.gopts)
+
+ r, err := OpenRepository(context.TODO(), env.gopts)
+ rtest.OK(t, err)
+ rtest.OK(t, r.LoadIndex(context.TODO()))
+ treePacks := restic.NewIDSet()
+ r.Index().Each(context.TODO(), func(pb restic.PackedBlob) {
+ if pb.Type == restic.TreeBlob {
+ treePacks.Insert(pb.PackID)
+ }
+ })
+
+ testRunBackup(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts)
+ testRunCheck(t, env.gopts)
+
+ // delete the subdirectory pack first
+ for id := range treePacks {
+ rtest.OK(t, r.Backend().Remove(context.TODO(), restic.Handle{Type: restic.PackFile, Name: id.String()}))
+ }
+ testRunRebuildIndex(t, env.gopts)
+ // now the repo is missing the tree blob in the index; check should report this
+ testRunCheckMustFail(t, env.gopts)
+ // second backup should report an error but "heal" this situation
+ err = testRunBackupAssumeFailure(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts)
+ rtest.Assert(t, err != nil, "backup should have reported an error for the subdirectory")
+ testRunCheck(t, env.gopts)
+
+ // remove all tree packs
+ removePacksExcept(env.gopts, t, restic.NewIDSet(), true)
+ testRunRebuildIndex(t, env.gopts)
+ // now the repo is also missing the data blob in the index; check should report this
+ testRunCheckMustFail(t, env.gopts)
+ // second backup should report an error but "heal" this situation
+ err = testRunBackupAssumeFailure(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts)
+ rtest.Assert(t, err != nil, "backup should have reported an error")
+ testRunCheck(t, env.gopts)
+}
+
+var backupExcludeFilenames = []string{
+ "testfile1",
+ "foo.tar.gz",
+ "private/secret/passwords.txt",
+ "work/source/test.c",
+}
+
+func TestBackupExclude(t *testing.T) {
+ env, cleanup := withTestEnvironment(t)
+ defer cleanup()
+
+ testRunInit(t, env.gopts)
+
+ datadir := filepath.Join(env.base, "testdata")
+
+ for _, filename := range backupExcludeFilenames {
+ fp := filepath.Join(datadir, filename)
+ rtest.OK(t, os.MkdirAll(filepath.Dir(fp), 0755))
+
+ f, err := os.Create(fp)
+ rtest.OK(t, err)
+
+ fmt.Fprint(f, filename)
+ rtest.OK(t, f.Close())
+ }
+
+ snapshots := make(map[string]struct{})
+
+ opts := BackupOptions{}
+
+ testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts)
+ snapshots, snapshotID := lastSnapshot(snapshots, loadSnapshotMap(t, env.gopts))
+ files := testRunLs(t, env.gopts, snapshotID)
+ rtest.Assert(t, includes(files, "/testdata/foo.tar.gz"),
+ "expected file %q in first snapshot, but it's not included", "foo.tar.gz")
+
+ opts.Excludes = []string{"*.tar.gz"}
+ testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts)
+ snapshots, snapshotID = lastSnapshot(snapshots, loadSnapshotMap(t, env.gopts))
+ files = testRunLs(t, env.gopts, snapshotID)
+ rtest.Assert(t, !includes(files, "/testdata/foo.tar.gz"),
+ "expected file %q not in first snapshot, but it's included", "foo.tar.gz")
+
+ opts.Excludes = []string{"*.tar.gz", "private/secret"}
+ testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts)
+ _, snapshotID = lastSnapshot(snapshots, loadSnapshotMap(t, env.gopts))
+ files = testRunLs(t, env.gopts, snapshotID)
+ rtest.Assert(t, !includes(files, "/testdata/foo.tar.gz"),
+ "expected file %q not in first snapshot, but it's included", "foo.tar.gz")
+ rtest.Assert(t, !includes(files, "/testdata/private/secret/passwords.txt"),
+ "expected file %q not in first snapshot, but it's included", "passwords.txt")
+}
+
+func TestBackupErrors(t *testing.T) {
+ if runtime.GOOS == "windows" {
+ return
+ }
+ env, cleanup := withTestEnvironment(t)
+ defer cleanup()
+
+ testSetupBackupData(t, env)
+
+ // Assume failure
+ inaccessibleFile := filepath.Join(env.testdata, "0", "0", "9", "0")
+ rtest.OK(t, os.Chmod(inaccessibleFile, 0000))
+ defer func() {
+ rtest.OK(t, os.Chmod(inaccessibleFile, 0644))
+ }()
+ opts := BackupOptions{}
+ err := testRunBackupAssumeFailure(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts)
+ rtest.Assert(t, err != nil, "Assumed failure, but no error occurred.")
+ rtest.Assert(t, err == ErrInvalidSourceData, "Wrong error returned")
+ testListSnapshots(t, env.gopts, 1)
+}
+
+const (
+ incrementalFirstWrite = 10 * 1042 * 1024
+ incrementalSecondWrite = 1 * 1042 * 1024
+ incrementalThirdWrite = 1 * 1042 * 1024
+)
+
+func TestIncrementalBackup(t *testing.T) {
+ env, cleanup := withTestEnvironment(t)
+ defer cleanup()
+
+ testRunInit(t, env.gopts)
+
+ datadir := filepath.Join(env.base, "testdata")
+ testfile := filepath.Join(datadir, "testfile")
+
+ rtest.OK(t, appendRandomData(testfile, incrementalFirstWrite))
+
+ opts := BackupOptions{}
+
+ testRunBackup(t, "", []string{datadir}, opts, env.gopts)
+ testRunCheck(t, env.gopts)
+ stat1 := dirStats(env.repo)
+
+ rtest.OK(t, appendRandomData(testfile, incrementalSecondWrite))
+
+ testRunBackup(t, "", []string{datadir}, opts, env.gopts)
+ testRunCheck(t, env.gopts)
+ stat2 := dirStats(env.repo)
+ if stat2.size-stat1.size > incrementalFirstWrite {
+ t.Errorf("repository size has grown by more than %d bytes", incrementalFirstWrite)
+ }
+ t.Logf("repository grown by %d bytes", stat2.size-stat1.size)
+
+ rtest.OK(t, appendRandomData(testfile, incrementalThirdWrite))
+
+ testRunBackup(t, "", []string{datadir}, opts, env.gopts)
+ testRunCheck(t, env.gopts)
+ stat3 := dirStats(env.repo)
+ if stat3.size-stat2.size > incrementalFirstWrite {
+ t.Errorf("repository size has grown by more than %d bytes", incrementalFirstWrite)
+ }
+ t.Logf("repository grown by %d bytes", stat3.size-stat2.size)
+}
+
+func TestBackupTags(t *testing.T) {
+ env, cleanup := withTestEnvironment(t)
+ defer cleanup()
+
+ testSetupBackupData(t, env)
+ opts := BackupOptions{}
+
+ testRunBackup(t, "", []string{env.testdata}, opts, env.gopts)
+ testRunCheck(t, env.gopts)
+ newest, _ := testRunSnapshots(t, env.gopts)
+
+ if newest == nil {
+ t.Fatal("expected a backup, got nil")
+ }
+
+ rtest.Assert(t, len(newest.Tags) == 0,
+ "expected no tags, got %v", newest.Tags)
+ parent := newest
+
+ opts.Tags = restic.TagLists{[]string{"NL"}}
+ testRunBackup(t, "", []string{env.testdata}, opts, env.gopts)
+ testRunCheck(t, env.gopts)
+ newest, _ = testRunSnapshots(t, env.gopts)
+
+ if newest == nil {
+ t.Fatal("expected a backup, got nil")
+ }
+
+ rtest.Assert(t, len(newest.Tags) == 1 && newest.Tags[0] == "NL",
+ "expected one NL tag, got %v", newest.Tags)
+ // Tagged backup should have untagged backup as parent.
+ rtest.Assert(t, parent.ID.Equal(*newest.Parent),
+ "expected parent to be %v, got %v", parent.ID, newest.Parent)
+}
+
+func TestBackupProgramVersion(t *testing.T) {
+ env, cleanup := withTestEnvironment(t)
+ defer cleanup()
+
+ testSetupBackupData(t, env)
+ testRunBackup(t, "", []string{env.testdata}, BackupOptions{}, env.gopts)
+ newest, _ := testRunSnapshots(t, env.gopts)
+
+ if newest == nil {
+ t.Fatal("expected a backup, got nil")
+ }
+ resticVersion := "restic " + version
+ rtest.Assert(t, newest.ProgramVersion == resticVersion,
+ "expected %v, got %v", resticVersion, newest.ProgramVersion)
+}
+
+func TestQuietBackup(t *testing.T) {
+ env, cleanup := withTestEnvironment(t)
+ defer cleanup()
+
+ testSetupBackupData(t, env)
+ opts := BackupOptions{}
+
+ env.gopts.Quiet = false
+ testRunBackup(t, "", []string{env.testdata}, opts, env.gopts)
+ testListSnapshots(t, env.gopts, 1)
+
+ testRunCheck(t, env.gopts)
+
+ env.gopts.Quiet = true
+ testRunBackup(t, "", []string{env.testdata}, opts, env.gopts)
+ testListSnapshots(t, env.gopts, 2)
+
+ testRunCheck(t, env.gopts)
+}
+
+func TestHardLink(t *testing.T) {
+ // this test assumes a test set with a single directory containing hard linked files
+ env, cleanup := withTestEnvironment(t)
+ defer cleanup()
+
+ datafile := filepath.Join("testdata", "test.hl.tar.gz")
+ fd, err := os.Open(datafile)
+ if os.IsNotExist(err) {
+ t.Skipf("unable to find data file %q, skipping", datafile)
+ return
+ }
+ rtest.OK(t, err)
+ rtest.OK(t, fd.Close())
+
+ testRunInit(t, env.gopts)
+
+ rtest.SetupTarTestFixture(t, env.testdata, datafile)
+
+ linkTests := createFileSetPerHardlink(env.testdata)
+
+ opts := BackupOptions{}
+
+ // first backup
+ testRunBackup(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts)
+ snapshotIDs := testListSnapshots(t, env.gopts, 1)
+
+ testRunCheck(t, env.gopts)
+
+ // restore all backups and compare
+ for i, snapshotID := range snapshotIDs {
+ restoredir := filepath.Join(env.base, fmt.Sprintf("restore%d", i))
+ t.Logf("restoring snapshot %v to %v", snapshotID.Str(), restoredir)
+ testRunRestore(t, env.gopts, restoredir, snapshotID)
+ diff := directoriesContentsDiff(env.testdata, filepath.Join(restoredir, "testdata"))
+ rtest.Assert(t, diff == "", "directories are not equal %v", diff)
+
+ linkResults := createFileSetPerHardlink(filepath.Join(restoredir, "testdata"))
+ rtest.Assert(t, linksEqual(linkTests, linkResults),
+ "links are not equal")
+ }
+
+ testRunCheck(t, env.gopts)
+}
+
+func linksEqual(source, dest map[uint64][]string) bool {
+ for _, vs := range source {
+ found := false
+ for kd, vd := range dest {
+ if linkEqual(vs, vd) {
+ delete(dest, kd)
+ found = true
+ break
+ }
+ }
+ if !found {
+ return false
+ }
+ }
+
+ return len(dest) == 0
+}
+
+func linkEqual(source, dest []string) bool {
+ // equal if slices are equal without considering order
+ if source == nil && dest == nil {
+ return true
+ }
+
+ if source == nil || dest == nil {
+ return false
+ }
+
+ if len(source) != len(dest) {
+ return false
+ }
+
+ for i := range source {
+ found := false
+ for j := range dest {
+ if source[i] == dest[j] {
+ found = true
+ break
+ }
+ }
+ if !found {
+ return false
+ }
+ }
+
+ return true
+}
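
linkEqual above compares two path slices without considering order via a nested scan. Assuming no duplicate paths occur within one hard-link group, an equivalent check can be written by sorting copies of both slices; a minimal sketch:

package main

import (
	"fmt"
	"sort"
)

// linkEqualSorted reports whether source and dest contain the same
// entries regardless of order. It sorts copies so the inputs stay intact.
func linkEqualSorted(source, dest []string) bool {
	if len(source) != len(dest) {
		return false
	}
	s := append([]string(nil), source...)
	d := append([]string(nil), dest...)
	sort.Strings(s)
	sort.Strings(d)
	for i := range s {
		if s[i] != d[i] {
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(linkEqualSorted([]string{"a", "b"}, []string{"b", "a"})) // true
}
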
diff --git a/cmd/restic/cmd_cache.go b/cmd/restic/cmd_cache.go
index 334063fdc..4a10d1027 100644
--- a/cmd/restic/cmd_cache.go
+++ b/cmd/restic/cmd_cache.go
@@ -155,7 +155,7 @@ func runCache(opts CacheOptions, gopts GlobalOptions, args []string) error {
})
}
- _ = tab.Write(gopts.stdout)
+ _ = tab.Write(globalOptions.stdout)
Printf("%d cache dirs in %s\n", len(dirs), cachedir)
return nil
diff --git a/cmd/restic/cmd_cat.go b/cmd/restic/cmd_cat.go
index f46502d5a..7c4373812 100644
--- a/cmd/restic/cmd_cat.go
+++ b/cmd/restic/cmd_cat.go
@@ -13,7 +13,7 @@ import (
)
var cmdCat = &cobra.Command{
- Use: "cat [flags] [pack|blob|snapshot|index|key|masterkey|config|lock] ID",
+ Use: "cat [flags] [masterkey|config|pack ID|blob ID|snapshot ID|index ID|key ID|lock ID|tree snapshot:subfolder]",
Short: "Print internal objects to stdout",
Long: `
The "cat" command is used to print internal objects to stdout.
@@ -45,7 +45,7 @@ func runCat(ctx context.Context, gopts GlobalOptions, args []string) error {
if !gopts.NoLock {
var lock *restic.Lock
- lock, ctx, err = lockRepo(ctx, repo)
+ lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
defer unlockRepo(lock)
if err != nil {
return err
@@ -55,7 +55,7 @@ func runCat(ctx context.Context, gopts GlobalOptions, args []string) error {
tpe := args[0]
var id restic.ID
- if tpe != "masterkey" && tpe != "config" && tpe != "snapshot" {
+ if tpe != "masterkey" && tpe != "config" && tpe != "snapshot" && tpe != "tree" {
id, err = restic.ParseID(args[1])
if err != nil {
return errors.Fatalf("unable to parse ID: %v\n", err)
@@ -72,7 +72,7 @@ func runCat(ctx context.Context, gopts GlobalOptions, args []string) error {
Println(string(buf))
return nil
case "index":
- buf, err := repo.LoadUnpacked(ctx, restic.IndexFile, id, nil)
+ buf, err := repo.LoadUnpacked(ctx, restic.IndexFile, id)
if err != nil {
return err
}
@@ -80,7 +80,7 @@ func runCat(ctx context.Context, gopts GlobalOptions, args []string) error {
Println(string(buf))
return nil
case "snapshot":
- sn, err := restic.FindSnapshot(ctx, repo.Backend(), repo, args[1])
+ sn, _, err := restic.FindSnapshot(ctx, repo.Backend(), repo, args[1])
if err != nil {
return errors.Fatalf("could not find snapshot: %v\n", err)
}
@@ -165,6 +165,29 @@ func runCat(ctx context.Context, gopts GlobalOptions, args []string) error {
return errors.Fatal("blob not found")
+ case "tree":
+ sn, subfolder, err := restic.FindSnapshot(ctx, repo.Backend(), repo, args[1])
+ if err != nil {
+ return errors.Fatalf("could not find snapshot: %v\n", err)
+ }
+
+ err = repo.LoadIndex(ctx)
+ if err != nil {
+ return err
+ }
+
+ sn.Tree, err = restic.FindTreeDirectory(ctx, repo, sn.Tree, subfolder)
+ if err != nil {
+ return err
+ }
+
+ buf, err := repo.LoadBlob(ctx, restic.TreeBlob, *sn.Tree, nil)
+ if err != nil {
+ return err
+ }
+ _, err = globalOptions.stdout.Write(buf)
+ return err
+
default:
return errors.Fatal("invalid type")
}
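
The new "tree" case above relies on restic.FindSnapshot splitting a "snapshot:subfolder" argument into a snapshot reference and a path inside that snapshot (hence the extra subfolder return value). A hypothetical sketch of such a split — splitSnapshotSpec is an illustration, not restic's actual helper:

package main

import (
	"fmt"
	"strings"
)

// splitSnapshotSpec separates "latest:/home/user" into the snapshot
// reference ("latest") and the subfolder ("/home/user"). Specs without
// a colon refer to the snapshot root.
func splitSnapshotSpec(spec string) (snapshot, subfolder string) {
	if i := strings.Index(spec, ":"); i >= 0 {
		return spec[:i], spec[i+1:]
	}
	return spec, ""
}

func main() {
	sn, sub := splitSnapshotSpec("latest:/home/user")
	fmt.Println(sn, sub) // latest /home/user
}
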
diff --git a/cmd/restic/cmd_check.go b/cmd/restic/cmd_check.go
index be9dd5130..3c4c9daa9 100644
--- a/cmd/restic/cmd_check.go
+++ b/cmd/restic/cmd_check.go
@@ -16,6 +16,7 @@ import (
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/fs"
"github.com/restic/restic/internal/restic"
+ "github.com/restic/restic/internal/ui"
)
var cmdCheck = &cobra.Command{
@@ -65,7 +66,7 @@ func init() {
// MarkDeprecated only returns an error when the flag is not found
panic(err)
}
- f.BoolVar(&checkOptions.WithCache, "with-cache", false, "use the cache")
+ f.BoolVar(&checkOptions.WithCache, "with-cache", false, "use existing cache, only read uncached data from repository")
}
func checkFlags(opts CheckOptions) error {
@@ -97,7 +98,7 @@ func checkFlags(opts CheckOptions) error {
}
} else {
- fileSize, err := parseSizeStr(opts.ReadDataSubset)
+ fileSize, err := ui.ParseBytes(opts.ReadDataSubset)
if err != nil {
return argumentError
}
@@ -211,7 +212,7 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args
if !gopts.NoLock {
Verbosef("create exclusive lock for repository\n")
var lock *restic.Lock
- lock, ctx, err = lockRepoExclusive(ctx, repo)
+ lock, ctx, err = lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON)
defer unlockRepo(lock)
if err != nil {
return err
@@ -245,7 +246,7 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args
}
if suggestIndexRebuild {
- Printf("Duplicate packs/old indexes are non-critical, you can run `restic rebuild-index' to correct this.\n")
+ Printf("Duplicate packs/old indexes are non-critical, you can run `restic repair index' to correct this.\n")
}
if mixedFound {
Printf("Mixed packs with tree and data blobs are non-critical, you can run `restic prune` to correct this.\n")
@@ -363,7 +364,7 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args
if repoSize == 0 {
return errors.Fatal("Cannot read from a repository having size 0")
}
- subsetSize, _ := parseSizeStr(opts.ReadDataSubset)
+ subsetSize, _ := ui.ParseBytes(opts.ReadDataSubset)
if subsetSize > repoSize {
subsetSize = repoSize
}
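
checkFlags and runCheck now parse --read-data-subset sizes via ui.ParseBytes instead of the old local parseSizeStr. A rough sketch of what such a size parser does, assuming the usual K/M/G/T suffixes with binary (1024-based) multipliers — not the exact restic implementation:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseBytes converts strings like "512", "10K" or "2.5G" into a byte count.
func parseBytes(s string) (int64, error) {
	s = strings.TrimSpace(s)
	mult := int64(1)
	switch {
	case strings.HasSuffix(s, "K"), strings.HasSuffix(s, "k"):
		mult, s = 1<<10, s[:len(s)-1]
	case strings.HasSuffix(s, "M"), strings.HasSuffix(s, "m"):
		mult, s = 1<<20, s[:len(s)-1]
	case strings.HasSuffix(s, "G"), strings.HasSuffix(s, "g"):
		mult, s = 1<<30, s[:len(s)-1]
	case strings.HasSuffix(s, "T"), strings.HasSuffix(s, "t"):
		mult, s = 1<<40, s[:len(s)-1]
	}
	v, err := strconv.ParseFloat(s, 64)
	if err != nil {
		return 0, err
	}
	return int64(v * float64(mult)), nil
}

func main() {
	n, _ := parseBytes("2.5G")
	fmt.Println(n) // 2684354560
}
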
diff --git a/cmd/restic/cmd_check_integration_test.go b/cmd/restic/cmd_check_integration_test.go
new file mode 100644
index 000000000..9eb4fec62
--- /dev/null
+++ b/cmd/restic/cmd_check_integration_test.go
@@ -0,0 +1,34 @@
+package main
+
+import (
+ "context"
+ "testing"
+
+ rtest "github.com/restic/restic/internal/test"
+)
+
+func testRunCheck(t testing.TB, gopts GlobalOptions) {
+ t.Helper()
+ output, err := testRunCheckOutput(gopts, true)
+ if err != nil {
+ t.Error(output)
+ t.Fatalf("unexpected error: %+v", err)
+ }
+}
+
+func testRunCheckMustFail(t testing.TB, gopts GlobalOptions) {
+ t.Helper()
+ _, err := testRunCheckOutput(gopts, false)
+ rtest.Assert(t, err != nil, "expected non nil error after check of damaged repository")
+}
+
+func testRunCheckOutput(gopts GlobalOptions, checkUnused bool) (string, error) {
+ buf, err := withCaptureStdout(func() error {
+ opts := CheckOptions{
+ ReadData: true,
+ CheckUnused: checkUnused,
+ }
+ return runCheck(context.TODO(), opts, gopts, nil)
+ })
+ return buf.String(), err
+}
diff --git a/cmd/restic/cmd_copy.go b/cmd/restic/cmd_copy.go
index 2f095972a..eaa0ef81a 100644
--- a/cmd/restic/cmd_copy.go
+++ b/cmd/restic/cmd_copy.go
@@ -6,6 +6,7 @@ import (
"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/debug"
+ "github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/repository"
"github.com/restic/restic/internal/restic"
"golang.org/x/sync/errgroup"
@@ -74,14 +75,14 @@ func runCopy(ctx context.Context, opts CopyOptions, gopts GlobalOptions, args []
if !gopts.NoLock {
var srcLock *restic.Lock
- srcLock, ctx, err = lockRepo(ctx, srcRepo)
+ srcLock, ctx, err = lockRepo(ctx, srcRepo, gopts.RetryLock, gopts.JSON)
defer unlockRepo(srcLock)
if err != nil {
return err
}
}
- dstLock, ctx, err := lockRepo(ctx, dstRepo)
+ dstLock, ctx, err := lockRepo(ctx, dstRepo, gopts.RetryLock, gopts.JSON)
defer unlockRepo(dstLock)
if err != nil {
return err
@@ -236,5 +237,8 @@ func copyTree(ctx context.Context, srcRepo restic.Repository, dstRepo restic.Rep
bar := newProgressMax(!quiet, uint64(len(packList)), "packs copied")
_, err = repository.Repack(ctx, srcRepo, dstRepo, packList, copyBlobs, bar)
bar.Done()
- return err
+ if err != nil {
+ return errors.Fatal(err.Error())
+ }
+ return nil
}
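
copyTree now wraps repack failures in errors.Fatal so they surface as fatal conditions with a non-zero exit status. A minimal sketch of that fatal-error pattern with hypothetical types — restic's internal errors package is more elaborate:

package main

import (
	"errors"
	"fmt"
)

// fatalError marks an error that should abort the command with a
// non-zero exit status.
type fatalError string

func (e fatalError) Error() string { return string(e) }

// Fatal converts a message into a fatal error value.
func Fatal(msg string) error { return fatalError(msg) }

func main() {
	err := Fatal("repack failed")
	var fe fatalError
	fmt.Println(errors.As(err, &fe)) // true: the top-level handler can exit(1)
}
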
diff --git a/cmd/restic/cmd_copy_integration_test.go b/cmd/restic/cmd_copy_integration_test.go
new file mode 100644
index 000000000..1c8837690
--- /dev/null
+++ b/cmd/restic/cmd_copy_integration_test.go
@@ -0,0 +1,136 @@
+package main
+
+import (
+ "context"
+ "fmt"
+ "path/filepath"
+ "testing"
+
+ rtest "github.com/restic/restic/internal/test"
+)
+
+func testRunCopy(t testing.TB, srcGopts GlobalOptions, dstGopts GlobalOptions) {
+ gopts := srcGopts
+ gopts.Repo = dstGopts.Repo
+ gopts.password = dstGopts.password
+ copyOpts := CopyOptions{
+ secondaryRepoOptions: secondaryRepoOptions{
+ Repo: srcGopts.Repo,
+ password: srcGopts.password,
+ },
+ }
+
+ rtest.OK(t, runCopy(context.TODO(), copyOpts, gopts, nil))
+}
+
+func TestCopy(t *testing.T) {
+ env, cleanup := withTestEnvironment(t)
+ defer cleanup()
+ env2, cleanup2 := withTestEnvironment(t)
+ defer cleanup2()
+
+ testSetupBackupData(t, env)
+ opts := BackupOptions{}
+ testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9")}, opts, env.gopts)
+ testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "2")}, opts, env.gopts)
+ testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "3")}, opts, env.gopts)
+ testRunCheck(t, env.gopts)
+
+ testRunInit(t, env2.gopts)
+ testRunCopy(t, env.gopts, env2.gopts)
+
+ snapshotIDs := testListSnapshots(t, env.gopts, 3)
+ copiedSnapshotIDs := testListSnapshots(t, env2.gopts, 3)
+
+ // Check that the copies size seems reasonable
+ stat := dirStats(env.repo)
+ stat2 := dirStats(env2.repo)
+ sizeDiff := int64(stat.size) - int64(stat2.size)
+ if sizeDiff < 0 {
+ sizeDiff = -sizeDiff
+ }
+ rtest.Assert(t, sizeDiff < int64(stat.size)/50, "expected less than 2%% size difference: %v vs. %v",
+ stat.size, stat2.size)
+
+ // Check integrity of the copy
+ testRunCheck(t, env2.gopts)
+
+ // Check that the copied snapshots have the same tree contents as the old ones (= identical tree hash)
+ origRestores := make(map[string]struct{})
+ for i, snapshotID := range snapshotIDs {
+ restoredir := filepath.Join(env.base, fmt.Sprintf("restore%d", i))
+ origRestores[restoredir] = struct{}{}
+ testRunRestore(t, env.gopts, restoredir, snapshotID)
+ }
+ for i, snapshotID := range copiedSnapshotIDs {
+ restoredir := filepath.Join(env2.base, fmt.Sprintf("restore%d", i))
+ testRunRestore(t, env2.gopts, restoredir, snapshotID)
+ foundMatch := false
+ for cmpdir := range origRestores {
+ diff := directoriesContentsDiff(restoredir, cmpdir)
+ if diff == "" {
+ delete(origRestores, cmpdir)
+ foundMatch = true
+ }
+ }
+
+ rtest.Assert(t, foundMatch, "found no counterpart for snapshot %v", snapshotID)
+ }
+
+ rtest.Assert(t, len(origRestores) == 0, "found snapshots that were not copied")
+}
+
+func TestCopyIncremental(t *testing.T) {
+ env, cleanup := withTestEnvironment(t)
+ defer cleanup()
+ env2, cleanup2 := withTestEnvironment(t)
+ defer cleanup2()
+
+ testSetupBackupData(t, env)
+ opts := BackupOptions{}
+ testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9")}, opts, env.gopts)
+ testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "2")}, opts, env.gopts)
+ testRunCheck(t, env.gopts)
+
+ testRunInit(t, env2.gopts)
+ testRunCopy(t, env.gopts, env2.gopts)
+
+ testListSnapshots(t, env.gopts, 2)
+ testListSnapshots(t, env2.gopts, 2)
+
+ // Check that the copies size seems reasonable
+ testRunCheck(t, env2.gopts)
+
+ // check that no snapshots are copied, as there are no new ones
+ testRunCopy(t, env.gopts, env2.gopts)
+ testRunCheck(t, env2.gopts)
+ testListSnapshots(t, env2.gopts, 2)
+
+ // check that only new snapshots are copied
+ testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "3")}, opts, env.gopts)
+ testRunCopy(t, env.gopts, env2.gopts)
+ testRunCheck(t, env2.gopts)
+ testListSnapshots(t, env.gopts, 3)
+ testListSnapshots(t, env2.gopts, 3)
+
+ // also test the reverse direction
+ testRunCopy(t, env2.gopts, env.gopts)
+ testRunCheck(t, env.gopts)
+ testListSnapshots(t, env.gopts, 3)
+}
+
+func TestCopyUnstableJSON(t *testing.T) {
+ env, cleanup := withTestEnvironment(t)
+ defer cleanup()
+ env2, cleanup2 := withTestEnvironment(t)
+ defer cleanup2()
+
+ // contains a symlink created using `ln -s '../i/'$'\355\246\361''d/samba' broken-symlink`
+ datafile := filepath.Join("testdata", "copy-unstable-json.tar.gz")
+ rtest.SetupTarTestFixture(t, env.base, datafile)
+
+ testRunInit(t, env2.gopts)
+ testRunCopy(t, env.gopts, env2.gopts)
+ testRunCheck(t, env2.gopts)
+ testListSnapshots(t, env2.gopts, 1)
+}
diff --git a/cmd/restic/cmd_debug.go b/cmd/restic/cmd_debug.go
index c8626d46c..a54200c45 100644
--- a/cmd/restic/cmd_debug.go
+++ b/cmd/restic/cmd_debug.go
@@ -156,7 +156,7 @@ func runDebugDump(ctx context.Context, gopts GlobalOptions, args []string) error
if !gopts.NoLock {
var lock *restic.Lock
- lock, ctx, err = lockRepo(ctx, repo)
+ lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
defer unlockRepo(lock)
if err != nil {
return err
@@ -167,20 +167,20 @@ func runDebugDump(ctx context.Context, gopts GlobalOptions, args []string) error
switch tpe {
case "indexes":
- return dumpIndexes(ctx, repo, gopts.stdout)
+ return dumpIndexes(ctx, repo, globalOptions.stdout)
case "snapshots":
- return debugPrintSnapshots(ctx, repo, gopts.stdout)
+ return debugPrintSnapshots(ctx, repo, globalOptions.stdout)
case "packs":
- return printPacks(ctx, repo, gopts.stdout)
+ return printPacks(ctx, repo, globalOptions.stdout)
case "all":
Printf("snapshots:\n")
- err := debugPrintSnapshots(ctx, repo, gopts.stdout)
+ err := debugPrintSnapshots(ctx, repo, globalOptions.stdout)
if err != nil {
return err
}
Printf("\nindexes:\n")
- err = dumpIndexes(ctx, repo, gopts.stdout)
+ err = dumpIndexes(ctx, repo, globalOptions.stdout)
if err != nil {
return err
}
@@ -462,7 +462,7 @@ func runDebugExamine(ctx context.Context, gopts GlobalOptions, args []string) er
if !gopts.NoLock {
var lock *restic.Lock
- lock, ctx, err = lockRepo(ctx, repo)
+ lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
defer unlockRepo(lock)
if err != nil {
return err
diff --git a/cmd/restic/cmd_diff.go b/cmd/restic/cmd_diff.go
index 0000fd18a..28e60f464 100644
--- a/cmd/restic/cmd_diff.go
+++ b/cmd/restic/cmd_diff.go
@@ -54,12 +54,12 @@ func init() {
f.BoolVar(&diffOptions.ShowMetadata, "metadata", false, "print changes in metadata")
}
-func loadSnapshot(ctx context.Context, be restic.Lister, repo restic.Repository, desc string) (*restic.Snapshot, error) {
- sn, err := restic.FindSnapshot(ctx, be, repo, desc)
+func loadSnapshot(ctx context.Context, be restic.Lister, repo restic.Repository, desc string) (*restic.Snapshot, string, error) {
+ sn, subfolder, err := restic.FindSnapshot(ctx, be, repo, desc)
if err != nil {
- return nil, errors.Fatal(err.Error())
+ return nil, "", errors.Fatal(err.Error())
}
- return sn, err
+ return sn, subfolder, err
}
// Comparer collects all things needed to compare two snapshots.
@@ -334,7 +334,7 @@ func runDiff(ctx context.Context, opts DiffOptions, gopts GlobalOptions, args []
if !gopts.NoLock {
var lock *restic.Lock
- lock, ctx, err = lockRepo(ctx, repo)
+ lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
defer unlockRepo(lock)
if err != nil {
return err
@@ -346,12 +346,12 @@ func runDiff(ctx context.Context, opts DiffOptions, gopts GlobalOptions, args []
if err != nil {
return err
}
- sn1, err := loadSnapshot(ctx, be, repo, args[0])
+ sn1, subfolder1, err := loadSnapshot(ctx, be, repo, args[0])
if err != nil {
return err
}
- sn2, err := loadSnapshot(ctx, be, repo, args[1])
+ sn2, subfolder2, err := loadSnapshot(ctx, be, repo, args[1])
if err != nil {
return err
}
@@ -372,6 +372,16 @@ func runDiff(ctx context.Context, opts DiffOptions, gopts GlobalOptions, args []
return errors.Errorf("snapshot %v has nil tree", sn2.ID().Str())
}
+ sn1.Tree, err = restic.FindTreeDirectory(ctx, repo, sn1.Tree, subfolder1)
+ if err != nil {
+ return err
+ }
+
+ sn2.Tree, err = restic.FindTreeDirectory(ctx, repo, sn2.Tree, subfolder2)
+ if err != nil {
+ return err
+ }
+
c := &Comparer{
repo: repo,
opts: diffOptions,
@@ -381,7 +391,7 @@ func runDiff(ctx context.Context, opts DiffOptions, gopts GlobalOptions, args []
}
if gopts.JSON {
- enc := json.NewEncoder(gopts.stdout)
+ enc := json.NewEncoder(globalOptions.stdout)
c.printChange = func(change *Change) {
err := enc.Encode(change)
if err != nil {
@@ -415,7 +425,7 @@ func runDiff(ctx context.Context, opts DiffOptions, gopts GlobalOptions, args []
updateBlobs(repo, stats.BlobsAfter.Sub(both).Sub(stats.BlobsCommon), &stats.Added)
if gopts.JSON {
- err := json.NewEncoder(gopts.stdout).Encode(stats)
+ err := json.NewEncoder(globalOptions.stdout).Encode(stats)
if err != nil {
Warnf("JSON encode failed: %v\n", err)
}
diff --git a/cmd/restic/cmd_diff_integration_test.go b/cmd/restic/cmd_diff_integration_test.go
new file mode 100644
index 000000000..8782053ed
--- /dev/null
+++ b/cmd/restic/cmd_diff_integration_test.go
@@ -0,0 +1,193 @@
+package main
+
+import (
+ "bufio"
+ "context"
+ "encoding/json"
+ "io"
+ "os"
+ "path/filepath"
+ "regexp"
+ "strings"
+ "testing"
+
+ rtest "github.com/restic/restic/internal/test"
+)
+
+func testRunDiffOutput(gopts GlobalOptions, firstSnapshotID string, secondSnapshotID string) (string, error) {
+ buf, err := withCaptureStdout(func() error {
+ opts := DiffOptions{
+ ShowMetadata: false,
+ }
+ return runDiff(context.TODO(), opts, gopts, []string{firstSnapshotID, secondSnapshotID})
+ })
+ return buf.String(), err
+}
+
+func copyFile(dst string, src string) error {
+ srcFile, err := os.Open(src)
+ if err != nil {
+ return err
+ }
+
+ dstFile, err := os.Create(dst)
+ if err != nil {
+ // ignore subsequent errors
+ _ = srcFile.Close()
+ return err
+ }
+
+ _, err = io.Copy(dstFile, srcFile)
+ if err != nil {
+ // ignore subsequent errors
+ _ = srcFile.Close()
+ _ = dstFile.Close()
+ return err
+ }
+
+ err = srcFile.Close()
+ if err != nil {
+ // ignore subsequent errors
+ _ = dstFile.Close()
+ return err
+ }
+
+ err = dstFile.Close()
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+var diffOutputRegexPatterns = []string{
+ "-.+modfile",
+ "M.+modfile1",
+ "\\+.+modfile2",
+ "\\+.+modfile3",
+ "\\+.+modfile4",
+ "-.+submoddir",
+ "-.+submoddir.subsubmoddir",
+ "\\+.+submoddir2",
+ "\\+.+submoddir2.subsubmoddir",
+ "Files: +2 new, +1 removed, +1 changed",
+ "Dirs: +3 new, +2 removed",
+ "Data Blobs: +2 new, +1 removed",
+ "Added: +7[0-9]{2}\\.[0-9]{3} KiB",
+ "Removed: +2[0-9]{2}\\.[0-9]{3} KiB",
+}
+
+func setupDiffRepo(t *testing.T) (*testEnvironment, func(), string, string) {
+ env, cleanup := withTestEnvironment(t)
+ testRunInit(t, env.gopts)
+
+ datadir := filepath.Join(env.base, "testdata")
+ testdir := filepath.Join(datadir, "testdir")
+ subtestdir := filepath.Join(testdir, "subtestdir")
+ testfile := filepath.Join(testdir, "testfile")
+
+ rtest.OK(t, os.Mkdir(testdir, 0755))
+ rtest.OK(t, os.Mkdir(subtestdir, 0755))
+ rtest.OK(t, appendRandomData(testfile, 256*1024))
+
+ moddir := filepath.Join(datadir, "moddir")
+ submoddir := filepath.Join(moddir, "submoddir")
+ subsubmoddir := filepath.Join(submoddir, "subsubmoddir")
+ modfile := filepath.Join(moddir, "modfile")
+ rtest.OK(t, os.Mkdir(moddir, 0755))
+ rtest.OK(t, os.Mkdir(submoddir, 0755))
+ rtest.OK(t, os.Mkdir(subsubmoddir, 0755))
+ rtest.OK(t, copyFile(modfile, testfile))
+ rtest.OK(t, appendRandomData(modfile+"1", 256*1024))
+
+ snapshots := make(map[string]struct{})
+ opts := BackupOptions{}
+ testRunBackup(t, "", []string{datadir}, opts, env.gopts)
+ snapshots, firstSnapshotID := lastSnapshot(snapshots, loadSnapshotMap(t, env.gopts))
+
+ rtest.OK(t, os.Rename(modfile, modfile+"3"))
+ rtest.OK(t, os.Rename(submoddir, submoddir+"2"))
+ rtest.OK(t, appendRandomData(modfile+"1", 256*1024))
+ rtest.OK(t, appendRandomData(modfile+"2", 256*1024))
+ rtest.OK(t, os.Mkdir(modfile+"4", 0755))
+
+ testRunBackup(t, "", []string{datadir}, opts, env.gopts)
+ _, secondSnapshotID := lastSnapshot(snapshots, loadSnapshotMap(t, env.gopts))
+
+ return env, cleanup, firstSnapshotID, secondSnapshotID
+}
+
+func TestDiff(t *testing.T) {
+ env, cleanup, firstSnapshotID, secondSnapshotID := setupDiffRepo(t)
+ defer cleanup()
+
+ // quiet suppresses the diff output except for the summary
+ env.gopts.Quiet = false
+ _, err := testRunDiffOutput(env.gopts, "", secondSnapshotID)
+ rtest.Assert(t, err != nil, "expected error on invalid snapshot id")
+
+ out, err := testRunDiffOutput(env.gopts, firstSnapshotID, secondSnapshotID)
+ rtest.OK(t, err)
+
+ for _, pattern := range diffOutputRegexPatterns {
+ r, err := regexp.Compile(pattern)
+ rtest.Assert(t, err == nil, "failed to compile regexp %v", pattern)
+ rtest.Assert(t, r.MatchString(out), "expected pattern %v in output, got\n%v", pattern, out)
+ }
+
+ // check quiet output
+ env.gopts.Quiet = true
+ outQuiet, err := testRunDiffOutput(env.gopts, firstSnapshotID, secondSnapshotID)
+ rtest.OK(t, err)
+
+ rtest.Assert(t, len(outQuiet) < len(out), "expected shorter output on quiet mode %v vs. %v", len(outQuiet), len(out))
+}
+
+type typeSniffer struct {
+ MessageType string `json:"message_type"`
+}
+
+func TestDiffJSON(t *testing.T) {
+ env, cleanup, firstSnapshotID, secondSnapshotID := setupDiffRepo(t)
+ defer cleanup()
+
+ // quiet suppresses the diff output except for the summary
+ env.gopts.Quiet = false
+ env.gopts.JSON = true
+ out, err := testRunDiffOutput(env.gopts, firstSnapshotID, secondSnapshotID)
+ rtest.OK(t, err)
+
+ var stat DiffStatsContainer
+ var changes int
+
+ scanner := bufio.NewScanner(strings.NewReader(out))
+ for scanner.Scan() {
+ line := scanner.Text()
+ var sniffer typeSniffer
+ rtest.OK(t, json.Unmarshal([]byte(line), &sniffer))
+ switch sniffer.MessageType {
+ case "change":
+ changes++
+ case "statistics":
+ rtest.OK(t, json.Unmarshal([]byte(line), &stat))
+ default:
+ t.Fatalf("unexpected message type %v", sniffer.MessageType)
+ }
+ }
+ rtest.Equals(t, 9, changes)
+ rtest.Assert(t, stat.Added.Files == 2 && stat.Added.Dirs == 3 && stat.Added.DataBlobs == 2 &&
+ stat.Removed.Files == 1 && stat.Removed.Dirs == 2 && stat.Removed.DataBlobs == 1 &&
+ stat.ChangedFiles == 1, "unexpected statistics")
+
+ // check quiet output
+ env.gopts.Quiet = true
+ outQuiet, err := testRunDiffOutput(env.gopts, firstSnapshotID, secondSnapshotID)
+ rtest.OK(t, err)
+
+ stat = DiffStatsContainer{}
+ rtest.OK(t, json.Unmarshal([]byte(outQuiet), &stat))
+ rtest.Assert(t, stat.Added.Files == 2 && stat.Added.Dirs == 3 && stat.Added.DataBlobs == 2 &&
+ stat.Removed.Files == 1 && stat.Removed.Dirs == 2 && stat.Removed.DataBlobs == 1 &&
+ stat.ChangedFiles == 1, "unexpected statistics")
+ rtest.Assert(t, stat.SourceSnapshot == firstSnapshotID && stat.TargetSnapshot == secondSnapshotID, "unexpected snapshot ids")
+}
diff --git a/cmd/restic/cmd_dump.go b/cmd/restic/cmd_dump.go
index cda7b65b9..9acae7ca8 100644
--- a/cmd/restic/cmd_dump.go
+++ b/cmd/restic/cmd_dump.go
@@ -132,14 +132,14 @@ func runDump(ctx context.Context, opts DumpOptions, gopts GlobalOptions, args []
if !gopts.NoLock {
var lock *restic.Lock
- lock, ctx, err = lockRepo(ctx, repo)
+ lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
defer unlockRepo(lock)
if err != nil {
return err
}
}
- sn, err := (&restic.SnapshotFilter{
+ sn, subfolder, err := (&restic.SnapshotFilter{
Hosts: opts.Hosts,
Paths: opts.Paths,
Tags: opts.Tags,
@@ -153,6 +153,11 @@ func runDump(ctx context.Context, opts DumpOptions, gopts GlobalOptions, args []
return err
}
+ sn.Tree, err = restic.FindTreeDirectory(ctx, repo, sn.Tree, subfolder)
+ if err != nil {
+ return err
+ }
+
tree, err := restic.LoadTree(ctx, repo, *sn.Tree)
if err != nil {
return errors.Fatalf("loading tree for snapshot %q failed: %v", snapshotIDString, err)
diff --git a/cmd/restic/cmd_find.go b/cmd/restic/cmd_find.go
index e5457c3be..181d8595d 100644
--- a/cmd/restic/cmd_find.go
+++ b/cmd/restic/cmd_find.go
@@ -51,6 +51,7 @@ type FindOptions struct {
PackID, ShowPackID bool
CaseInsensitive bool
ListLong bool
+ HumanReadable bool
restic.SnapshotFilter
}
@@ -69,6 +70,7 @@ func init() {
f.BoolVar(&findOptions.ShowPackID, "show-pack-id", false, "display the pack-ID the blobs belong to (with --blob or --tree)")
f.BoolVarP(&findOptions.CaseInsensitive, "ignore-case", "i", false, "ignore case for pattern")
f.BoolVarP(&findOptions.ListLong, "long", "l", false, "use a long listing format showing size and mode")
+ f.BoolVar(&findOptions.HumanReadable, "human-readable", false, "print sizes in human readable format")
initMultiSnapshotFilter(f, &findOptions.SnapshotFilter, true)
}
@@ -104,12 +106,13 @@ func parseTime(str string) (time.Time, error) {
}
type statefulOutput struct {
- ListLong bool
- JSON bool
- inuse bool
- newsn *restic.Snapshot
- oldsn *restic.Snapshot
- hits int
+ ListLong bool
+ HumanReadable bool
+ JSON bool
+ inuse bool
+ newsn *restic.Snapshot
+ oldsn *restic.Snapshot
+ hits int
}
func (s *statefulOutput) PrintPatternJSON(path string, node *restic.Node) {
@@ -164,7 +167,7 @@ func (s *statefulOutput) PrintPatternNormal(path string, node *restic.Node) {
s.oldsn = s.newsn
Verbosef("Found matching entries in snapshot %s from %s\n", s.oldsn.ID().Str(), s.oldsn.Time.Local().Format(TimeFormat))
}
- Println(formatNode(path, node, s.ListLong))
+ Println(formatNode(path, node, s.ListLong, s.HumanReadable))
}
func (s *statefulOutput) PrintPattern(path string, node *restic.Node) {
@@ -501,7 +504,7 @@ func (f *Finder) indexPacksToBlobs(ctx context.Context, packIDs map[string]struc
return packIDs
}
-func (f *Finder) findObjectPack(ctx context.Context, id string, t restic.BlobType) {
+func (f *Finder) findObjectPack(id string, t restic.BlobType) {
idx := f.repo.Index()
rid, err := restic.ParseID(id)
@@ -524,13 +527,13 @@ func (f *Finder) findObjectPack(ctx context.Context, id string, t restic.BlobTyp
}
}
-func (f *Finder) findObjectsPacks(ctx context.Context) {
+func (f *Finder) findObjectsPacks() {
for i := range f.blobIDs {
- f.findObjectPack(ctx, i, restic.DataBlob)
+ f.findObjectPack(i, restic.DataBlob)
}
for i := range f.treeIDs {
- f.findObjectPack(ctx, i, restic.TreeBlob)
+ f.findObjectPack(i, restic.TreeBlob)
}
}
@@ -575,7 +578,7 @@ func runFind(ctx context.Context, opts FindOptions, gopts GlobalOptions, args []
if !gopts.NoLock {
var lock *restic.Lock
- lock, ctx, err = lockRepo(ctx, repo)
+ lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
defer unlockRepo(lock)
if err != nil {
return err
@@ -594,7 +597,7 @@ func runFind(ctx context.Context, opts FindOptions, gopts GlobalOptions, args []
f := &Finder{
repo: repo,
pat: pat,
- out: statefulOutput{ListLong: opts.ListLong, JSON: globalOptions.JSON},
+ out: statefulOutput{ListLong: opts.ListLong, HumanReadable: opts.HumanReadable, JSON: gopts.JSON},
ignoreTrees: restic.NewIDSet(),
}
@@ -618,7 +621,16 @@ func runFind(ctx context.Context, opts FindOptions, gopts GlobalOptions, args []
}
}
+ var filteredSnapshots []*restic.Snapshot
for sn := range FindFilteredSnapshots(ctx, snapshotLister, repo, &opts.SnapshotFilter, opts.Snapshots) {
+ filteredSnapshots = append(filteredSnapshots, sn)
+ }
+
+ sort.Slice(filteredSnapshots, func(i, j int) bool {
+ return filteredSnapshots[i].Time.Before(filteredSnapshots[j].Time)
+ })
+
+ for _, sn := range filteredSnapshots {
if f.blobIDs != nil || f.treeIDs != nil {
if err = f.findIDs(ctx, sn); err != nil && err.Error() != "OK" {
return err
@@ -632,7 +644,7 @@ func runFind(ctx context.Context, opts FindOptions, gopts GlobalOptions, args []
f.out.Finish()
if opts.ShowPackID && (f.blobIDs != nil || f.treeIDs != nil) {
- f.findObjectsPacks(ctx)
+ f.findObjectsPacks()
}
return nil
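
The new --human-readable flag is threaded through to formatNode, which switches byte counts from raw numbers to an IEC-style rendering. A sketch of that kind of formatting (assumed behavior; restic's actual helper lives in format.go):

package main

import "fmt"

// formatSize renders n either as a raw byte count or, when
// humanReadable is set, scaled to the largest fitting IEC unit.
func formatSize(n uint64, humanReadable bool) string {
	if !humanReadable {
		return fmt.Sprintf("%d", n)
	}
	units := []string{"B", "KiB", "MiB", "GiB", "TiB"}
	val := float64(n)
	i := 0
	for val >= 1024 && i < len(units)-1 {
		val /= 1024
		i++
	}
	return fmt.Sprintf("%.3f %s", val, units[i])
}

func main() {
	fmt.Println(formatSize(2684354560, true))  // 2.500 GiB
	fmt.Println(formatSize(2684354560, false)) // 2684354560
}
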
diff --git a/cmd/restic/cmd_find_integration_test.go b/cmd/restic/cmd_find_integration_test.go
new file mode 100644
index 000000000..dd8ab87fd
--- /dev/null
+++ b/cmd/restic/cmd_find_integration_test.go
@@ -0,0 +1,87 @@
+package main
+
+import (
+ "context"
+ "encoding/json"
+ "strings"
+ "testing"
+ "time"
+
+ rtest "github.com/restic/restic/internal/test"
+)
+
+func testRunFind(t testing.TB, wantJSON bool, gopts GlobalOptions, pattern string) []byte {
+ buf, err := withCaptureStdout(func() error {
+ gopts.JSON = wantJSON
+
+ opts := FindOptions{}
+ return runFind(context.TODO(), opts, gopts, []string{pattern})
+ })
+ rtest.OK(t, err)
+ return buf.Bytes()
+}
+
+func TestFind(t *testing.T) {
+ env, cleanup := withTestEnvironment(t)
+ defer cleanup()
+
+ datafile := testSetupBackupData(t, env)
+ opts := BackupOptions{}
+
+ testRunBackup(t, "", []string{env.testdata}, opts, env.gopts)
+ testRunCheck(t, env.gopts)
+
+ results := testRunFind(t, false, env.gopts, "unexistingfile")
+ rtest.Assert(t, len(results) == 0, "unexisting file found in repo (%v)", datafile)
+
+ results = testRunFind(t, false, env.gopts, "testfile")
+ lines := strings.Split(string(results), "\n")
+ rtest.Assert(t, len(lines) == 2, "expected one file found in repo (%v)", datafile)
+
+ results = testRunFind(t, false, env.gopts, "testfile*")
+ lines = strings.Split(string(results), "\n")
+ rtest.Assert(t, len(lines) == 4, "expected three files found in repo (%v)", datafile)
+}
+
+type testMatch struct {
+ Path string `json:"path,omitempty"`
+ Permissions string `json:"permissions,omitempty"`
+ Size uint64 `json:"size,omitempty"`
+ Date time.Time `json:"date,omitempty"`
+ UID uint32 `json:"uid,omitempty"`
+ GID uint32 `json:"gid,omitempty"`
+}
+
+type testMatches struct {
+ Hits int `json:"hits,omitempty"`
+ SnapshotID string `json:"snapshot,omitempty"`
+ Matches []testMatch `json:"matches,omitempty"`
+}
+
+func TestFindJSON(t *testing.T) {
+ env, cleanup := withTestEnvironment(t)
+ defer cleanup()
+
+ datafile := testSetupBackupData(t, env)
+ opts := BackupOptions{}
+
+ testRunBackup(t, "", []string{env.testdata}, opts, env.gopts)
+ testRunCheck(t, env.gopts)
+
+ results := testRunFind(t, true, env.gopts, "unexistingfile")
+ matches := []testMatches{}
+ rtest.OK(t, json.Unmarshal(results, &matches))
+ rtest.Assert(t, len(matches) == 0, "expected no match in repo (%v)", datafile)
+
+ results = testRunFind(t, true, env.gopts, "testfile")
+ rtest.OK(t, json.Unmarshal(results, &matches))
+ rtest.Assert(t, len(matches) == 1, "expected a single snapshot in repo (%v)", datafile)
+ rtest.Assert(t, len(matches[0].Matches) == 1, "expected a single file to match (%v)", datafile)
+ rtest.Assert(t, matches[0].Hits == 1, "expected hits to show 1 match (%v)", datafile)
+
+ results = testRunFind(t, true, env.gopts, "testfile*")
+ rtest.OK(t, json.Unmarshal(results, &matches))
+ rtest.Assert(t, len(matches) == 1, "expected a single snapshot in repo (%v)", datafile)
+ rtest.Assert(t, len(matches[0].Matches) == 3, "expected 3 files to match (%v)", datafile)
+ rtest.Assert(t, matches[0].Hits == 3, "expected hits to show 3 matches (%v)", datafile)
+}
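The tests above lean on a withCaptureStdout helper from the shared integration
test helpers. A minimal sketch of what such a helper can look like, assumed
from its usage here rather than taken from the repository's implementation:

    import "bytes"

    // withCaptureStdout points command output at an in-memory buffer for the
    // duration of inner(), then restores the previous writer (sketch only).
    func withCaptureStdout(inner func() error) (*bytes.Buffer, error) {
        buf := bytes.NewBuffer(nil)
        prev := globalOptions.stdout
        globalOptions.stdout = buf
        defer func() { globalOptions.stdout = prev }()
        return buf, inner()
    }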
diff --git a/cmd/restic/cmd_forget.go b/cmd/restic/cmd_forget.go
index e4e44a368..22398b806 100644
--- a/cmd/restic/cmd_forget.go
+++ b/cmd/restic/cmd_forget.go
@@ -4,6 +4,7 @@ import (
"context"
"encoding/json"
"io"
+ "strconv"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/restic"
@@ -36,14 +37,49 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
},
}
+type ForgetPolicyCount int
+
+var ErrNegativePolicyCount = errors.New("negative values not allowed, use 'unlimited' instead")
+
+func (c *ForgetPolicyCount) Set(s string) error {
+ switch s {
+ case "unlimited":
+ *c = -1
+ default:
+ val, err := strconv.ParseInt(s, 10, 0)
+ if err != nil {
+ return err
+ }
+ if val < 0 {
+ return ErrNegativePolicyCount
+ }
+ *c = ForgetPolicyCount(val)
+ }
+
+ return nil
+}
+
+func (c *ForgetPolicyCount) String() string {
+ switch *c {
+ case -1:
+ return "unlimited"
+ default:
+ return strconv.FormatInt(int64(*c), 10)
+ }
+}
+
+func (c *ForgetPolicyCount) Type() string {
+ return "n"
+}
+
// ForgetOptions collects all options for the forget command.
type ForgetOptions struct {
- Last int
- Hourly int
- Daily int
- Weekly int
- Monthly int
- Yearly int
+ Last ForgetPolicyCount
+ Hourly ForgetPolicyCount
+ Daily ForgetPolicyCount
+ Weekly ForgetPolicyCount
+ Monthly ForgetPolicyCount
+ Yearly ForgetPolicyCount
Within restic.Duration
WithinHourly restic.Duration
WithinDaily restic.Duration
@@ -56,7 +92,7 @@ type ForgetOptions struct {
Compact bool
// Grouping
- GroupBy string
+ GroupBy restic.SnapshotGroupByOptions
DryRun bool
Prune bool
}
@@ -67,12 +103,12 @@ func init() {
cmdRoot.AddCommand(cmdForget)
f := cmdForget.Flags()
- f.IntVarP(&forgetOptions.Last, "keep-last", "l", 0, "keep the last `n` snapshots")
- f.IntVarP(&forgetOptions.Hourly, "keep-hourly", "H", 0, "keep the last `n` hourly snapshots")
- f.IntVarP(&forgetOptions.Daily, "keep-daily", "d", 0, "keep the last `n` daily snapshots")
- f.IntVarP(&forgetOptions.Weekly, "keep-weekly", "w", 0, "keep the last `n` weekly snapshots")
- f.IntVarP(&forgetOptions.Monthly, "keep-monthly", "m", 0, "keep the last `n` monthly snapshots")
- f.IntVarP(&forgetOptions.Yearly, "keep-yearly", "y", 0, "keep the last `n` yearly snapshots")
+ f.VarP(&forgetOptions.Last, "keep-last", "l", "keep the last `n` snapshots (use 'unlimited' to keep all snapshots)")
+ f.VarP(&forgetOptions.Hourly, "keep-hourly", "H", "keep the last `n` hourly snapshots (use 'unlimited' to keep all hourly snapshots)")
+ f.VarP(&forgetOptions.Daily, "keep-daily", "d", "keep the last `n` daily snapshots (use 'unlimited' to keep all daily snapshots)")
+ f.VarP(&forgetOptions.Weekly, "keep-weekly", "w", "keep the last `n` weekly snapshots (use 'unlimited' to keep all weekly snapshots)")
+ f.VarP(&forgetOptions.Monthly, "keep-monthly", "m", "keep the last `n` monthly snapshots (use 'unlimited' to keep all monthly snapshots)")
+ f.VarP(&forgetOptions.Yearly, "keep-yearly", "y", "keep the last `n` yearly snapshots (use 'unlimited' to keep all yearly snapshots)")
f.VarP(&forgetOptions.Within, "keep-within", "", "keep snapshots that are newer than `duration` (eg. 1y5m7d2h) relative to the latest snapshot")
f.VarP(&forgetOptions.WithinHourly, "keep-within-hourly", "", "keep hourly snapshots that are newer than `duration` (eg. 1y5m7d2h) relative to the latest snapshot")
f.VarP(&forgetOptions.WithinDaily, "keep-within-daily", "", "keep daily snapshots that are newer than `duration` (eg. 1y5m7d2h) relative to the latest snapshot")
@@ -90,8 +126,8 @@ func init() {
}
f.BoolVarP(&forgetOptions.Compact, "compact", "c", false, "use compact output format")
-
- f.StringVarP(&forgetOptions.GroupBy, "group-by", "g", "host,paths", "`group` snapshots by host, paths and/or tags, separated by comma (disable grouping with '')")
+ forgetOptions.GroupBy = restic.SnapshotGroupByOptions{Host: true, Path: true}
+ f.VarP(&forgetOptions.GroupBy, "group-by", "g", "`group` snapshots by host, paths and/or tags, separated by comma (disable grouping with '')")
f.BoolVarP(&forgetOptions.DryRun, "dry-run", "n", false, "do not delete anything, just print what would be done")
f.BoolVar(&forgetOptions.Prune, "prune", false, "automatically run the 'prune' command if snapshots have been removed")
@@ -99,8 +135,29 @@ func init() {
addPruneOptions(cmdForget)
}
+func verifyForgetOptions(opts *ForgetOptions) error {
+ if opts.Last < -1 || opts.Hourly < -1 || opts.Daily < -1 || opts.Weekly < -1 ||
+ opts.Monthly < -1 || opts.Yearly < -1 {
+ return errors.Fatal("negative values other than -1 are not allowed for --keep-*")
+ }
+
+ for _, d := range []restic.Duration{opts.Within, opts.WithinHourly, opts.WithinDaily,
+ opts.WithinMonthly, opts.WithinWeekly, opts.WithinYearly} {
+ if d.Hours < 0 || d.Days < 0 || d.Months < 0 || d.Years < 0 {
+ return errors.Fatal("durations containing negative values are not allowed for --keep-within*")
+ }
+ }
+
+ return nil
+}
+
func runForget(ctx context.Context, opts ForgetOptions, gopts GlobalOptions, args []string) error {
- err := verifyPruneOptions(&pruneOptions)
+ err := verifyForgetOptions(&opts)
+ if err != nil {
+ return err
+ }
+
+ err = verifyPruneOptions(&pruneOptions)
if err != nil {
return err
}
@@ -116,7 +173,7 @@ func runForget(ctx context.Context, opts ForgetOptions, gopts GlobalOptions, arg
if !opts.DryRun || !gopts.NoLock {
var lock *restic.Lock
- lock, ctx, err = lockRepoExclusive(ctx, repo)
+ lock, ctx, err = lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON)
defer unlockRepo(lock)
if err != nil {
return err
@@ -144,12 +201,12 @@ func runForget(ctx context.Context, opts ForgetOptions, gopts GlobalOptions, arg
}
policy := restic.ExpirePolicy{
- Last: opts.Last,
- Hourly: opts.Hourly,
- Daily: opts.Daily,
- Weekly: opts.Weekly,
- Monthly: opts.Monthly,
- Yearly: opts.Yearly,
+ Last: int(opts.Last),
+ Hourly: int(opts.Hourly),
+ Daily: int(opts.Daily),
+ Weekly: int(opts.Weekly),
+ Monthly: int(opts.Monthly),
+ Yearly: int(opts.Yearly),
Within: opts.Within,
WithinHourly: opts.WithinHourly,
WithinDaily: opts.WithinDaily,
@@ -172,7 +229,7 @@ func runForget(ctx context.Context, opts ForgetOptions, gopts GlobalOptions, arg
for k, snapshotGroup := range snapshotGroups {
if gopts.Verbose >= 1 && !gopts.JSON {
- err = PrintSnapshotGroupHeader(gopts.stdout, k)
+ err = PrintSnapshotGroupHeader(globalOptions.stdout, k)
if err != nil {
return err
}
@@ -229,7 +286,7 @@ func runForget(ctx context.Context, opts ForgetOptions, gopts GlobalOptions, arg
}
if gopts.JSON && len(jsonGroups) > 0 {
- err = printJSONForget(gopts.stdout, jsonGroups)
+ err = printJSONForget(globalOptions.stdout, jsonGroups)
if err != nil {
return err
}
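ForgetPolicyCount plugs into the flag parsing because it satisfies the
spf13/pflag Value interface (Set, String, Type), which is what f.VarP accepts.
A standalone sketch of that contract, using a hypothetical countFlag type that
is not part of restic:

    package main

    import (
        "fmt"

        "github.com/spf13/pflag"
    )

    // countFlag is a hypothetical example type implementing pflag.Value.
    type countFlag int

    func (c *countFlag) Set(s string) error {
        _, err := fmt.Sscanf(s, "%d", (*int)(c))
        return err
    }
    func (c *countFlag) String() string { return fmt.Sprintf("%d", *c) }
    func (c *countFlag) Type() string   { return "n" }

    func main() {
        var n countFlag
        fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
        fs.VarP(&n, "keep-last", "l", "keep the last `n` snapshots")
        _ = fs.Parse([]string{"--keep-last", "7"})
        fmt.Println(n) // prints 7
    }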
diff --git a/cmd/restic/cmd_forget_integration_test.go b/cmd/restic/cmd_forget_integration_test.go
new file mode 100644
index 000000000..8908d5a5f
--- /dev/null
+++ b/cmd/restic/cmd_forget_integration_test.go
@@ -0,0 +1,13 @@
+package main
+
+import (
+ "context"
+ "testing"
+
+ rtest "github.com/restic/restic/internal/test"
+)
+
+func testRunForget(t testing.TB, gopts GlobalOptions, args ...string) {
+ opts := ForgetOptions{}
+ rtest.OK(t, runForget(context.TODO(), opts, gopts, args))
+}
diff --git a/cmd/restic/cmd_forget_test.go b/cmd/restic/cmd_forget_test.go
new file mode 100644
index 000000000..ddeef028a
--- /dev/null
+++ b/cmd/restic/cmd_forget_test.go
@@ -0,0 +1,94 @@
+package main
+
+import (
+ "testing"
+
+ "github.com/restic/restic/internal/restic"
+ rtest "github.com/restic/restic/internal/test"
+)
+
+func TestForgetPolicyValues(t *testing.T) {
+ testCases := []struct {
+ input string
+ value ForgetPolicyCount
+ err string
+ }{
+ {"0", ForgetPolicyCount(0), ""},
+ {"1", ForgetPolicyCount(1), ""},
+ {"unlimited", ForgetPolicyCount(-1), ""},
+ {"", ForgetPolicyCount(0), "strconv.ParseInt: parsing \"\": invalid syntax"},
+ {"-1", ForgetPolicyCount(0), ErrNegativePolicyCount.Error()},
+ {"abc", ForgetPolicyCount(0), "strconv.ParseInt: parsing \"abc\": invalid syntax"},
+ }
+ for _, testCase := range testCases {
+ t.Run("", func(t *testing.T) {
+ var count ForgetPolicyCount
+ err := count.Set(testCase.input)
+
+ if testCase.err != "" {
+ rtest.Assert(t, err != nil, "should have returned error for input %+v", testCase.input)
+ rtest.Equals(t, testCase.err, err.Error())
+ } else {
+ rtest.Assert(t, err == nil, "expected no error for input %+v, got %v", testCase.input, err)
+ rtest.Equals(t, testCase.value, count)
+ rtest.Equals(t, testCase.input, count.String())
+ }
+ })
+ }
+}
+
+func TestForgetOptionValues(t *testing.T) {
+ const negValErrorMsg = "Fatal: negative values other than -1 are not allowed for --keep-*"
+ const negDurationValErrorMsg = "Fatal: durations containing negative values are not allowed for --keep-within*"
+ testCases := []struct {
+ input ForgetOptions
+ errorMsg string
+ }{
+ {ForgetOptions{Last: 1}, ""},
+ {ForgetOptions{Hourly: 1}, ""},
+ {ForgetOptions{Daily: 1}, ""},
+ {ForgetOptions{Weekly: 1}, ""},
+ {ForgetOptions{Monthly: 1}, ""},
+ {ForgetOptions{Yearly: 1}, ""},
+ {ForgetOptions{Last: 0}, ""},
+ {ForgetOptions{Hourly: 0}, ""},
+ {ForgetOptions{Daily: 0}, ""},
+ {ForgetOptions{Weekly: 0}, ""},
+ {ForgetOptions{Monthly: 0}, ""},
+ {ForgetOptions{Yearly: 0}, ""},
+ {ForgetOptions{Last: -1}, ""},
+ {ForgetOptions{Hourly: -1}, ""},
+ {ForgetOptions{Daily: -1}, ""},
+ {ForgetOptions{Weekly: -1}, ""},
+ {ForgetOptions{Monthly: -1}, ""},
+ {ForgetOptions{Yearly: -1}, ""},
+ {ForgetOptions{Last: -2}, negValErrorMsg},
+ {ForgetOptions{Hourly: -2}, negValErrorMsg},
+ {ForgetOptions{Daily: -2}, negValErrorMsg},
+ {ForgetOptions{Weekly: -2}, negValErrorMsg},
+ {ForgetOptions{Monthly: -2}, negValErrorMsg},
+ {ForgetOptions{Yearly: -2}, negValErrorMsg},
+ {ForgetOptions{Within: restic.ParseDurationOrPanic("1y2m3d3h")}, ""},
+ {ForgetOptions{WithinHourly: restic.ParseDurationOrPanic("1y2m3d3h")}, ""},
+ {ForgetOptions{WithinDaily: restic.ParseDurationOrPanic("1y2m3d3h")}, ""},
+ {ForgetOptions{WithinWeekly: restic.ParseDurationOrPanic("1y2m3d3h")}, ""},
+ {ForgetOptions{WithinMonthly: restic.ParseDurationOrPanic("2y4m6d8h")}, ""},
+ {ForgetOptions{WithinYearly: restic.ParseDurationOrPanic("2y4m6d8h")}, ""},
+ {ForgetOptions{Within: restic.ParseDurationOrPanic("-1y2m3d3h")}, negDurationValErrorMsg},
+ {ForgetOptions{WithinHourly: restic.ParseDurationOrPanic("1y-2m3d3h")}, negDurationValErrorMsg},
+ {ForgetOptions{WithinDaily: restic.ParseDurationOrPanic("1y2m-3d3h")}, negDurationValErrorMsg},
+ {ForgetOptions{WithinWeekly: restic.ParseDurationOrPanic("1y2m3d-3h")}, negDurationValErrorMsg},
+ {ForgetOptions{WithinMonthly: restic.ParseDurationOrPanic("-2y4m6d8h")}, negDurationValErrorMsg},
+ {ForgetOptions{WithinYearly: restic.ParseDurationOrPanic("2y-4m6d8h")}, negDurationValErrorMsg},
+ }
+
+ for _, testCase := range testCases {
+ err := verifyForgetOptions(&testCase.input)
+ if testCase.errorMsg != "" {
+ rtest.Assert(t, err != nil, "should have returned error for input %+v", testCase.input)
+ rtest.Equals(t, testCase.errorMsg, err.Error())
+ } else {
+ rtest.Assert(t, err == nil, "expected no error for input %+v", testCase.input)
+ }
+ }
+}
diff --git a/cmd/restic/cmd_generate.go b/cmd/restic/cmd_generate.go
index 959a9d518..b284767ca 100644
--- a/cmd/restic/cmd_generate.go
+++ b/cmd/restic/cmd_generate.go
@@ -63,26 +63,38 @@ func writeManpages(dir string) error {
}
func writeBashCompletion(file string) error {
- Verbosef("writing bash completion file to %v\n", file)
+ if stdoutIsTerminal() {
+ Verbosef("writing bash completion file to %v\n", file)
+ }
return cmdRoot.GenBashCompletionFile(file)
}
func writeFishCompletion(file string) error {
- Verbosef("writing fish completion file to %v\n", file)
+ if stdoutIsTerminal() {
+ Verbosef("writing fish completion file to %v\n", file)
+ }
return cmdRoot.GenFishCompletionFile(file, true)
}
func writeZSHCompletion(file string) error {
- Verbosef("writing zsh completion file to %v\n", file)
+ if stdoutIsTerminal() {
+ Verbosef("writing zsh completion file to %v\n", file)
+ }
return cmdRoot.GenZshCompletionFile(file)
}
func writePowerShellCompletion(file string) error {
- Verbosef("writing powershell completion file to %v\n", file)
+ if stdoutIsTerminal() {
+ Verbosef("writing powershell completion file to %v\n", file)
+ }
return cmdRoot.GenPowerShellCompletionFile(file)
}
-func runGenerate(cmd *cobra.Command, args []string) error {
+func runGenerate(_ *cobra.Command, args []string) error {
+ if len(args) > 0 {
+ return errors.Fatal("the generate command expects no arguments, only options - please see `restic help generate` for usage and flags")
+ }
+
if genOpts.ManDir != "" {
err := writeManpages(genOpts.ManDir)
if err != nil {
diff --git a/cmd/restic/cmd_init.go b/cmd/restic/cmd_init.go
index 2932870e8..b9dabdc2d 100644
--- a/cmd/restic/cmd_init.go
+++ b/cmd/restic/cmd_init.go
@@ -50,6 +50,10 @@ func init() {
}
func runInit(ctx context.Context, opts InitOptions, gopts GlobalOptions, args []string) error {
+ if len(args) > 0 {
+ return errors.Fatal("the init command expects no arguments, only options - please see `restic help init` for usage and flags")
+ }
+
var version uint
if opts.RepositoryVersion == "latest" || opts.RepositoryVersion == "" {
version = restic.MaxRepoVersion
@@ -83,9 +87,9 @@ func runInit(ctx context.Context, opts InitOptions, gopts GlobalOptions, args []
return err
}
- be, err := create(ctx, repo, gopts.extended)
+ be, err := create(ctx, repo, gopts, gopts.extended)
if err != nil {
- return errors.Fatalf("create repository at %s failed: %v\n", location.StripPassword(gopts.Repo), err)
+ return errors.Fatalf("create repository at %s failed: %v\n", location.StripPassword(gopts.backends, gopts.Repo), err)
}
s, err := repository.New(be, repository.Options{
@@ -93,16 +97,21 @@ func runInit(ctx context.Context, opts InitOptions, gopts GlobalOptions, args []
PackSize: gopts.PackSize * 1024 * 1024,
})
if err != nil {
- return err
+ return errors.Fatal(err.Error())
}
err = s.Init(ctx, version, gopts.password, chunkerPolynomial)
if err != nil {
- return errors.Fatalf("create key in repository at %s failed: %v\n", location.StripPassword(gopts.Repo), err)
+ return errors.Fatalf("create key in repository at %s failed: %v\n", location.StripPassword(gopts.backends, gopts.Repo), err)
}
if !gopts.JSON {
- Verbosef("created restic repository %v at %s\n", s.Config().ID[:10], location.StripPassword(gopts.Repo))
+ Verbosef("created restic repository %v at %s", s.Config().ID[:10], location.StripPassword(gopts.backends, gopts.Repo))
+ if opts.CopyChunkerParameters && chunkerPolynomial != nil {
+ Verbosef(" with chunker parameters copied from secondary repository\n")
+ } else {
+ Verbosef("\n")
+ }
Verbosef("\n")
Verbosef("Please note that knowledge of your password is required to access\n")
Verbosef("the repository. Losing your password means that your data is\n")
@@ -112,9 +121,9 @@ func runInit(ctx context.Context, opts InitOptions, gopts GlobalOptions, args []
status := initSuccess{
MessageType: "initialized",
ID: s.Config().ID,
- Repository: location.StripPassword(gopts.Repo),
+ Repository: location.StripPassword(gopts.backends, gopts.Repo),
}
- return json.NewEncoder(gopts.stdout).Encode(status)
+ return json.NewEncoder(globalOptions.stdout).Encode(status)
}
return nil
diff --git a/cmd/restic/cmd_init_integration_test.go b/cmd/restic/cmd_init_integration_test.go
new file mode 100644
index 000000000..9b5eed6e0
--- /dev/null
+++ b/cmd/restic/cmd_init_integration_test.go
@@ -0,0 +1,49 @@
+package main
+
+import (
+ "context"
+ "testing"
+
+ "github.com/restic/restic/internal/repository"
+ "github.com/restic/restic/internal/restic"
+ rtest "github.com/restic/restic/internal/test"
+)
+
+func testRunInit(t testing.TB, opts GlobalOptions) {
+ repository.TestUseLowSecurityKDFParameters(t)
+ restic.TestDisableCheckPolynomial(t)
+ restic.TestSetLockTimeout(t, 0)
+
+ rtest.OK(t, runInit(context.TODO(), InitOptions{}, opts, nil))
+ t.Logf("repository initialized at %v", opts.Repo)
+}
+
+func TestInitCopyChunkerParams(t *testing.T) {
+ env, cleanup := withTestEnvironment(t)
+ defer cleanup()
+ env2, cleanup2 := withTestEnvironment(t)
+ defer cleanup2()
+
+ testRunInit(t, env2.gopts)
+
+ initOpts := InitOptions{
+ secondaryRepoOptions: secondaryRepoOptions{
+ Repo: env2.gopts.Repo,
+ password: env2.gopts.password,
+ },
+ }
+ rtest.Assert(t, runInit(context.TODO(), initOpts, env.gopts, nil) != nil, "expected invalid init options to fail")
+
+ initOpts.CopyChunkerParameters = true
+ rtest.OK(t, runInit(context.TODO(), initOpts, env.gopts, nil))
+
+ repo, err := OpenRepository(context.TODO(), env.gopts)
+ rtest.OK(t, err)
+
+ otherRepo, err := OpenRepository(context.TODO(), env2.gopts)
+ rtest.OK(t, err)
+
+ rtest.Assert(t, repo.Config().ChunkerPolynomial == otherRepo.Config().ChunkerPolynomial,
+ "expected equal chunker polynomials, got %v expected %v", repo.Config().ChunkerPolynomial,
+ otherRepo.Config().ChunkerPolynomial)
+}
diff --git a/cmd/restic/cmd_key.go b/cmd/restic/cmd_key.go
index 88b6d5c0c..62521d762 100644
--- a/cmd/restic/cmd_key.go
+++ b/cmd/restic/cmd_key.go
@@ -212,7 +212,7 @@ func runKey(ctx context.Context, gopts GlobalOptions, args []string) error {
switch args[0] {
case "list":
- lock, ctx, err := lockRepo(ctx, repo)
+ lock, ctx, err := lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
defer unlockRepo(lock)
if err != nil {
return err
@@ -220,7 +220,7 @@ func runKey(ctx context.Context, gopts GlobalOptions, args []string) error {
return listKeys(ctx, repo, gopts)
case "add":
- lock, ctx, err := lockRepo(ctx, repo)
+ lock, ctx, err := lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
defer unlockRepo(lock)
if err != nil {
return err
@@ -228,7 +228,7 @@ func runKey(ctx context.Context, gopts GlobalOptions, args []string) error {
return addKey(ctx, repo, gopts)
case "remove":
- lock, ctx, err := lockRepoExclusive(ctx, repo)
+ lock, ctx, err := lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON)
defer unlockRepo(lock)
if err != nil {
return err
@@ -241,7 +241,7 @@ func runKey(ctx context.Context, gopts GlobalOptions, args []string) error {
return deleteKey(ctx, repo, id)
case "passwd":
- lock, ctx, err := lockRepoExclusive(ctx, repo)
+ lock, ctx, err := lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON)
defer unlockRepo(lock)
if err != nil {
return err
diff --git a/cmd/restic/cmd_key_integration_test.go b/cmd/restic/cmd_key_integration_test.go
new file mode 100644
index 000000000..9ea5795ba
--- /dev/null
+++ b/cmd/restic/cmd_key_integration_test.go
@@ -0,0 +1,145 @@
+package main
+
+import (
+ "bufio"
+ "context"
+ "regexp"
+ "testing"
+
+ "github.com/restic/restic/internal/repository"
+ "github.com/restic/restic/internal/restic"
+ rtest "github.com/restic/restic/internal/test"
+)
+
+func testRunKeyListOtherIDs(t testing.TB, gopts GlobalOptions) []string {
+ buf, err := withCaptureStdout(func() error {
+ return runKey(context.TODO(), gopts, []string{"list"})
+ })
+ rtest.OK(t, err)
+
+ scanner := bufio.NewScanner(buf)
+ exp := regexp.MustCompile(`^ ([a-f0-9]+) `)
+
+ IDs := []string{}
+ for scanner.Scan() {
+ if id := exp.FindStringSubmatch(scanner.Text()); id != nil {
+ IDs = append(IDs, id[1])
+ }
+ }
+
+ return IDs
+}
+
+func testRunKeyAddNewKey(t testing.TB, newPassword string, gopts GlobalOptions) {
+ testKeyNewPassword = newPassword
+ defer func() {
+ testKeyNewPassword = ""
+ }()
+
+ rtest.OK(t, runKey(context.TODO(), gopts, []string{"add"}))
+}
+
+func testRunKeyAddNewKeyUserHost(t testing.TB, gopts GlobalOptions) {
+ testKeyNewPassword = "john's geheimnis"
+ defer func() {
+ testKeyNewPassword = ""
+ keyUsername = ""
+ keyHostname = ""
+ }()
+
+ rtest.OK(t, cmdKey.Flags().Parse([]string{"--user=john", "--host=example.com"}))
+
+ t.Log("adding key for john@example.com")
+ rtest.OK(t, runKey(context.TODO(), gopts, []string{"add"}))
+
+ repo, err := OpenRepository(context.TODO(), gopts)
+ rtest.OK(t, err)
+ key, err := repository.SearchKey(context.TODO(), repo, testKeyNewPassword, 2, "")
+ rtest.OK(t, err)
+
+ rtest.Equals(t, "john", key.Username)
+ rtest.Equals(t, "example.com", key.Hostname)
+}
+
+func testRunKeyPasswd(t testing.TB, newPassword string, gopts GlobalOptions) {
+ testKeyNewPassword = newPassword
+ defer func() {
+ testKeyNewPassword = ""
+ }()
+
+ rtest.OK(t, runKey(context.TODO(), gopts, []string{"passwd"}))
+}
+
+func testRunKeyRemove(t testing.TB, gopts GlobalOptions, IDs []string) {
+ t.Logf("remove %d keys: %q\n", len(IDs), IDs)
+ for _, id := range IDs {
+ rtest.OK(t, runKey(context.TODO(), gopts, []string{"remove", id}))
+ }
+}
+
+func TestKeyAddRemove(t *testing.T) {
+ passwordList := []string{
+ "OnnyiasyatvodsEvVodyawit",
+ "raicneirvOjEfEigonOmLasOd",
+ }
+
+ env, cleanup := withTestEnvironment(t)
+ // must list keys more than once
+ env.gopts.backendTestHook = nil
+ defer cleanup()
+
+ testRunInit(t, env.gopts)
+
+ testRunKeyPasswd(t, "geheim2", env.gopts)
+ env.gopts.password = "geheim2"
+ t.Logf("changed password to %q", env.gopts.password)
+
+ for _, newPassword := range passwordList {
+ testRunKeyAddNewKey(t, newPassword, env.gopts)
+ t.Logf("added new password %q", newPassword)
+ env.gopts.password = newPassword
+ testRunKeyRemove(t, env.gopts, testRunKeyListOtherIDs(t, env.gopts))
+ }
+
+ env.gopts.password = passwordList[len(passwordList)-1]
+ t.Logf("testing access with last password %q\n", env.gopts.password)
+ rtest.OK(t, runKey(context.TODO(), env.gopts, []string{"list"}))
+ testRunCheck(t, env.gopts)
+
+ testRunKeyAddNewKeyUserHost(t, env.gopts)
+}
+
+type emptySaveBackend struct {
+ restic.Backend
+}
+
+func (b *emptySaveBackend) Save(ctx context.Context, h restic.Handle, _ restic.RewindReader) error {
+ return b.Backend.Save(ctx, h, restic.NewByteReader([]byte{}, nil))
+}
+
+func TestKeyProblems(t *testing.T) {
+ env, cleanup := withTestEnvironment(t)
+ defer cleanup()
+
+ testRunInit(t, env.gopts)
+ env.gopts.backendTestHook = func(r restic.Backend) (restic.Backend, error) {
+ return &emptySaveBackend{r}, nil
+ }
+
+ testKeyNewPassword = "geheim2"
+ defer func() {
+ testKeyNewPassword = ""
+ }()
+
+ err := runKey(context.TODO(), env.gopts, []string{"passwd"})
+ t.Log(err)
+ rtest.Assert(t, err != nil, "expected passwd change to fail")
+
+ err = runKey(context.TODO(), env.gopts, []string{"add"})
+ t.Log(err)
+ rtest.Assert(t, err != nil, "expected key adding to fail")
+
+ t.Logf("testing access with initial password %q\n", env.gopts.password)
+ rtest.OK(t, runKey(context.TODO(), env.gopts, []string{"list"}))
+ testRunCheck(t, env.gopts)
+}
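emptySaveBackend shows the fault-injection pattern these integration tests use
throughout: embed restic.Backend so every method is inherited, shadow only the
method to sabotage, and install the wrapper through gopts.backendTestHook. A
sketch with a hypothetical wrapper (the appendOnlyBackend further down in this
diff applies the same idea to Remove):

    // failSaveBackend is hypothetical; it makes every Save fail.
    type failSaveBackend struct {
        restic.Backend
    }

    func (b *failSaveBackend) Save(ctx context.Context, h restic.Handle, _ restic.RewindReader) error {
        return errors.Errorf("simulated write failure for %v", h)
    }

    // installed in a test via the hook seen above:
    //   env.gopts.backendTestHook = func(r restic.Backend) (restic.Backend, error) {
    //       return &failSaveBackend{r}, nil
    //   }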
diff --git a/cmd/restic/cmd_list.go b/cmd/restic/cmd_list.go
index 4809092c0..bd02cedc7 100644
--- a/cmd/restic/cmd_list.go
+++ b/cmd/restic/cmd_list.go
@@ -31,19 +31,19 @@ func init() {
cmdRoot.AddCommand(cmdList)
}
-func runList(ctx context.Context, cmd *cobra.Command, opts GlobalOptions, args []string) error {
+func runList(ctx context.Context, cmd *cobra.Command, gopts GlobalOptions, args []string) error {
if len(args) != 1 {
return errors.Fatal("type not specified, usage: " + cmd.Use)
}
- repo, err := OpenRepository(ctx, opts)
+ repo, err := OpenRepository(ctx, gopts)
if err != nil {
return err
}
- if !opts.NoLock && args[0] != "locks" {
+ if !gopts.NoLock && args[0] != "locks" {
var lock *restic.Lock
- lock, ctx, err = lockRepo(ctx, repo)
+ lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
defer unlockRepo(lock)
if err != nil {
return err
diff --git a/cmd/restic/cmd_list_integration_test.go b/cmd/restic/cmd_list_integration_test.go
new file mode 100644
index 000000000..4140a3ea8
--- /dev/null
+++ b/cmd/restic/cmd_list_integration_test.go
@@ -0,0 +1,44 @@
+package main
+
+import (
+ "bufio"
+ "context"
+ "io"
+ "testing"
+
+ "github.com/restic/restic/internal/restic"
+ rtest "github.com/restic/restic/internal/test"
+)
+
+func testRunList(t testing.TB, tpe string, opts GlobalOptions) restic.IDs {
+ buf, err := withCaptureStdout(func() error {
+ return runList(context.TODO(), cmdList, opts, []string{tpe})
+ })
+ rtest.OK(t, err)
+ return parseIDsFromReader(t, buf)
+}
+
+func parseIDsFromReader(t testing.TB, rd io.Reader) restic.IDs {
+ t.Helper()
+ IDs := restic.IDs{}
+ sc := bufio.NewScanner(rd)
+
+ for sc.Scan() {
+ id, err := restic.ParseID(sc.Text())
+ if err != nil {
+ t.Logf("parse id %v: %v", sc.Text(), err)
+ continue
+ }
+
+ IDs = append(IDs, id)
+ }
+
+ return IDs
+}
+
+func testListSnapshots(t testing.TB, opts GlobalOptions, expected int) restic.IDs {
+ t.Helper()
+ snapshotIDs := testRunList(t, "snapshots", opts)
+ rtest.Assert(t, len(snapshotIDs) == expected, "expected %v snapshots, got %v", expected, snapshotIDs)
+ return snapshotIDs
+}
diff --git a/cmd/restic/cmd_ls.go b/cmd/restic/cmd_ls.go
index aeaa750eb..256c9e002 100644
--- a/cmd/restic/cmd_ls.go
+++ b/cmd/restic/cmd_ls.go
@@ -50,7 +50,8 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
type LsOptions struct {
ListLong bool
restic.SnapshotFilter
- Recursive bool
+ Recursive bool
+ HumanReadable bool
}
var lsOptions LsOptions
@@ -62,6 +63,7 @@ func init() {
initSingleSnapshotFilter(flags, &lsOptions.SnapshotFilter)
flags.BoolVarP(&lsOptions.ListLong, "long", "l", false, "use a long listing format showing size and mode")
flags.BoolVar(&lsOptions.Recursive, "recursive", false, "include files in subfolders of the listed directories")
+ flags.BoolVar(&lsOptions.HumanReadable, "human-readable", false, "print sizes in human readable format")
}
type lsSnapshot struct {
@@ -181,7 +183,7 @@ func runLs(ctx context.Context, opts LsOptions, gopts GlobalOptions, args []stri
)
if gopts.JSON {
- enc := json.NewEncoder(gopts.stdout)
+ enc := json.NewEncoder(globalOptions.stdout)
printSnapshot = func(sn *restic.Snapshot) {
err := enc.Encode(lsSnapshot{
@@ -206,11 +208,11 @@ func runLs(ctx context.Context, opts LsOptions, gopts GlobalOptions, args []stri
Verbosef("snapshot %s of %v filtered by %v at %s):\n", sn.ID().Str(), sn.Paths, dirs, sn.Time)
}
printNode = func(path string, node *restic.Node) {
- Printf("%s\n", formatNode(path, node, lsOptions.ListLong))
+ Printf("%s\n", formatNode(path, node, lsOptions.ListLong, lsOptions.HumanReadable))
}
}
- sn, err := (&restic.SnapshotFilter{
+ sn, subfolder, err := (&restic.SnapshotFilter{
Hosts: opts.Hosts,
Paths: opts.Paths,
Tags: opts.Tags,
@@ -219,6 +221,11 @@ func runLs(ctx context.Context, opts LsOptions, gopts GlobalOptions, args []stri
return err
}
+ sn.Tree, err = restic.FindTreeDirectory(ctx, repo, sn.Tree, subfolder)
+ if err != nil {
+ return err
+ }
+
printSnapshot(sn)
err = walker.Walk(ctx, repo, *sn.Tree, nil, func(_ restic.ID, nodepath string, node *restic.Node, err error) (bool, error) {
diff --git a/cmd/restic/cmd_ls_integration_test.go b/cmd/restic/cmd_ls_integration_test.go
new file mode 100644
index 000000000..39bf9c3b0
--- /dev/null
+++ b/cmd/restic/cmd_ls_integration_test.go
@@ -0,0 +1,19 @@
+package main
+
+import (
+ "context"
+ "strings"
+ "testing"
+
+ rtest "github.com/restic/restic/internal/test"
+)
+
+func testRunLs(t testing.TB, gopts GlobalOptions, snapshotID string) []string {
+ buf, err := withCaptureStdout(func() error {
+ gopts.Quiet = true
+ opts := LsOptions{}
+ return runLs(context.TODO(), opts, gopts, []string{snapshotID})
+ })
+ rtest.OK(t, err)
+ return strings.Split(buf.String(), "\n")
+}
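The new --human-readable flag threads a bool down to formatNode. The
conversion it implies is the usual base-1024 loop; an illustrative helper,
not restic's actual formatter:

    // humanSize is a hypothetical sketch of a human-readable size formatter.
    func humanSize(n uint64) string {
        units := []string{"B", "KiB", "MiB", "GiB", "TiB"}
        f := float64(n)
        i := 0
        for f >= 1024 && i < len(units)-1 {
            f /= 1024
            i++
        }
        return fmt.Sprintf("%.3f %s", f, units[i]) // e.g. 1.500 MiB
    }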
diff --git a/cmd/restic/cmd_migrate.go b/cmd/restic/cmd_migrate.go
index 6d614be39..fd2e762c0 100644
--- a/cmd/restic/cmd_migrate.go
+++ b/cmd/restic/cmd_migrate.go
@@ -122,7 +122,7 @@ func runMigrate(ctx context.Context, opts MigrateOptions, gopts GlobalOptions, a
return err
}
- lock, ctx, err := lockRepoExclusive(ctx, repo)
+ lock, ctx, err := lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON)
defer unlockRepo(lock)
if err != nil {
return err
diff --git a/cmd/restic/cmd_mount.go b/cmd/restic/cmd_mount.go
index 0501bfe89..ec3662d5c 100644
--- a/cmd/restic/cmd_mount.go
+++ b/cmd/restic/cmd_mount.go
@@ -123,7 +123,7 @@ func runMount(ctx context.Context, opts MountOptions, gopts GlobalOptions, args
if !gopts.NoLock {
var lock *restic.Lock
- lock, ctx, err = lockRepo(ctx, repo)
+ lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
defer unlockRepo(lock)
if err != nil {
return err
diff --git a/cmd/restic/integration_fuse_test.go b/cmd/restic/cmd_mount_integration_test.go
index a99064b8f..1b069d582 100644
--- a/cmd/restic/integration_fuse_test.go
+++ b/cmd/restic/cmd_mount_integration_test.go
@@ -12,6 +12,7 @@ import (
"testing"
"time"
+ "github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/repository"
"github.com/restic/restic/internal/restic"
rtest "github.com/restic/restic/internal/test"
@@ -63,7 +64,7 @@ func testRunMount(t testing.TB, gopts GlobalOptions, dir string, wg *sync.WaitGr
rtest.OK(t, runMount(context.TODO(), opts, gopts, []string{dir}))
}
-func testRunUmount(t testing.TB, gopts GlobalOptions, dir string) {
+func testRunUmount(t testing.TB, dir string) {
var err error
for i := 0; i < mountWait; i++ {
if err = umount(dir); err == nil {
@@ -94,7 +95,7 @@ func checkSnapshots(t testing.TB, global GlobalOptions, repo *repository.Reposit
go testRunMount(t, global, mountpoint, &wg)
waitForMount(t, mountpoint)
defer wg.Wait()
- defer testRunUmount(t, global, mountpoint)
+ defer testRunUmount(t, mountpoint)
if !snapshotsDirExists(t, mountpoint) {
t.Fatal(`virtual directory "snapshots" doesn't exist`)
@@ -159,6 +160,11 @@ func TestMount(t *testing.T) {
t.Skip("Skipping fuse tests")
}
+ debugEnabled := debug.TestLogToStderr(t)
+ if debugEnabled {
+ defer debug.TestDisableLog(t)
+ }
+
env, cleanup := withTestEnvironment(t)
// must list snapshots more than once
env.gopts.backendTestHook = nil
diff --git a/cmd/restic/cmd_prune.go b/cmd/restic/cmd_prune.go
index f59be2967..e4c2c7b29 100644
--- a/cmd/restic/cmd_prune.go
+++ b/cmd/restic/cmd_prune.go
@@ -3,6 +3,7 @@ package main
import (
"context"
"math"
+ "runtime"
"sort"
"strconv"
"strings"
@@ -80,7 +81,7 @@ func addPruneOptions(c *cobra.Command) {
func verifyPruneOptions(opts *PruneOptions) error {
opts.MaxRepackBytes = math.MaxUint64
if len(opts.MaxRepackSize) > 0 {
- size, err := parseSizeStr(opts.MaxRepackSize)
+ size, err := ui.ParseBytes(opts.MaxRepackSize)
if err != nil {
return err
}
@@ -123,7 +124,7 @@ func verifyPruneOptions(opts *PruneOptions) error {
}
default:
- size, err := parseSizeStr(maxUnused)
+ size, err := ui.ParseBytes(maxUnused)
if err != nil {
return errors.Fatalf("invalid number of bytes %q for --max-unused: %v", opts.MaxUnused, err)
}
@@ -167,7 +168,7 @@ func runPrune(ctx context.Context, opts PruneOptions, gopts GlobalOptions) error
opts.unsafeRecovery = true
}
- lock, ctx, err := lockRepoExclusive(ctx, repo)
+ lock, ctx, err := lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON)
defer unlockRepo(lock)
if err != nil {
return err
@@ -205,6 +206,9 @@ func runPruneWithRepo(ctx context.Context, opts PruneOptions, gopts GlobalOption
return err
}
+ // Trigger GC to reset garbage collection threshold
+ runtime.GC()
+
return doPrune(ctx, opts, gopts, repo, plan)
}
@@ -488,7 +492,7 @@ func decidePackAction(ctx context.Context, opts PruneOptions, repo restic.Reposi
// Pack size does not fit and pack is needed => error
// If the pack is not needed, this is no error, the pack can
// and will be simply removed, see below.
- Warnf("pack %s: calculated size %d does not match real size %d\nRun 'restic rebuild-index'.\n",
+ Warnf("pack %s: calculated size %d does not match real size %d\nRun 'restic repair index'.\n",
id.Str(), p.unusedSize+p.usedSize, packSize)
return errorSizeNotMatching
}
@@ -729,7 +733,7 @@ func doPrune(ctx context.Context, opts PruneOptions, gopts GlobalOptions, repo r
_, err := repository.Repack(ctx, repo, repo, plan.repackPacks, plan.keepBlobs, bar)
bar.Done()
if err != nil {
- return errors.Fatalf("%s", err)
+ return errors.Fatal(err.Error())
}
// Also remove repacked packs
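The runtime.GC() call added after plan creation exploits how Go's pacer picks
the next collection target from the live heap at the end of the previous
cycle: forcing a cycle once the allocation-heavy planning phase is done lowers
that target before repacking starts. A self-contained illustration of the
effect (numbers vary by machine and Go version):

    package main

    import (
        "fmt"
        "runtime"
    )

    func main() {
        // stand-in for the memory-heavy planning phase; dead afterwards
        plan := make([][]byte, 64)
        for i := range plan {
            plan[i] = make([]byte, 1<<20)
        }
        plan = nil

        var m runtime.MemStats
        runtime.ReadMemStats(&m)
        fmt.Printf("next GC target before: %d KiB\n", m.NextGC>>10)

        runtime.GC() // what runPruneWithRepo now does before doPrune
        runtime.ReadMemStats(&m)
        fmt.Printf("next GC target after:  %d KiB\n", m.NextGC>>10)
    }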
diff --git a/cmd/restic/cmd_prune_integration_test.go b/cmd/restic/cmd_prune_integration_test.go
new file mode 100644
index 000000000..2cd86d895
--- /dev/null
+++ b/cmd/restic/cmd_prune_integration_test.go
@@ -0,0 +1,221 @@
+package main
+
+import (
+ "context"
+ "encoding/json"
+ "path/filepath"
+ "testing"
+
+ "github.com/restic/restic/internal/restic"
+ rtest "github.com/restic/restic/internal/test"
+)
+
+func testRunPrune(t testing.TB, gopts GlobalOptions, opts PruneOptions) {
+ oldHook := gopts.backendTestHook
+ gopts.backendTestHook = func(r restic.Backend) (restic.Backend, error) { return newListOnceBackend(r), nil }
+ defer func() {
+ gopts.backendTestHook = oldHook
+ }()
+ rtest.OK(t, runPrune(context.TODO(), opts, gopts))
+}
+
+func TestPrune(t *testing.T) {
+ testPruneVariants(t, false)
+ testPruneVariants(t, true)
+}
+
+func testPruneVariants(t *testing.T, unsafeNoSpaceRecovery bool) {
+ suffix := ""
+ if unsafeNoSpaceRecovery {
+ suffix = "-recovery"
+ }
+ t.Run("0"+suffix, func(t *testing.T) {
+ opts := PruneOptions{MaxUnused: "0%", unsafeRecovery: unsafeNoSpaceRecovery}
+ checkOpts := CheckOptions{ReadData: true, CheckUnused: true}
+ testPrune(t, opts, checkOpts)
+ })
+
+ t.Run("50"+suffix, func(t *testing.T) {
+ opts := PruneOptions{MaxUnused: "50%", unsafeRecovery: unsafeNoSpaceRecovery}
+ checkOpts := CheckOptions{ReadData: true}
+ testPrune(t, opts, checkOpts)
+ })
+
+ t.Run("unlimited"+suffix, func(t *testing.T) {
+ opts := PruneOptions{MaxUnused: "unlimited", unsafeRecovery: unsafeNoSpaceRecovery}
+ checkOpts := CheckOptions{ReadData: true}
+ testPrune(t, opts, checkOpts)
+ })
+
+ t.Run("CachableOnly"+suffix, func(t *testing.T) {
+ opts := PruneOptions{MaxUnused: "5%", RepackCachableOnly: true, unsafeRecovery: unsafeNoSpaceRecovery}
+ checkOpts := CheckOptions{ReadData: true}
+ testPrune(t, opts, checkOpts)
+ })
+ t.Run("Small", func(t *testing.T) {
+ opts := PruneOptions{MaxUnused: "unlimited", RepackSmall: true}
+ checkOpts := CheckOptions{ReadData: true, CheckUnused: true}
+ testPrune(t, opts, checkOpts)
+ })
+}
+
+func createPrunableRepo(t *testing.T, env *testEnvironment) {
+ testSetupBackupData(t, env)
+ opts := BackupOptions{}
+
+ testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9")}, opts, env.gopts)
+ firstSnapshot := testListSnapshots(t, env.gopts, 1)[0]
+
+ testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "2")}, opts, env.gopts)
+ testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "3")}, opts, env.gopts)
+ testListSnapshots(t, env.gopts, 3)
+
+ testRunForgetJSON(t, env.gopts)
+ testRunForget(t, env.gopts, firstSnapshot.String())
+}
+
+func testRunForgetJSON(t testing.TB, gopts GlobalOptions, args ...string) {
+ buf, err := withCaptureStdout(func() error {
+ gopts.JSON = true
+ opts := ForgetOptions{
+ DryRun: true,
+ Last: 1,
+ }
+ return runForget(context.TODO(), opts, gopts, args)
+ })
+ rtest.OK(t, err)
+
+ var forgets []*ForgetGroup
+ rtest.OK(t, json.Unmarshal(buf.Bytes(), &forgets))
+
+ rtest.Assert(t, len(forgets) == 1,
+ "Expected 1 snapshot group, got %v", len(forgets))
+ rtest.Assert(t, len(forgets[0].Keep) == 1,
+ "Expected 1 snapshot to be kept, got %v", len(forgets[0].Keep))
+ rtest.Assert(t, len(forgets[0].Remove) == 2,
+ "Expected 2 snapshots to be removed, got %v", len(forgets[0].Remove))
+}
+
+func testPrune(t *testing.T, pruneOpts PruneOptions, checkOpts CheckOptions) {
+ env, cleanup := withTestEnvironment(t)
+ defer cleanup()
+
+ createPrunableRepo(t, env)
+ testRunPrune(t, env.gopts, pruneOpts)
+ rtest.OK(t, runCheck(context.TODO(), checkOpts, env.gopts, nil))
+}
+
+var pruneDefaultOptions = PruneOptions{MaxUnused: "5%"}
+
+func TestPruneWithDamagedRepository(t *testing.T) {
+ env, cleanup := withTestEnvironment(t)
+ defer cleanup()
+
+ datafile := filepath.Join("testdata", "backup-data.tar.gz")
+ testRunInit(t, env.gopts)
+
+ rtest.SetupTarTestFixture(t, env.testdata, datafile)
+ opts := BackupOptions{}
+
+ // create and delete snapshot to create unused blobs
+ testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "2")}, opts, env.gopts)
+ firstSnapshot := testListSnapshots(t, env.gopts, 1)[0]
+ testRunForget(t, env.gopts, firstSnapshot.String())
+
+ oldPacks := listPacks(env.gopts, t)
+
+ // create new snapshot, but lose all data
+ testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "3")}, opts, env.gopts)
+ testListSnapshots(t, env.gopts, 1)
+ removePacksExcept(env.gopts, t, oldPacks, false)
+
+ oldHook := env.gopts.backendTestHook
+ env.gopts.backendTestHook = func(r restic.Backend) (restic.Backend, error) { return newListOnceBackend(r), nil }
+ defer func() {
+ env.gopts.backendTestHook = oldHook
+ }()
+ // prune should fail
+ rtest.Assert(t, runPrune(context.TODO(), pruneDefaultOptions, env.gopts) == errorPacksMissing,
+ "prune should have reported index not complete error")
+}
+
+// Test repos for edge cases
+func TestEdgeCaseRepos(t *testing.T) {
+ opts := CheckOptions{}
+
+ // repo where index is completely missing
+ // => check and prune should fail
+ t.Run("no-index", func(t *testing.T) {
+ testEdgeCaseRepo(t, "repo-index-missing.tar.gz", opts, pruneDefaultOptions, false, false)
+ })
+
+ // repo where an existing and used blob is missing from the index
+ // => check and prune should fail
+ t.Run("index-missing-blob", func(t *testing.T) {
+ testEdgeCaseRepo(t, "repo-index-missing-blob.tar.gz", opts, pruneDefaultOptions, false, false)
+ })
+
+ // repo where a blob is missing
+ // => check and prune should fail
+ t.Run("missing-data", func(t *testing.T) {
+ testEdgeCaseRepo(t, "repo-data-missing.tar.gz", opts, pruneDefaultOptions, false, false)
+ })
+
+ // repo where blobs which are not needed are missing or in invalid pack files
+ // => check should fail and prune should repair this
+ t.Run("missing-unused-data", func(t *testing.T) {
+ testEdgeCaseRepo(t, "repo-unused-data-missing.tar.gz", opts, pruneDefaultOptions, false, true)
+ })
+
+ // repo where data exists that is not referenced
+ // => check and prune should fully work
+ t.Run("unreferenced-data", func(t *testing.T) {
+ testEdgeCaseRepo(t, "repo-unreferenced-data.tar.gz", opts, pruneDefaultOptions, true, true)
+ })
+
+ // repo where an obsolete index still exists
+ // => check and prune should fully work
+ t.Run("obsolete-index", func(t *testing.T) {
+ testEdgeCaseRepo(t, "repo-obsolete-index.tar.gz", opts, pruneDefaultOptions, true, true)
+ })
+
+ // repo which contains mixed (data/tree) packs
+ // => check and prune should fully work
+ t.Run("mixed-packs", func(t *testing.T) {
+ testEdgeCaseRepo(t, "repo-mixed.tar.gz", opts, pruneDefaultOptions, true, true)
+ })
+
+ // repo which contains duplicate blobs
+ // => checking for unused data should report an error and prune resolves the
+ // situation
+ opts = CheckOptions{
+ ReadData: true,
+ CheckUnused: true,
+ }
+ t.Run("duplicates", func(t *testing.T) {
+ testEdgeCaseRepo(t, "repo-duplicates.tar.gz", opts, pruneDefaultOptions, false, true)
+ })
+}
+
+func testEdgeCaseRepo(t *testing.T, tarfile string, optionsCheck CheckOptions, optionsPrune PruneOptions, checkOK, pruneOK bool) {
+ env, cleanup := withTestEnvironment(t)
+ defer cleanup()
+
+ datafile := filepath.Join("testdata", tarfile)
+ rtest.SetupTarTestFixture(t, env.base, datafile)
+
+ if checkOK {
+ testRunCheck(t, env.gopts)
+ } else {
+ rtest.Assert(t, runCheck(context.TODO(), optionsCheck, env.gopts, nil) != nil,
+ "check should have reported an error")
+ }
+
+ if pruneOK {
+ testRunPrune(t, env.gopts, optionsPrune)
+ testRunCheck(t, env.gopts)
+ } else {
+ rtest.Assert(t, runPrune(context.TODO(), optionsPrune, env.gopts) != nil,
+ "prune should have reported an error")
+ }
+}
diff --git a/cmd/restic/cmd_recover.go b/cmd/restic/cmd_recover.go
index 65f4c8750..85dcc23d7 100644
--- a/cmd/restic/cmd_recover.go
+++ b/cmd/restic/cmd_recover.go
@@ -46,7 +46,7 @@ func runRecover(ctx context.Context, gopts GlobalOptions) error {
return err
}
- lock, ctx, err := lockRepo(ctx, repo)
+ lock, ctx, err := lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
defer unlockRepo(lock)
if err != nil {
return err
diff --git a/cmd/restic/cmd_repair.go b/cmd/restic/cmd_repair.go
new file mode 100644
index 000000000..aefe02f3c
--- /dev/null
+++ b/cmd/restic/cmd_repair.go
@@ -0,0 +1,14 @@
+package main
+
+import (
+ "github.com/spf13/cobra"
+)
+
+var cmdRepair = &cobra.Command{
+ Use: "repair",
+ Short: "Repair the repository",
+}
+
+func init() {
+ cmdRoot.AddCommand(cmdRepair)
+}
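cmd_repair.go is nothing but a cobra parent command: it carries no RunE of its
own, and "repair index" and "repair snapshots" hang off it via AddCommand. The
minimal shape of that grouping, sketched outside restic:

    package main

    import "github.com/spf13/cobra"

    func main() {
        root := &cobra.Command{Use: "app"}

        // parent without RunE: "app repair" alone prints the subcommand help
        repair := &cobra.Command{Use: "repair", Short: "Repair the repository"}
        repair.AddCommand(&cobra.Command{
            Use:   "index",
            Short: "Build a new index",
            RunE: func(cmd *cobra.Command, args []string) error {
                return nil // real work goes here
            },
        })

        root.AddCommand(repair)
        _ = root.Execute()
    }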
diff --git a/cmd/restic/cmd_rebuild_index.go b/cmd/restic/cmd_repair_index.go
index 6d49cb917..b1905836a 100644
--- a/cmd/restic/cmd_rebuild_index.go
+++ b/cmd/restic/cmd_repair_index.go
@@ -7,15 +7,15 @@ import (
"github.com/restic/restic/internal/pack"
"github.com/restic/restic/internal/repository"
"github.com/restic/restic/internal/restic"
-
"github.com/spf13/cobra"
+ "github.com/spf13/pflag"
)
-var cmdRebuildIndex = &cobra.Command{
- Use: "rebuild-index [flags]",
+var cmdRepairIndex = &cobra.Command{
+ Use: "index [flags]",
Short: "Build a new index",
Long: `
-The "rebuild-index" command creates a new index based on the pack files in the
+The "repair index" command creates a new index based on the pack files in the
repository.
EXIT STATUS
@@ -25,40 +25,52 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
`,
DisableAutoGenTag: true,
RunE: func(cmd *cobra.Command, args []string) error {
- return runRebuildIndex(cmd.Context(), rebuildIndexOptions, globalOptions)
+ return runRebuildIndex(cmd.Context(), repairIndexOptions, globalOptions)
},
}
-// RebuildIndexOptions collects all options for the rebuild-index command.
-type RebuildIndexOptions struct {
+var cmdRebuildIndex = &cobra.Command{
+ Use: "rebuild-index [flags]",
+ Short: cmdRepairIndex.Short,
+ Long: cmdRepairIndex.Long,
+ Deprecated: `Use "repair index" instead`,
+ DisableAutoGenTag: true,
+ RunE: cmdRepairIndex.RunE,
+}
+
+// RepairIndexOptions collects all options for the repair index command.
+type RepairIndexOptions struct {
ReadAllPacks bool
}
-var rebuildIndexOptions RebuildIndexOptions
+var repairIndexOptions RepairIndexOptions
func init() {
+ cmdRepair.AddCommand(cmdRepairIndex)
+ // add alias for old name
cmdRoot.AddCommand(cmdRebuildIndex)
- f := cmdRebuildIndex.Flags()
- f.BoolVar(&rebuildIndexOptions.ReadAllPacks, "read-all-packs", false, "read all pack files to generate new index from scratch")
+ for _, f := range []*pflag.FlagSet{cmdRepairIndex.Flags(), cmdRebuildIndex.Flags()} {
+ f.BoolVar(&repairIndexOptions.ReadAllPacks, "read-all-packs", false, "read all pack files to generate new index from scratch")
+ }
}
-func runRebuildIndex(ctx context.Context, opts RebuildIndexOptions, gopts GlobalOptions) error {
+func runRebuildIndex(ctx context.Context, opts RepairIndexOptions, gopts GlobalOptions) error {
repo, err := OpenRepository(ctx, gopts)
if err != nil {
return err
}
- lock, ctx, err := lockRepoExclusive(ctx, repo)
+ lock, ctx, err := lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON)
defer unlockRepo(lock)
if err != nil {
return err
}
- return rebuildIndex(ctx, opts, gopts, repo, restic.NewIDSet())
+ return rebuildIndex(ctx, opts, gopts, repo)
}
-func rebuildIndex(ctx context.Context, opts RebuildIndexOptions, gopts GlobalOptions, repo *repository.Repository, ignorePacks restic.IDSet) error {
+func rebuildIndex(ctx context.Context, opts RepairIndexOptions, gopts GlobalOptions, repo *repository.Repository) error {
var obsoleteIndexes restic.IDs
packSizeFromList := make(map[restic.ID]int64)
packSizeFromIndex := make(map[restic.ID]int64)
@@ -130,7 +142,7 @@ func rebuildIndex(ctx context.Context, opts RebuildIndexOptions, gopts GlobalOpt
if len(packSizeFromList) > 0 {
Verbosef("reading pack files\n")
- bar := newProgressMax(!globalOptions.Quiet, uint64(len(packSizeFromList)), "packs")
+ bar := newProgressMax(!gopts.Quiet, uint64(len(packSizeFromList)), "packs")
invalidFiles, err := repo.CreateIndexFromPacks(ctx, packSizeFromList, bar)
bar.Done()
if err != nil {
diff --git a/cmd/restic/cmd_repair_index_integration_test.go b/cmd/restic/cmd_repair_index_integration_test.go
new file mode 100644
index 000000000..f451173a3
--- /dev/null
+++ b/cmd/restic/cmd_repair_index_integration_test.go
@@ -0,0 +1,140 @@
+package main
+
+import (
+ "context"
+ "io"
+ "path/filepath"
+ "strings"
+ "sync"
+ "testing"
+
+ "github.com/restic/restic/internal/errors"
+ "github.com/restic/restic/internal/index"
+ "github.com/restic/restic/internal/restic"
+ rtest "github.com/restic/restic/internal/test"
+)
+
+func testRunRebuildIndex(t testing.TB, gopts GlobalOptions) {
+ rtest.OK(t, withRestoreGlobalOptions(func() error {
+ globalOptions.stdout = io.Discard
+ return runRebuildIndex(context.TODO(), RepairIndexOptions{}, gopts)
+ }))
+}
+
+func testRebuildIndex(t *testing.T, backendTestHook backendWrapper) {
+ env, cleanup := withTestEnvironment(t)
+ defer cleanup()
+
+ datafile := filepath.Join("..", "..", "internal", "checker", "testdata", "duplicate-packs-in-index-test-repo.tar.gz")
+ rtest.SetupTarTestFixture(t, env.base, datafile)
+
+ out, err := testRunCheckOutput(env.gopts, false)
+ if !strings.Contains(out, "contained in several indexes") {
+ t.Fatalf("did not find checker hint for packs in several indexes")
+ }
+
+ if err != nil {
+ t.Fatalf("expected no error from checker for test repository, got %v", err)
+ }
+
+ if !strings.Contains(out, "restic repair index") {
+ t.Fatalf("did not find hint for repair index command")
+ }
+
+ env.gopts.backendTestHook = backendTestHook
+ testRunRebuildIndex(t, env.gopts)
+
+ env.gopts.backendTestHook = nil
+ out, err = testRunCheckOutput(env.gopts, false)
+ if len(out) != 0 {
+ t.Fatalf("expected no output from the checker, got: %v", out)
+ }
+
+ if err != nil {
+ t.Fatalf("expected no error from checker after repair index, got: %v", err)
+ }
+}
+
+func TestRebuildIndex(t *testing.T) {
+ testRebuildIndex(t, nil)
+}
+
+func TestRebuildIndexAlwaysFull(t *testing.T) {
+ indexFull := index.IndexFull
+ defer func() {
+ index.IndexFull = indexFull
+ }()
+ index.IndexFull = func(*index.Index, bool) bool { return true }
+ testRebuildIndex(t, nil)
+}
+
+// indexErrorBackend corrupts the first index file read from the backend.
+type indexErrorBackend struct {
+ restic.Backend
+ lock sync.Mutex
+ hasErred bool
+}
+
+func (b *indexErrorBackend) Load(ctx context.Context, h restic.Handle, length int, offset int64, consumer func(rd io.Reader) error) error {
+ return b.Backend.Load(ctx, h, length, offset, func(rd io.Reader) error {
+ // protect hasErred
+ b.lock.Lock()
+ defer b.lock.Unlock()
+ if !b.hasErred && h.Type == restic.IndexFile {
+ b.hasErred = true
+ return consumer(errorReadCloser{rd})
+ }
+ return consumer(rd)
+ })
+}
+
+type errorReadCloser struct {
+ io.Reader
+}
+
+func (erd errorReadCloser) Read(p []byte) (int, error) {
+ n, err := erd.Reader.Read(p)
+ if n > 0 {
+ p[0] ^= 1
+ }
+ return n, err
+}
+
+func TestRebuildIndexDamage(t *testing.T) {
+ testRebuildIndex(t, func(r restic.Backend) (restic.Backend, error) {
+ return &indexErrorBackend{
+ Backend: r,
+ }, nil
+ })
+}
+
+type appendOnlyBackend struct {
+ restic.Backend
+}
+
+// called via repo.Backend().Remove()
+func (b *appendOnlyBackend) Remove(_ context.Context, h restic.Handle) error {
+ return errors.Errorf("Failed to remove %v", h)
+}
+
+func TestRebuildIndexFailsOnAppendOnly(t *testing.T) {
+ env, cleanup := withTestEnvironment(t)
+ defer cleanup()
+
+ datafile := filepath.Join("..", "..", "internal", "checker", "testdata", "duplicate-packs-in-index-test-repo.tar.gz")
+ rtest.SetupTarTestFixture(t, env.base, datafile)
+
+ err := withRestoreGlobalOptions(func() error {
+ globalOptions.stdout = io.Discard
+
+ env.gopts.backendTestHook = func(r restic.Backend) (restic.Backend, error) {
+ return &appendOnlyBackend{r}, nil
+ }
+ return runRebuildIndex(context.TODO(), RepairIndexOptions{}, env.gopts)
+ })
+
+ if err == nil {
+ t.Error("expected rebuildIndex to fail")
+ }
+ t.Log(err)
+}
diff --git a/cmd/restic/cmd_repair_snapshots.go b/cmd/restic/cmd_repair_snapshots.go
new file mode 100644
index 000000000..03736795c
--- /dev/null
+++ b/cmd/restic/cmd_repair_snapshots.go
@@ -0,0 +1,176 @@
+package main
+
+import (
+ "context"
+
+ "github.com/restic/restic/internal/backend"
+ "github.com/restic/restic/internal/errors"
+ "github.com/restic/restic/internal/restic"
+ "github.com/restic/restic/internal/walker"
+
+ "github.com/spf13/cobra"
+)
+
+var cmdRepairSnapshots = &cobra.Command{
+ Use: "snapshots [flags] [snapshot ID] [...]",
+ Short: "Repair snapshots",
+ Long: `
+The "repair snapshots" command repairs broken snapshots. It scans the given
+snapshots and generates new ones with damaged directories and file contents
+removed. If the broken snapshots are deleted, a prune run will be able to
+clean up the repository.
+
+The command depends on a correct index, so make sure to run "repair index"
+first!
+
+
+WARNING
+=======
+
+Repairing and deleting broken snapshots causes data loss! It will remove broken
+directories and modify broken files in the affected snapshots.
+
+If the contents of directories and files are still available, the better option
+is to run "backup", which in that case can heal the existing snapshots. Only
+use the "repair snapshots" command if you need to recover an old and broken
+snapshot!
+
+EXIT STATUS
+===========
+
+Exit status is 0 if the command was successful, and non-zero if there was any error.
+`,
+ DisableAutoGenTag: true,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ return runRepairSnapshots(cmd.Context(), globalOptions, repairSnapshotOptions, args)
+ },
+}
+
+// RepairOptions collects all options for the repair command.
+type RepairOptions struct {
+ DryRun bool
+ Forget bool
+
+ restic.SnapshotFilter
+}
+
+var repairSnapshotOptions RepairOptions
+
+func init() {
+ cmdRepair.AddCommand(cmdRepairSnapshots)
+ flags := cmdRepairSnapshots.Flags()
+
+ flags.BoolVarP(&repairSnapshotOptions.DryRun, "dry-run", "n", false, "do not do anything, just print what would be done")
+ flags.BoolVarP(&repairSnapshotOptions.Forget, "forget", "", false, "remove original snapshots after creating new ones")
+
+ initMultiSnapshotFilter(flags, &repairSnapshotOptions.SnapshotFilter, true)
+}
+
+func runRepairSnapshots(ctx context.Context, gopts GlobalOptions, opts RepairOptions, args []string) error {
+ repo, err := OpenRepository(ctx, gopts)
+ if err != nil {
+ return err
+ }
+
+ if !opts.DryRun {
+ var lock *restic.Lock
+ var err error
+ lock, ctx, err = lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON)
+ defer unlockRepo(lock)
+ if err != nil {
+ return err
+ }
+ } else {
+ repo.SetDryRun()
+ }
+
+ snapshotLister, err := backend.MemorizeList(ctx, repo.Backend(), restic.SnapshotFile)
+ if err != nil {
+ return err
+ }
+
+ if err := repo.LoadIndex(ctx); err != nil {
+ return err
+ }
+
+ // Three error cases are checked:
+ // - tree is a nil tree (-> will be replaced by an empty tree)
+ // - trees which cannot be loaded (-> the tree contents will be removed)
+ // - files whose contents are not fully available (-> file will be modified)
+ rewriter := walker.NewTreeRewriter(walker.RewriteOpts{
+ RewriteNode: func(node *restic.Node, path string) *restic.Node {
+ if node.Type != "file" {
+ return node
+ }
+
+ ok := true
+ var newContent restic.IDs = restic.IDs{}
+ var newSize uint64
+ // check all contents and remove if not available
+ for _, id := range node.Content {
+ if size, found := repo.LookupBlobSize(id, restic.DataBlob); !found {
+ ok = false
+ } else {
+ newContent = append(newContent, id)
+ newSize += uint64(size)
+ }
+ }
+ if !ok {
+ Verbosef(" file %q: removed missing content\n", path)
+ } else if newSize != node.Size {
+ Verbosef(" file %q: fixed incorrect size\n", path)
+ }
+ // no-ops if already correct
+ node.Content = newContent
+ node.Size = newSize
+ return node
+ },
+ RewriteFailedTree: func(nodeID restic.ID, path string, _ error) (restic.ID, error) {
+ if path == "/" {
+ Verbosef(" dir %q: not readable\n", path)
+ // remove snapshots with invalid root node
+ return restic.ID{}, nil
+ }
+ // If a subtree fails to load, remove it
+ Verbosef(" dir %q: replaced with empty directory\n", path)
+ emptyID, err := restic.SaveTree(ctx, repo, &restic.Tree{})
+ if err != nil {
+ return restic.ID{}, err
+ }
+ return emptyID, nil
+ },
+ AllowUnstableSerialization: true,
+ })
+
+ changedCount := 0
+ for sn := range FindFilteredSnapshots(ctx, snapshotLister, repo, &opts.SnapshotFilter, args) {
+ Verbosef("\nsnapshot %s of %v at %s)\n", sn.ID().Str(), sn.Paths, sn.Time)
+ changed, err := filterAndReplaceSnapshot(ctx, repo, sn,
+ func(ctx context.Context, sn *restic.Snapshot) (restic.ID, error) {
+ return rewriter.RewriteTree(ctx, repo, "/", *sn.Tree)
+ }, opts.DryRun, opts.Forget, "repaired")
+ if err != nil {
+ return errors.Fatalf("unable to rewrite snapshot ID %q: %v", sn.ID().Str(), err)
+ }
+ if changed {
+ changedCount++
+ }
+ }
+
+ Verbosef("\n")
+ if changedCount == 0 {
+ if !opts.DryRun {
+ Verbosef("no snapshots were modified\n")
+ } else {
+ Verbosef("no snapshots would be modified\n")
+ }
+ } else {
+ if !opts.DryRun {
+ Verbosef("modified %v snapshots\n", changedCount)
+ } else {
+ Verbosef("would modify %v snapshots\n", changedCount)
+ }
+ }
+
+ return nil
+}
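The two callbacks handed to walker.NewTreeRewriter carry the entire repair
policy: RewriteNode fixes up file nodes in trees that load, RewriteFailedTree
substitutes for trees that do not. With the same hooks (signatures as in this
file), a different policy is a small change; a hypothetical variant that drops
fifo and socket nodes instead of repairing file contents, relying on the
rewriter skipping nodes for which RewriteNode returns nil:

    rewriter := walker.NewTreeRewriter(walker.RewriteOpts{
        RewriteNode: func(node *restic.Node, path string) *restic.Node {
            if node.Type == "fifo" || node.Type == "socket" {
                Verbosef(" node %q: dropped (type %s)\n", path, node.Type)
                return nil // nil removes the node from the rewritten tree
            }
            return node
        },
    })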
diff --git a/cmd/restic/cmd_repair_snapshots_integration_test.go b/cmd/restic/cmd_repair_snapshots_integration_test.go
new file mode 100644
index 000000000..34cd186d3
--- /dev/null
+++ b/cmd/restic/cmd_repair_snapshots_integration_test.go
@@ -0,0 +1,135 @@
+package main
+
+import (
+ "context"
+ "hash/fnv"
+ "io"
+ "math/rand"
+ "os"
+ "path/filepath"
+ "reflect"
+ "testing"
+
+ "github.com/restic/restic/internal/restic"
+ rtest "github.com/restic/restic/internal/test"
+)
+
+func testRunRepairSnapshot(t testing.TB, gopts GlobalOptions, forget bool) {
+ opts := RepairOptions{
+ Forget: forget,
+ }
+
+ rtest.OK(t, runRepairSnapshots(context.TODO(), gopts, opts, nil))
+}
+
+func createRandomFile(t testing.TB, env *testEnvironment, path string, size int) {
+ fn := filepath.Join(env.testdata, path)
+ rtest.OK(t, os.MkdirAll(filepath.Dir(fn), 0o755))
+
+ h := fnv.New64()
+ _, err := h.Write([]byte(path))
+ rtest.OK(t, err)
+ r := rand.New(rand.NewSource(int64(h.Sum64())))
+
+ f, err := os.OpenFile(fn, os.O_CREATE|os.O_RDWR, 0o644)
+ rtest.OK(t, err)
+ _, err = io.Copy(f, io.LimitReader(r, int64(size)))
+ rtest.OK(t, err)
+ rtest.OK(t, f.Close())
+}
+
+func TestRepairSnapshotsWithLostData(t *testing.T) {
+ env, cleanup := withTestEnvironment(t)
+ defer cleanup()
+
+ testRunInit(t, env.gopts)
+
+ createRandomFile(t, env, "foo/bar/file", 512*1024)
+ testRunBackup(t, "", []string{env.testdata}, BackupOptions{}, env.gopts)
+ testListSnapshots(t, env.gopts, 1)
+ // damage repository
+ removePacksExcept(env.gopts, t, restic.NewIDSet(), false)
+
+ createRandomFile(t, env, "foo/bar/file2", 256*1024)
+ testRunBackup(t, "", []string{env.testdata}, BackupOptions{}, env.gopts)
+ snapshotIDs := testListSnapshots(t, env.gopts, 2)
+ testRunCheckMustFail(t, env.gopts)
+
+ // repair but keep broken snapshots
+ testRunRebuildIndex(t, env.gopts)
+ testRunRepairSnapshot(t, env.gopts, false)
+ testListSnapshots(t, env.gopts, 4)
+ testRunCheckMustFail(t, env.gopts)
+
+ // repository must be ok after removing the broken snapshots
+ testRunForget(t, env.gopts, snapshotIDs[0].String(), snapshotIDs[1].String())
+ testListSnapshots(t, env.gopts, 2)
+ _, err := testRunCheckOutput(env.gopts, false)
+ rtest.OK(t, err)
+}
+
+func TestRepairSnapshotsWithLostTree(t *testing.T) {
+ env, cleanup := withTestEnvironment(t)
+ defer cleanup()
+
+ testRunInit(t, env.gopts)
+
+ createRandomFile(t, env, "foo/bar/file", 12345)
+ testRunBackup(t, "", []string{env.testdata}, BackupOptions{}, env.gopts)
+ oldSnapshot := testListSnapshots(t, env.gopts, 1)
+ oldPacks := testRunList(t, "packs", env.gopts)
+
+ // keep foo/bar unchanged
+ createRandomFile(t, env, "foo/bar2", 1024)
+ testRunBackup(t, "", []string{env.testdata}, BackupOptions{}, env.gopts)
+ testListSnapshots(t, env.gopts, 2)
+
+ // remove tree for foo/bar and the now completely broken first snapshot
+ removePacks(env.gopts, t, restic.NewIDSet(oldPacks...))
+ testRunForget(t, env.gopts, oldSnapshot[0].String())
+ testRunCheckMustFail(t, env.gopts)
+
+ // repair
+ testRunRebuildIndex(t, env.gopts)
+ testRunRepairSnapshot(t, env.gopts, true)
+ testListSnapshots(t, env.gopts, 1)
+ _, err := testRunCheckOutput(env.gopts, false)
+ rtest.OK(t, err)
+}
+
+func TestRepairSnapshotsWithLostRootTree(t *testing.T) {
+ env, cleanup := withTestEnvironment(t)
+ defer cleanup()
+
+ testRunInit(t, env.gopts)
+
+ createRandomFile(t, env, "foo/bar/file", 12345)
+ testRunBackup(t, "", []string{env.testdata}, BackupOptions{}, env.gopts)
+ testListSnapshots(t, env.gopts, 1)
+ oldPacks := testRunList(t, "packs", env.gopts)
+
+ // remove all trees
+ removePacks(env.gopts, t, restic.NewIDSet(oldPacks...))
+ testRunCheckMustFail(t, env.gopts)
+
+ // repair
+ testRunRebuildIndex(t, env.gopts)
+ testRunRepairSnapshot(t, env.gopts, true)
+ testListSnapshots(t, env.gopts, 0)
+ _, err := testRunCheckOutput(env.gopts, false)
+ rtest.OK(t, err)
+}
+
+func TestRepairSnapshotsIntact(t *testing.T) {
+ env, cleanup := withTestEnvironment(t)
+ defer cleanup()
+ testSetupBackupData(t, env)
+ testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, BackupOptions{}, env.gopts)
+ oldSnapshotIDs := testListSnapshots(t, env.gopts, 1)
+
+ // repairing an intact snapshot must not change it
+ testRunRepairSnapshot(t, env.gopts, false)
+ snapshotIDs := testListSnapshots(t, env.gopts, 1)
+ rtest.Assert(t, reflect.DeepEqual(oldSnapshotIDs, snapshotIDs), "unexpected snapshot id mismatch %v vs. %v", oldSnapshotIDs, snapshotIDs)
+ testRunCheck(t, env.gopts)
+}
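Note: the tests above lean on testRunCheckMustFail, which lives with the shared integration helpers rather than in this file. A plausible sketch, assuming only the testRunCheckOutput signature already used above, not necessarily the exact helper:

    func testRunCheckMustFail(t testing.TB, gopts GlobalOptions) {
        _, err := testRunCheckOutput(gopts, false)
        rtest.Assert(t, err != nil, "expected check to fail, but it succeeded")
    }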
diff --git a/cmd/restic/cmd_restore.go b/cmd/restic/cmd_restore.go
index 579711662..6ef8c99db 100644
--- a/cmd/restic/cmd_restore.go
+++ b/cmd/restic/cmd_restore.go
@@ -3,6 +3,7 @@ package main
import (
"context"
"strings"
+ "sync"
"time"
"github.com/restic/restic/internal/debug"
@@ -10,6 +11,9 @@ import (
"github.com/restic/restic/internal/filter"
"github.com/restic/restic/internal/restic"
"github.com/restic/restic/internal/restorer"
+ "github.com/restic/restic/internal/ui"
+ restoreui "github.com/restic/restic/internal/ui/restore"
+ "github.com/restic/restic/internal/ui/termstatus"
"github.com/spf13/cobra"
)
@@ -31,7 +35,31 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
`,
DisableAutoGenTag: true,
RunE: func(cmd *cobra.Command, args []string) error {
- return runRestore(cmd.Context(), restoreOptions, globalOptions, args)
+ ctx := cmd.Context()
+ var wg sync.WaitGroup
+ cancelCtx, cancel := context.WithCancel(ctx)
+ defer func() {
+ // shutdown termstatus
+ cancel()
+ wg.Wait()
+ }()
+
+ term := termstatus.New(globalOptions.stdout, globalOptions.stderr, globalOptions.Quiet)
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ term.Run(cancelCtx)
+ }()
+
+ // allow usage of warnf / verbosef
+ prevStdout, prevStderr := globalOptions.stdout, globalOptions.stderr
+ defer func() {
+ globalOptions.stdout, globalOptions.stderr = prevStdout, prevStderr
+ }()
+ stdioWrapper := ui.NewStdioWrapper(term)
+ globalOptions.stdout, globalOptions.stderr = stdioWrapper.Stdout(), stdioWrapper.Stderr()
+
+ return runRestore(ctx, restoreOptions, globalOptions, term, args)
},
}
@@ -64,7 +92,9 @@ func init() {
flags.BoolVar(&restoreOptions.Verify, "verify", false, "verify restored files content")
}
-func runRestore(ctx context.Context, opts RestoreOptions, gopts GlobalOptions, args []string) error {
+func runRestore(ctx context.Context, opts RestoreOptions, gopts GlobalOptions,
+ term *termstatus.Terminal, args []string) error {
+
hasExcludes := len(opts.Exclude) > 0 || len(opts.InsensitiveExclude) > 0
hasIncludes := len(opts.Include) > 0 || len(opts.InsensitiveInclude) > 0
@@ -124,14 +154,14 @@ func runRestore(ctx context.Context, opts RestoreOptions, gopts GlobalOptions, a
if !gopts.NoLock {
var lock *restic.Lock
- lock, ctx, err = lockRepo(ctx, repo)
+ lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
defer unlockRepo(lock)
if err != nil {
return err
}
}
- sn, err := (&restic.SnapshotFilter{
+ sn, subfolder, err := (&restic.SnapshotFilter{
Hosts: opts.Hosts,
Paths: opts.Paths,
Tags: opts.Tags,
@@ -145,11 +175,25 @@ func runRestore(ctx context.Context, opts RestoreOptions, gopts GlobalOptions, a
return err
}
- res := restorer.NewRestorer(ctx, repo, sn, opts.Sparse)
+ sn.Tree, err = restic.FindTreeDirectory(ctx, repo, sn.Tree, subfolder)
+ if err != nil {
+ return err
+ }
+
+ msg := ui.NewMessage(term, gopts.verbosity)
+ var printer restoreui.ProgressPrinter
+ if gopts.JSON {
+ printer = restoreui.NewJSONProgress(term)
+ } else {
+ printer = restoreui.NewTextProgress(term)
+ }
+
+ progress := restoreui.NewProgress(printer, calculateProgressInterval(!gopts.Quiet, gopts.JSON))
+ res := restorer.NewRestorer(repo, sn, opts.Sparse, progress)
totalErrors := 0
res.Error = func(location string, err error) error {
- Warnf("ignoring error for %s: %s\n", location, err)
+ msg.E("ignoring error for %s: %s\n", location, err)
totalErrors++
return nil
}
@@ -159,12 +203,12 @@ func runRestore(ctx context.Context, opts RestoreOptions, gopts GlobalOptions, a
selectExcludeFilter := func(item string, dstpath string, node *restic.Node) (selectedForRestore bool, childMayBeSelected bool) {
matched, err := filter.List(excludePatterns, item)
if err != nil {
- Warnf("error for exclude pattern: %v", err)
+ msg.E("error for exclude pattern: %v", err)
}
matchedInsensitive, err := filter.List(insensitiveExcludePatterns, strings.ToLower(item))
if err != nil {
- Warnf("error for iexclude pattern: %v", err)
+ msg.E("error for iexclude pattern: %v", err)
}
// An exclude filter is basically a 'wildcard but foo',
@@ -182,12 +226,12 @@ func runRestore(ctx context.Context, opts RestoreOptions, gopts GlobalOptions, a
selectIncludeFilter := func(item string, dstpath string, node *restic.Node) (selectedForRestore bool, childMayBeSelected bool) {
matched, childMayMatch, err := filter.ListWithChild(includePatterns, item)
if err != nil {
- Warnf("error for include pattern: %v", err)
+ msg.E("error for include pattern: %v", err)
}
matchedInsensitive, childMayMatchInsensitive, err := filter.ListWithChild(insensitiveIncludePatterns, strings.ToLower(item))
if err != nil {
- Warnf("error for iexclude pattern: %v", err)
+ msg.E("error for iexclude pattern: %v", err)
}
selectedForRestore = matched || matchedInsensitive
@@ -202,19 +246,25 @@ func runRestore(ctx context.Context, opts RestoreOptions, gopts GlobalOptions, a
res.SelectFilter = selectIncludeFilter
}
- Verbosef("restoring %s to %s\n", res.Snapshot(), opts.Target)
+ if !gopts.JSON {
+ msg.P("restoring %s to %s\n", res.Snapshot(), opts.Target)
+ }
err = res.RestoreTo(ctx, opts.Target)
if err != nil {
return err
}
+ progress.Finish()
+
if totalErrors > 0 {
return errors.Fatalf("There were %d errors\n", totalErrors)
}
if opts.Verify {
- Verbosef("verifying files in %s\n", opts.Target)
+ if !gopts.JSON {
+ msg.P("verifying files in %s\n", opts.Target)
+ }
var count int
t0 := time.Now()
count, err = res.VerifyFiles(ctx, opts.Target)
@@ -224,8 +274,11 @@ func runRestore(ctx context.Context, opts RestoreOptions, gopts GlobalOptions, a
if totalErrors > 0 {
return errors.Fatalf("There were %d errors\n", totalErrors)
}
- Verbosef("finished verifying %d files in %s (took %s)\n", count, opts.Target,
- time.Since(t0).Round(time.Millisecond))
+
+ if !gopts.JSON {
+ msg.P("finished verifying %d files in %s (took %s)\n", count, opts.Target,
+ time.Since(t0).Round(time.Millisecond))
+ }
}
return nil
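The RunE wiring above (terminal goroutine, cancel, WaitGroup, stdout/stderr swap) is one reusable pattern: start termstatus, run the command, then shut the terminal down cleanly. A minimal sketch of that pattern, assuming only the termstatus API shown in this diff; the integration tests below use a helper of this shape named withTermStatus:

    func withTermStatus(gopts GlobalOptions, fn func(ctx context.Context, term *termstatus.Terminal) error) error {
        ctx, cancel := context.WithCancel(context.Background())
        defer cancel()

        term := termstatus.New(gopts.stdout, gopts.stderr, gopts.Quiet)

        var wg sync.WaitGroup
        wg.Add(1)
        go func() {
            defer wg.Done()
            term.Run(ctx) // returns once ctx is cancelled
        }()

        err := fn(ctx, term)
        cancel()
        wg.Wait() // ensure all terminal output has been flushed
        return err
    }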
diff --git a/cmd/restic/cmd_restore_integration_test.go b/cmd/restic/cmd_restore_integration_test.go
new file mode 100644
index 000000000..2c7cbe1fb
--- /dev/null
+++ b/cmd/restic/cmd_restore_integration_test.go
@@ -0,0 +1,307 @@
+package main
+
+import (
+ "context"
+ "fmt"
+ "io"
+ mrand "math/rand"
+ "os"
+ "path/filepath"
+ "syscall"
+ "testing"
+ "time"
+
+ "github.com/restic/restic/internal/filter"
+ "github.com/restic/restic/internal/restic"
+ rtest "github.com/restic/restic/internal/test"
+ "github.com/restic/restic/internal/ui/termstatus"
+)
+
+func testRunRestore(t testing.TB, opts GlobalOptions, dir string, snapshotID restic.ID) {
+ testRunRestoreExcludes(t, opts, dir, snapshotID, nil)
+}
+
+func testRunRestoreExcludes(t testing.TB, gopts GlobalOptions, dir string, snapshotID restic.ID, excludes []string) {
+ opts := RestoreOptions{
+ Target: dir,
+ Exclude: excludes,
+ }
+
+ rtest.OK(t, testRunRestoreAssumeFailure(snapshotID.String(), opts, gopts))
+}
+
+func testRunRestoreAssumeFailure(snapshotID string, opts RestoreOptions, gopts GlobalOptions) error {
+ return withTermStatus(gopts, func(ctx context.Context, term *termstatus.Terminal) error {
+ return runRestore(ctx, opts, gopts, term, []string{snapshotID})
+ })
+}
+
+func testRunRestoreLatest(t testing.TB, gopts GlobalOptions, dir string, paths []string, hosts []string) {
+ opts := RestoreOptions{
+ Target: dir,
+ SnapshotFilter: restic.SnapshotFilter{
+ Hosts: hosts,
+ Paths: paths,
+ },
+ }
+
+ rtest.OK(t, testRunRestoreAssumeFailure("latest", opts, gopts))
+}
+
+func testRunRestoreIncludes(t testing.TB, gopts GlobalOptions, dir string, snapshotID restic.ID, includes []string) {
+ opts := RestoreOptions{
+ Target: dir,
+ Include: includes,
+ }
+
+ rtest.OK(t, testRunRestoreAssumeFailure(snapshotID.String(), opts, gopts))
+}
+
+func TestRestoreFilter(t *testing.T) {
+ testfiles := []struct {
+ name string
+ size uint
+ }{
+ {"testfile1.c", 100},
+ {"testfile2.exe", 101},
+ {"subdir1/subdir2/testfile3.docx", 102},
+ {"subdir1/subdir2/testfile4.c", 102},
+ }
+
+ env, cleanup := withTestEnvironment(t)
+ defer cleanup()
+
+ testRunInit(t, env.gopts)
+
+ for _, testFile := range testfiles {
+ p := filepath.Join(env.testdata, testFile.name)
+ rtest.OK(t, os.MkdirAll(filepath.Dir(p), 0755))
+ rtest.OK(t, appendRandomData(p, testFile.size))
+ }
+
+ opts := BackupOptions{}
+
+ testRunBackup(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts)
+ testRunCheck(t, env.gopts)
+
+ snapshotID := testListSnapshots(t, env.gopts, 1)[0]
+
+ // no restore filter should restore all files
+ testRunRestore(t, env.gopts, filepath.Join(env.base, "restore0"), snapshotID)
+ for _, testFile := range testfiles {
+ rtest.OK(t, testFileSize(filepath.Join(env.base, "restore0", "testdata", testFile.name), int64(testFile.size)))
+ }
+
+ for i, pat := range []string{"*.c", "*.exe", "*", "*file3*"} {
+ base := filepath.Join(env.base, fmt.Sprintf("restore%d", i+1))
+ testRunRestoreExcludes(t, env.gopts, base, snapshotID, []string{pat})
+ for _, testFile := range testfiles {
+ err := testFileSize(filepath.Join(base, "testdata", testFile.name), int64(testFile.size))
+ if ok, _ := filter.Match(pat, filepath.Base(testFile.name)); !ok {
+ rtest.OK(t, err)
+ } else {
+ rtest.Assert(t, os.IsNotExist(err),
+ "expected %v to not exist in restore step %v, but it exists, err %v", testFile.name, i+1, err)
+ }
+ }
+ }
+}
+
+func TestRestore(t *testing.T) {
+ env, cleanup := withTestEnvironment(t)
+ defer cleanup()
+
+ testRunInit(t, env.gopts)
+
+ for i := 0; i < 10; i++ {
+ p := filepath.Join(env.testdata, fmt.Sprintf("foo/bar/testfile%v", i))
+ rtest.OK(t, os.MkdirAll(filepath.Dir(p), 0755))
+ rtest.OK(t, appendRandomData(p, uint(mrand.Intn(2<<21))))
+ }
+
+ opts := BackupOptions{}
+
+ testRunBackup(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts)
+ testRunCheck(t, env.gopts)
+
+ // Restore latest without any filters
+ restoredir := filepath.Join(env.base, "restore")
+ testRunRestoreLatest(t, env.gopts, restoredir, nil, nil)
+
+ diff := directoriesContentsDiff(env.testdata, filepath.Join(restoredir, filepath.Base(env.testdata)))
+ rtest.Assert(t, diff == "", "directories are not equal %v", diff)
+}
+
+func TestRestoreLatest(t *testing.T) {
+ env, cleanup := withTestEnvironment(t)
+ defer cleanup()
+
+ testRunInit(t, env.gopts)
+
+ p := filepath.Join(env.testdata, "testfile.c")
+ rtest.OK(t, os.MkdirAll(filepath.Dir(p), 0755))
+ rtest.OK(t, appendRandomData(p, 100))
+
+ opts := BackupOptions{}
+
+ // chdir manually here so we can get the current directory. This is not the
+ // same as the temp dir returned by os.MkdirTemp() on darwin.
+ back := rtest.Chdir(t, filepath.Dir(env.testdata))
+ defer back()
+
+ curdir, err := os.Getwd()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ testRunBackup(t, "", []string{filepath.Base(env.testdata)}, opts, env.gopts)
+ testRunCheck(t, env.gopts)
+
+ rtest.OK(t, os.Remove(p))
+ rtest.OK(t, appendRandomData(p, 101))
+ testRunBackup(t, "", []string{filepath.Base(env.testdata)}, opts, env.gopts)
+ testRunCheck(t, env.gopts)
+
+ // Restore latest without any filters
+ testRunRestoreLatest(t, env.gopts, filepath.Join(env.base, "restore0"), nil, nil)
+ rtest.OK(t, testFileSize(filepath.Join(env.base, "restore0", "testdata", "testfile.c"), int64(101)))
+
+ // Setup test files in different directories backed up in different snapshots
+ p1 := filepath.Join(curdir, filepath.FromSlash("p1/testfile.c"))
+
+ rtest.OK(t, os.MkdirAll(filepath.Dir(p1), 0755))
+ rtest.OK(t, appendRandomData(p1, 102))
+ testRunBackup(t, "", []string{"p1"}, opts, env.gopts)
+ testRunCheck(t, env.gopts)
+
+ p2 := filepath.Join(curdir, filepath.FromSlash("p2/testfile.c"))
+
+ rtest.OK(t, os.MkdirAll(filepath.Dir(p2), 0755))
+ rtest.OK(t, appendRandomData(p2, 103))
+ testRunBackup(t, "", []string{"p2"}, opts, env.gopts)
+ testRunCheck(t, env.gopts)
+
+ p1rAbs := filepath.Join(env.base, "restore1", "p1/testfile.c")
+ p2rAbs := filepath.Join(env.base, "restore2", "p2/testfile.c")
+
+ testRunRestoreLatest(t, env.gopts, filepath.Join(env.base, "restore1"), []string{filepath.Dir(p1)}, nil)
+ rtest.OK(t, testFileSize(p1rAbs, int64(102)))
+ _, err = os.Stat(p2rAbs)
+ rtest.Assert(t, os.IsNotExist(err),
+ "expected %v to not exist in restore, but it exists, err %v",
+ p2rAbs, err)
+
+ testRunRestoreLatest(t, env.gopts, filepath.Join(env.base, "restore2"), []string{filepath.Dir(p2)}, nil)
+ rtest.OK(t, testFileSize(p2rAbs, int64(103)))
+ _, err = os.Stat(p1rAbs)
+ rtest.Assert(t, os.IsNotExist(err),
+ "expected %v to not exist in restore, but it exists, err %v",
+ p1rAbs, err)
+}
+
+func TestRestoreWithPermissionFailure(t *testing.T) {
+ env, cleanup := withTestEnvironment(t)
+ defer cleanup()
+
+ datafile := filepath.Join("testdata", "repo-restore-permissions-test.tar.gz")
+ rtest.SetupTarTestFixture(t, env.base, datafile)
+
+ snapshots := testListSnapshots(t, env.gopts, 1)
+
+ _ = withRestoreGlobalOptions(func() error {
+ globalOptions.stderr = io.Discard
+ testRunRestore(t, env.gopts, filepath.Join(env.base, "restore"), snapshots[0])
+ return nil
+ })
+
+ // make sure that all files have been restored, regardless of any
+ // permission errors
+ files := testRunLs(t, env.gopts, snapshots[0].String())
+ for _, filename := range files {
+ fi, err := os.Lstat(filepath.Join(env.base, "restore", filename))
+ rtest.OK(t, err)
+
+ rtest.Assert(t, !isFile(fi) || fi.Size() > 0,
+ "file %v restored, but filesize is 0", filename)
+ }
+}
+
+func setZeroModTime(filename string) error {
+ var utimes = []syscall.Timespec{
+ syscall.NsecToTimespec(0),
+ syscall.NsecToTimespec(0),
+ }
+
+ return syscall.UtimesNano(filename, utimes)
+}
+
+func TestRestoreNoMetadataOnIgnoredIntermediateDirs(t *testing.T) {
+ env, cleanup := withTestEnvironment(t)
+ defer cleanup()
+
+ testRunInit(t, env.gopts)
+
+ p := filepath.Join(env.testdata, "subdir1", "subdir2", "subdir3", "file.ext")
+ rtest.OK(t, os.MkdirAll(filepath.Dir(p), 0755))
+ rtest.OK(t, appendRandomData(p, 200))
+ rtest.OK(t, setZeroModTime(filepath.Join(env.testdata, "subdir1", "subdir2")))
+
+ opts := BackupOptions{}
+
+ testRunBackup(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts)
+ testRunCheck(t, env.gopts)
+
+ snapshotID := testListSnapshots(t, env.gopts, 1)[0]
+
+ // restore with filter "*.ext", this should restore "file.ext", but
+ // since the directories are ignored and only created because of
+ // "file.ext", no meta data should be restored for them.
+ testRunRestoreIncludes(t, env.gopts, filepath.Join(env.base, "restore0"), snapshotID, []string{"*.ext"})
+
+ f1 := filepath.Join(env.base, "restore0", "testdata", "subdir1", "subdir2")
+ _, err := os.Stat(f1)
+ rtest.OK(t, err)
+
+ // restore with filter "*", this should restore meta data on everything.
+ testRunRestoreIncludes(t, env.gopts, filepath.Join(env.base, "restore1"), snapshotID, []string{"*"})
+
+ f2 := filepath.Join(env.base, "restore1", "testdata", "subdir1", "subdir2")
+ fi, err := os.Stat(f2)
+ rtest.OK(t, err)
+
+ rtest.Assert(t, fi.ModTime().Equal(time.Unix(0, 0)),
+ "meta data of intermediate directory hasn't been restored")
+}
+
+func TestRestoreLocalLayout(t *testing.T) {
+ env, cleanup := withTestEnvironment(t)
+ defer cleanup()
+
+ var tests = []struct {
+ filename string
+ layout string
+ }{
+ {"repo-layout-default.tar.gz", ""},
+ {"repo-layout-s3legacy.tar.gz", ""},
+ {"repo-layout-default.tar.gz", "default"},
+ {"repo-layout-s3legacy.tar.gz", "s3legacy"},
+ }
+
+ for _, test := range tests {
+ datafile := filepath.Join("..", "..", "internal", "backend", "testdata", test.filename)
+
+ rtest.SetupTarTestFixture(t, env.base, datafile)
+
+ env.gopts.extended["local.layout"] = test.layout
+
+ // check the repo
+ testRunCheck(t, env.gopts)
+
+ // restore latest snapshot
+ target := filepath.Join(env.base, "restore")
+ testRunRestoreLatest(t, env.gopts, target, nil, nil)
+
+ rtest.RemoveAll(t, filepath.Join(env.base, "repo"))
+ rtest.RemoveAll(t, target)
+ }
+}
diff --git a/cmd/restic/cmd_rewrite.go b/cmd/restic/cmd_rewrite.go
index 0d9aa1c8c..c08797c48 100644
--- a/cmd/restic/cmd_rewrite.go
+++ b/cmd/restic/cmd_rewrite.go
@@ -87,36 +87,67 @@ func rewriteSnapshot(ctx context.Context, repo *repository.Repository, sn *resti
return true
}
+ rewriter := walker.NewTreeRewriter(walker.RewriteOpts{
+ RewriteNode: func(node *restic.Node, path string) *restic.Node {
+ if selectByName(path) {
+ return node
+ }
+ Verbosef("excluding %s\n", path)
+ return nil
+ },
+ DisableNodeCache: true,
+ })
+
+ return filterAndReplaceSnapshot(ctx, repo, sn,
+ func(ctx context.Context, sn *restic.Snapshot) (restic.ID, error) {
+ return rewriter.RewriteTree(ctx, repo, "/", *sn.Tree)
+ }, opts.DryRun, opts.Forget, "rewrite")
+}
+
+func filterAndReplaceSnapshot(ctx context.Context, repo restic.Repository, sn *restic.Snapshot, filter func(ctx context.Context, sn *restic.Snapshot) (restic.ID, error), dryRun bool, forget bool, addTag string) (bool, error) {
+
wg, wgCtx := errgroup.WithContext(ctx)
repo.StartPackUploader(wgCtx, wg)
var filteredTree restic.ID
wg.Go(func() error {
- filteredTree, err = walker.FilterTree(wgCtx, repo, "/", *sn.Tree, &walker.TreeFilterVisitor{
- SelectByName: selectByName,
- PrintExclude: func(path string) { Verbosef(fmt.Sprintf("excluding %s\n", path)) },
- })
+ var err error
+ filteredTree, err = filter(ctx, sn)
if err != nil {
return err
}
return repo.Flush(wgCtx)
})
- err = wg.Wait()
+ err := wg.Wait()
if err != nil {
return false, err
}
+ if filteredTree.IsNull() {
+ if dryRun {
+ Verbosef("would delete empty snapshot\n")
+ } else {
+ h := restic.Handle{Type: restic.SnapshotFile, Name: sn.ID().String()}
+ if err = repo.Backend().Remove(ctx, h); err != nil {
+ return false, err
+ }
+ debug.Log("removed empty snapshot %v", sn.ID())
+ Verbosef("removed empty snapshot %v\n", sn.ID().Str())
+ }
+ return true, nil
+ }
+
if filteredTree == *sn.Tree {
debug.Log("Snapshot %v not modified", sn)
return false, nil
}
debug.Log("Snapshot %v modified", sn)
- if opts.DryRun {
+ if dryRun {
Verbosef("would save new snapshot\n")
- if opts.Forget {
+ if forget {
Verbosef("would remove old snapshot\n")
}
@@ -125,10 +156,10 @@ func rewriteSnapshot(ctx context.Context, repo *repository.Repository, sn *resti
// Always set the original snapshot id as this is essentially a new snapshot.
sn.Original = sn.ID()
- *sn.Tree = filteredTree
+ sn.Tree = &filteredTree
- if !opts.Forget {
- sn.AddTags([]string{"rewrite"})
+ if !forget {
+ sn.AddTags([]string{addTag})
}
// Save the new snapshot.
@@ -136,8 +167,9 @@ func rewriteSnapshot(ctx context.Context, repo *repository.Repository, sn *resti
if err != nil {
return false, err
}
+ Verbosef("saved new snapshot %v\n", id.Str())
- if opts.Forget {
+ if forget {
h := restic.Handle{Type: restic.SnapshotFile, Name: sn.ID().String()}
if err = repo.Backend().Remove(ctx, h); err != nil {
return false, err
@@ -145,7 +177,6 @@ func rewriteSnapshot(ctx context.Context, repo *repository.Repository, sn *resti
debug.Log("removed old snapshot %v", sn.ID())
Verbosef("removed old snapshot %v\n", sn.ID().Str())
}
- Verbosef("saved new snapshot %v\n", id.Str())
return true, nil
}
@@ -164,9 +195,9 @@ func runRewrite(ctx context.Context, opts RewriteOptions, gopts GlobalOptions, a
var err error
if opts.Forget {
Verbosef("create exclusive lock for repository\n")
- lock, ctx, err = lockRepoExclusive(ctx, repo)
+ lock, ctx, err = lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON)
} else {
- lock, ctx, err = lockRepo(ctx, repo)
+ lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
}
defer unlockRepo(lock)
if err != nil {
diff --git a/cmd/restic/integration_rewrite_test.go b/cmd/restic/cmd_rewrite_integration_test.go
index e6007973b..e6007973b 100644
--- a/cmd/restic/integration_rewrite_test.go
+++ b/cmd/restic/cmd_rewrite_integration_test.go
diff --git a/cmd/restic/cmd_snapshots.go b/cmd/restic/cmd_snapshots.go
index c5faa044a..889ac5e20 100644
--- a/cmd/restic/cmd_snapshots.go
+++ b/cmd/restic/cmd_snapshots.go
@@ -36,7 +36,7 @@ type SnapshotOptions struct {
Compact bool
Last bool // This option should be removed in favour of Latest.
Latest int
- GroupBy string
+ GroupBy restic.SnapshotGroupByOptions
}
var snapshotOptions SnapshotOptions
@@ -54,7 +54,7 @@ func init() {
panic(err)
}
f.IntVar(&snapshotOptions.Latest, "latest", 0, "only show the last `n` snapshots for each host and path")
- f.StringVarP(&snapshotOptions.GroupBy, "group-by", "g", "", "`group` snapshots by host, paths and/or tags, separated by comma")
+ f.VarP(&snapshotOptions.GroupBy, "group-by", "g", "`group` snapshots by host, paths and/or tags, separated by comma")
}
func runSnapshots(ctx context.Context, opts SnapshotOptions, gopts GlobalOptions, args []string) error {
@@ -65,7 +65,7 @@ func runSnapshots(ctx context.Context, opts SnapshotOptions, gopts GlobalOptions
if !gopts.NoLock {
var lock *restic.Lock
- lock, ctx, err = lockRepo(ctx, repo)
+ lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
defer unlockRepo(lock)
if err != nil {
return err
@@ -94,7 +94,7 @@ func runSnapshots(ctx context.Context, opts SnapshotOptions, gopts GlobalOptions
}
if gopts.JSON {
- err := printSnapshotGroupJSON(gopts.stdout, snapshotGroups, grouped)
+ err := printSnapshotGroupJSON(globalOptions.stdout, snapshotGroups, grouped)
if err != nil {
Warnf("error printing snapshots: %v\n", err)
}
@@ -103,13 +103,13 @@ func runSnapshots(ctx context.Context, opts SnapshotOptions, gopts GlobalOptions
for k, list := range snapshotGroups {
if grouped {
- err := PrintSnapshotGroupHeader(gopts.stdout, k)
+ err := PrintSnapshotGroupHeader(globalOptions.stdout, k)
if err != nil {
Warnf("error printing snapshots: %v\n", err)
return nil
}
}
- PrintSnapshots(gopts.stdout, list, nil, opts.Compact)
+ PrintSnapshots(globalOptions.stdout, list, nil, opts.Compact)
}
return nil
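Switching GroupBy from a plain string to restic.SnapshotGroupByOptions works with f.VarP because pflag accepts any type implementing its Value interface (String, Set, Type). An illustrative sketch of what the type has to provide; the field names and accepted spellings are assumptions, only the three-method contract is fixed by pflag:

    type SnapshotGroupByOptions struct {
        Host, Path, Tag bool
    }

    func (l *SnapshotGroupByOptions) String() string {
        var parts []string
        if l.Host {
            parts = append(parts, "host")
        }
        if l.Path {
            parts = append(parts, "paths")
        }
        if l.Tag {
            parts = append(parts, "tags")
        }
        return strings.Join(parts, ",")
    }

    func (l *SnapshotGroupByOptions) Set(s string) error {
        for _, option := range strings.Split(s, ",") {
            switch option {
            case "host":
                l.Host = true
            case "paths":
                l.Path = true
            case "tags":
                l.Tag = true
            case "":
                // empty value means no grouping
            default:
                return fmt.Errorf("unknown grouping option: %q", option)
            }
        }
        return nil
    }

    func (l *SnapshotGroupByOptions) Type() string {
        return "group"
    }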
diff --git a/cmd/restic/cmd_snapshots_integration_test.go b/cmd/restic/cmd_snapshots_integration_test.go
new file mode 100644
index 000000000..6eaa8faa4
--- /dev/null
+++ b/cmd/restic/cmd_snapshots_integration_test.go
@@ -0,0 +1,32 @@
+package main
+
+import (
+ "context"
+ "encoding/json"
+ "testing"
+
+ "github.com/restic/restic/internal/restic"
+ rtest "github.com/restic/restic/internal/test"
+)
+
+func testRunSnapshots(t testing.TB, gopts GlobalOptions) (newest *Snapshot, snapmap map[restic.ID]Snapshot) {
+ buf, err := withCaptureStdout(func() error {
+ gopts.JSON = true
+
+ opts := SnapshotOptions{}
+ return runSnapshots(context.TODO(), opts, gopts, []string{})
+ })
+ rtest.OK(t, err)
+
+ snapshots := []Snapshot{}
+ rtest.OK(t, json.Unmarshal(buf.Bytes(), &snapshots))
+
+ snapmap = make(map[restic.ID]Snapshot, len(snapshots))
+ for i, sn := range snapshots {
+ snapmap[*sn.ID] = sn
+ if newest == nil || sn.Time.After(newest.Time) {
+ newest = &snapshots[i]
+ }
+ }
+ return
+}
diff --git a/cmd/restic/cmd_stats.go b/cmd/restic/cmd_stats.go
index 55ba6f254..a7ecd438f 100644
--- a/cmd/restic/cmd_stats.go
+++ b/cmd/restic/cmd_stats.go
@@ -5,11 +5,15 @@ import (
"encoding/json"
"fmt"
"path/filepath"
+ "strings"
+ "github.com/restic/chunker"
"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/crypto"
+ "github.com/restic/restic/internal/repository"
"github.com/restic/restic/internal/restic"
"github.com/restic/restic/internal/ui"
+ "github.com/restic/restic/internal/ui/table"
"github.com/restic/restic/internal/walker"
"github.com/minio/sha256-simd"
@@ -49,7 +53,7 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
`,
DisableAutoGenTag: true,
RunE: func(cmd *cobra.Command, args []string) error {
- return runStats(cmd.Context(), globalOptions, args)
+ return runStats(cmd.Context(), statsOptions, globalOptions, args)
},
}
@@ -70,8 +74,8 @@ func init() {
initMultiSnapshotFilter(f, &statsOptions.SnapshotFilter, true)
}
-func runStats(ctx context.Context, gopts GlobalOptions, args []string) error {
- err := verifyStatsInput(gopts, args)
+func runStats(ctx context.Context, opts StatsOptions, gopts GlobalOptions, args []string) error {
+ err := verifyStatsInput(opts)
if err != nil {
return err
}
@@ -83,7 +87,7 @@ func runStats(ctx context.Context, gopts GlobalOptions, args []string) error {
if !gopts.NoLock {
var lock *restic.Lock
- lock, ctx, err = lockRepo(ctx, repo)
+ lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
defer unlockRepo(lock)
if err != nil {
return err
@@ -99,6 +103,10 @@ func runStats(ctx context.Context, gopts GlobalOptions, args []string) error {
return err
}
+ if opts.countMode == countModeDebug {
+ return statsDebug(ctx, repo)
+ }
+
if !gopts.JSON {
Printf("scanning...\n")
}
@@ -111,8 +119,8 @@ func runStats(ctx context.Context, gopts GlobalOptions, args []string) error {
SnapshotsCount: 0,
}
- for sn := range FindFilteredSnapshots(ctx, snapshotLister, repo, &statsOptions.SnapshotFilter, args) {
- err = statsWalkSnapshot(ctx, sn, repo, stats)
+ for sn := range FindFilteredSnapshots(ctx, snapshotLister, repo, &opts.SnapshotFilter, args) {
+ err = statsWalkSnapshot(ctx, sn, repo, opts, stats)
if err != nil {
return fmt.Errorf("error walking snapshot: %v", err)
}
@@ -122,7 +130,7 @@ func runStats(ctx context.Context, gopts GlobalOptions, args []string) error {
return err
}
- if statsOptions.countMode == countModeRawData {
+ if opts.countMode == countModeRawData {
// the blob handles have been collected, but not yet counted
for blobHandle := range stats.blobs {
pbs := repo.Index().Lookup(blobHandle)
@@ -156,7 +164,7 @@ func runStats(ctx context.Context, gopts GlobalOptions, args []string) error {
return nil
}
- Printf("Stats in %s mode:\n", statsOptions.countMode)
+ Printf("Stats in %s mode:\n", opts.countMode)
Printf(" Snapshots processed: %d\n", stats.SnapshotsCount)
if stats.TotalBlobCount > 0 {
Printf(" Total Blob Count: %d\n", stats.TotalBlobCount)
@@ -181,21 +189,21 @@ func runStats(ctx context.Context, gopts GlobalOptions, args []string) error {
return nil
}
-func statsWalkSnapshot(ctx context.Context, snapshot *restic.Snapshot, repo restic.Repository, stats *statsContainer) error {
+func statsWalkSnapshot(ctx context.Context, snapshot *restic.Snapshot, repo restic.Repository, opts StatsOptions, stats *statsContainer) error {
if snapshot.Tree == nil {
return fmt.Errorf("snapshot %s has nil tree", snapshot.ID().Str())
}
stats.SnapshotsCount++
- if statsOptions.countMode == countModeRawData {
+ if opts.countMode == countModeRawData {
// count just the sizes of unique blobs; we don't need to walk the tree
// ourselves in this case, since a nifty function does it for us
return restic.FindUsedBlobs(ctx, repo, restic.IDs{*snapshot.Tree}, stats.blobs, nil)
}
uniqueInodes := make(map[uint64]struct{})
- err := walker.Walk(ctx, repo, *snapshot.Tree, restic.NewIDSet(), statsWalkTree(repo, stats, uniqueInodes))
+ err := walker.Walk(ctx, repo, *snapshot.Tree, restic.NewIDSet(), statsWalkTree(repo, opts, stats, uniqueInodes))
if err != nil {
return fmt.Errorf("walking tree %s: %v", *snapshot.Tree, err)
}
@@ -203,7 +211,7 @@ func statsWalkSnapshot(ctx context.Context, snapshot *restic.Snapshot, repo rest
return nil
}
-func statsWalkTree(repo restic.Repository, stats *statsContainer, uniqueInodes map[uint64]struct{}) walker.WalkFunc {
+func statsWalkTree(repo restic.Repository, opts StatsOptions, stats *statsContainer, uniqueInodes map[uint64]struct{}) walker.WalkFunc {
return func(parentTreeID restic.ID, npath string, node *restic.Node, nodeErr error) (bool, error) {
if nodeErr != nil {
return true, nodeErr
@@ -212,19 +220,19 @@ func statsWalkTree(repo restic.Repository, stats *statsContainer, uniqueInodes m
return true, nil
}
- if statsOptions.countMode == countModeUniqueFilesByContents || statsOptions.countMode == countModeBlobsPerFile {
+ if opts.countMode == countModeUniqueFilesByContents || opts.countMode == countModeBlobsPerFile {
// only count this file if we haven't visited it before
fid := makeFileIDByContents(node)
if _, ok := stats.uniqueFiles[fid]; !ok {
// mark the file as visited
stats.uniqueFiles[fid] = struct{}{}
- if statsOptions.countMode == countModeUniqueFilesByContents {
+ if opts.countMode == countModeUniqueFilesByContents {
// simply count the size of each unique file (unique by contents only)
stats.TotalSize += node.Size
stats.TotalFileCount++
}
- if statsOptions.countMode == countModeBlobsPerFile {
+ if opts.countMode == countModeBlobsPerFile {
// count the size of each unique blob reference, which is
// by unique file (unique by contents and file path)
for _, blobID := range node.Content {
@@ -254,7 +262,7 @@ func statsWalkTree(repo restic.Repository, stats *statsContainer, uniqueInodes m
}
}
- if statsOptions.countMode == countModeRestoreSize {
+ if opts.countMode == countModeRestoreSize {
// as this is a file in the snapshot, we can simply count its
// size without worrying about uniqueness, since duplicate files
// will still be restored
@@ -284,15 +292,16 @@ func makeFileIDByContents(node *restic.Node) fileID {
return sha256.Sum256(bb)
}
-func verifyStatsInput(gopts GlobalOptions, args []string) error {
+func verifyStatsInput(opts StatsOptions) error {
// require a recognized counting mode
- switch statsOptions.countMode {
+ switch opts.countMode {
case countModeRestoreSize:
case countModeUniqueFilesByContents:
case countModeBlobsPerFile:
case countModeRawData:
+ case countModeDebug:
default:
- return fmt.Errorf("unknown counting mode: %s (use the -h flag to get a list of supported modes)", statsOptions.countMode)
+ return fmt.Errorf("unknown counting mode: %s (use the -h flag to get a list of supported modes)", opts.countMode)
}
return nil
@@ -335,4 +344,149 @@ const (
countModeUniqueFilesByContents = "files-by-contents"
countModeBlobsPerFile = "blobs-per-file"
countModeRawData = "raw-data"
+ countModeDebug = "debug"
)
+
+func statsDebug(ctx context.Context, repo restic.Repository) error {
+ Warnf("Collecting size statistics\n\n")
+ for _, t := range []restic.FileType{restic.KeyFile, restic.LockFile, restic.IndexFile, restic.PackFile} {
+ hist, err := statsDebugFileType(ctx, repo, t)
+ if err != nil {
+ return err
+ }
+ Warnf("File Type: %v\n%v\n", t, hist)
+ }
+
+ hist := statsDebugBlobs(ctx, repo)
+ for _, t := range []restic.BlobType{restic.DataBlob, restic.TreeBlob} {
+ Warnf("Blob Type: %v\n%v\n\n", t, hist[t])
+ }
+
+ return nil
+}
+
+func statsDebugFileType(ctx context.Context, repo restic.Repository, tpe restic.FileType) (*sizeHistogram, error) {
+ hist := newSizeHistogram(2 * repository.MaxPackSize)
+ err := repo.List(ctx, tpe, func(id restic.ID, size int64) error {
+ hist.Add(uint64(size))
+ return nil
+ })
+
+ return hist, err
+}
+
+func statsDebugBlobs(ctx context.Context, repo restic.Repository) [restic.NumBlobTypes]*sizeHistogram {
+ var hist [restic.NumBlobTypes]*sizeHistogram
+ for i := 0; i < len(hist); i++ {
+ hist[i] = newSizeHistogram(2 * chunker.MaxSize)
+ }
+
+ repo.Index().Each(ctx, func(pb restic.PackedBlob) {
+ hist[pb.Type].Add(uint64(pb.Length))
+ })
+
+ return hist
+}
+
+type sizeClass struct {
+ lower, upper uint64
+ count int64
+}
+
+type sizeHistogram struct {
+ count int64
+ totalSize uint64
+ buckets []sizeClass
+ oversized []uint64
+}
+
+func newSizeHistogram(sizeLimit uint64) *sizeHistogram {
+ h := &sizeHistogram{}
+ h.buckets = append(h.buckets, sizeClass{0, 0, 0})
+
+ lowerBound := uint64(1)
+ growthFactor := uint64(10)
+
+ for lowerBound < sizeLimit {
+ upperBound := lowerBound*growthFactor - 1
+ if upperBound > sizeLimit {
+ upperBound = sizeLimit
+ }
+ h.buckets = append(h.buckets, sizeClass{lowerBound, upperBound, 0})
+ lowerBound *= growthFactor
+ }
+
+ return h
+}
+
+func (s *sizeHistogram) Add(size uint64) {
+ s.count++
+ s.totalSize += size
+
+ for i, bucket := range s.buckets {
+ if size >= bucket.lower && size <= bucket.upper {
+ s.buckets[i].count++
+ return
+ }
+ }
+
+ s.oversized = append(s.oversized, size)
+}
+
+func (s sizeHistogram) String() string {
+ var out strings.Builder
+
+ out.WriteString(fmt.Sprintf("Count: %d\n", s.count))
+ out.WriteString(fmt.Sprintf("Total Size: %s\n", ui.FormatBytes(s.totalSize)))
+
+ t := table.New()
+ t.AddColumn("Size", "{{.SizeRange}}")
+ t.AddColumn("Count", "{{.Count}}")
+ type line struct {
+ SizeRange string
+ Count int64
+ }
+
+ // only print up to the highest used bucket size
+ lastFilledIdx := 0
+ for i := 0; i < len(s.buckets); i++ {
+ if s.buckets[i].count != 0 {
+ lastFilledIdx = i
+ }
+ }
+
+ var lines []line
+ hasStarted := false
+ for i, b := range s.buckets {
+ if i > lastFilledIdx {
+ break
+ }
+
+ if b.count > 0 {
+ hasStarted = true
+ }
+ if hasStarted {
+ lines = append(lines, line{
+ SizeRange: fmt.Sprintf("%d - %d Byte", b.lower, b.upper),
+ Count: b.count,
+ })
+ }
+ }
+ longestRange := 0
+ for _, l := range lines {
+ if longestRange < len(l.SizeRange) {
+ longestRange = len(l.SizeRange)
+ }
+ }
+ for i := range lines {
+ lines[i].SizeRange = strings.Repeat(" ", longestRange-len(lines[i].SizeRange)) + lines[i].SizeRange
+ t.AddRow(lines[i])
+ }
+
+ _ = t.Write(&out)
+
+ if len(s.oversized) > 0 {
+ out.WriteString(fmt.Sprintf("Oversized: %v\n", s.oversized))
+ }
+ return out.String()
+}
diff --git a/cmd/restic/cmd_stats_test.go b/cmd/restic/cmd_stats_test.go
new file mode 100644
index 000000000..02d37acd9
--- /dev/null
+++ b/cmd/restic/cmd_stats_test.go
@@ -0,0 +1,62 @@
+package main
+
+import (
+ "testing"
+
+ rtest "github.com/restic/restic/internal/test"
+)
+
+func TestSizeHistogramNew(t *testing.T) {
+ h := newSizeHistogram(42)
+
+ exp := &sizeHistogram{
+ count: 0,
+ totalSize: 0,
+ buckets: []sizeClass{
+ {0, 0, 0},
+ {1, 9, 0},
+ {10, 42, 0},
+ },
+ }
+
+ rtest.Equals(t, exp, h)
+}
+
+func TestSizeHistogramAdd(t *testing.T) {
+ h := newSizeHistogram(42)
+ for i := uint64(0); i < 45; i++ {
+ h.Add(i)
+ }
+
+ exp := &sizeHistogram{
+ count: 45,
+ totalSize: 990,
+ buckets: []sizeClass{
+ {0, 0, 1},
+ {1, 9, 9},
+ {10, 42, 33},
+ },
+ oversized: []uint64{43, 44},
+ }
+
+ rtest.Equals(t, exp, h)
+}
+
+func TestSizeHistogramString(t *testing.T) {
+ t.Run("overflow", func(t *testing.T) {
+ h := newSizeHistogram(42)
+ h.Add(8)
+ h.Add(50)
+
+ rtest.Equals(t, "Count: 2\nTotal Size: 58 B\nSize Count\n-----------------\n1 - 9 Byte 1\n-----------------\nOversized: [50]\n", h.String())
+ })
+
+ t.Run("withZero", func(t *testing.T) {
+ h := newSizeHistogram(42)
+ h.Add(0)
+ h.Add(1)
+ h.Add(10)
+
+ rtest.Equals(t, "Count: 3\nTotal Size: 11 B\nSize Count\n-------------------\n 0 - 0 Byte 1\n 1 - 9 Byte 1\n10 - 42 Byte 1\n-------------------\n", h.String())
+ })
+}
diff --git a/cmd/restic/cmd_tag.go b/cmd/restic/cmd_tag.go
index e5948ea02..fe4638547 100644
--- a/cmd/restic/cmd_tag.go
+++ b/cmd/restic/cmd_tag.go
@@ -111,7 +111,7 @@ func runTag(ctx context.Context, opts TagOptions, gopts GlobalOptions, args []st
if !gopts.NoLock {
Verbosef("create exclusive lock for repository\n")
var lock *restic.Lock
- lock, ctx, err = lockRepoExclusive(ctx, repo)
+ lock, ctx, err = lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON)
defer unlockRepo(lock)
if err != nil {
return err
diff --git a/cmd/restic/cmd_tag_integration_test.go b/cmd/restic/cmd_tag_integration_test.go
new file mode 100644
index 000000000..3b902c51e
--- /dev/null
+++ b/cmd/restic/cmd_tag_integration_test.go
@@ -0,0 +1,94 @@
+package main
+
+import (
+ "context"
+ "testing"
+
+ "github.com/restic/restic/internal/restic"
+ rtest "github.com/restic/restic/internal/test"
+)
+
+func testRunTag(t testing.TB, opts TagOptions, gopts GlobalOptions) {
+ rtest.OK(t, runTag(context.TODO(), opts, gopts, []string{}))
+}
+
+func TestTag(t *testing.T) {
+ env, cleanup := withTestEnvironment(t)
+ defer cleanup()
+
+ testSetupBackupData(t, env)
+ testRunBackup(t, "", []string{env.testdata}, BackupOptions{}, env.gopts)
+ testRunCheck(t, env.gopts)
+ newest, _ := testRunSnapshots(t, env.gopts)
+ if newest == nil {
+ t.Fatal("expected a new backup, got nil")
+ }
+
+ rtest.Assert(t, len(newest.Tags) == 0,
+ "expected no tags, got %v", newest.Tags)
+ rtest.Assert(t, newest.Original == nil,
+ "expected original ID to be nil, got %v", newest.Original)
+ originalID := *newest.ID
+
+ testRunTag(t, TagOptions{SetTags: restic.TagLists{[]string{"NL"}}}, env.gopts)
+ testRunCheck(t, env.gopts)
+ newest, _ = testRunSnapshots(t, env.gopts)
+ if newest == nil {
+ t.Fatal("expected a backup, got nil")
+ }
+ rtest.Assert(t, len(newest.Tags) == 1 && newest.Tags[0] == "NL",
+ "set failed, expected one NL tag, got %v", newest.Tags)
+ rtest.Assert(t, newest.Original != nil, "expected original snapshot id, got nil")
+ rtest.Assert(t, *newest.Original == originalID,
+ "expected original ID to be set to the first snapshot id")
+
+ testRunTag(t, TagOptions{AddTags: restic.TagLists{[]string{"CH"}}}, env.gopts)
+ testRunCheck(t, env.gopts)
+ newest, _ = testRunSnapshots(t, env.gopts)
+ if newest == nil {
+ t.Fatal("expected a backup, got nil")
+ }
+ rtest.Assert(t, len(newest.Tags) == 2 && newest.Tags[0] == "NL" && newest.Tags[1] == "CH",
+ "add failed, expected CH,NL tags, got %v", newest.Tags)
+ rtest.Assert(t, newest.Original != nil, "expected original snapshot id, got nil")
+ rtest.Assert(t, *newest.Original == originalID,
+ "expected original ID to be set to the first snapshot id")
+
+ testRunTag(t, TagOptions{RemoveTags: restic.TagLists{[]string{"NL"}}}, env.gopts)
+ testRunCheck(t, env.gopts)
+ newest, _ = testRunSnapshots(t, env.gopts)
+ if newest == nil {
+ t.Fatal("expected a backup, got nil")
+ }
+ rtest.Assert(t, len(newest.Tags) == 1 && newest.Tags[0] == "CH",
+ "remove failed, expected one CH tag, got %v", newest.Tags)
+ rtest.Assert(t, newest.Original != nil, "expected original snapshot id, got nil")
+ rtest.Assert(t, *newest.Original == originalID,
+ "expected original ID to be set to the first snapshot id")
+
+ testRunTag(t, TagOptions{AddTags: restic.TagLists{[]string{"US", "RU"}}}, env.gopts)
+ testRunTag(t, TagOptions{RemoveTags: restic.TagLists{[]string{"CH", "US", "RU"}}}, env.gopts)
+ testRunCheck(t, env.gopts)
+ newest, _ = testRunSnapshots(t, env.gopts)
+ if newest == nil {
+ t.Fatal("expected a backup, got nil")
+ }
+ rtest.Assert(t, len(newest.Tags) == 0,
+ "expected no tags, got %v", newest.Tags)
+ rtest.Assert(t, newest.Original != nil, "expected original snapshot id, got nil")
+ rtest.Assert(t, *newest.Original == originalID,
+ "expected original ID to be set to the first snapshot id")
+
+ // Check special case of removing all tags.
+ testRunTag(t, TagOptions{SetTags: restic.TagLists{[]string{""}}}, env.gopts)
+ testRunCheck(t, env.gopts)
+ newest, _ = testRunSnapshots(t, env.gopts)
+ if newest == nil {
+ t.Fatal("expected a backup, got nil")
+ }
+ rtest.Assert(t, len(newest.Tags) == 0,
+ "expected no tags, got %v", newest.Tags)
+ rtest.Assert(t, newest.Original != nil, "expected original snapshot id, got nil")
+ rtest.Assert(t, *newest.Original == originalID,
+ "expected original ID to be set to the first snapshot id")
+}
diff --git a/cmd/restic/exclude.go b/cmd/restic/exclude.go
index efe6f41e4..095944610 100644
--- a/cmd/restic/exclude.go
+++ b/cmd/restic/exclude.go
@@ -7,7 +7,6 @@ import (
"io"
"os"
"path/filepath"
- "strconv"
"strings"
"sync"
@@ -17,6 +16,7 @@ import (
"github.com/restic/restic/internal/fs"
"github.com/restic/restic/internal/repository"
"github.com/restic/restic/internal/textfile"
+ "github.com/restic/restic/internal/ui"
"github.com/spf13/pflag"
)
@@ -364,7 +364,7 @@ func rejectResticCache(repo *repository.Repository) (RejectByNameFunc, error) {
}
func rejectBySize(maxSizeStr string) (RejectFunc, error) {
- maxSize, err := parseSizeStr(maxSizeStr)
+ maxSize, err := ui.ParseBytes(maxSizeStr)
if err != nil {
return nil, err
}
@@ -385,35 +385,6 @@ func rejectBySize(maxSizeStr string) (RejectFunc, error) {
}, nil
}
-func parseSizeStr(sizeStr string) (int64, error) {
- if sizeStr == "" {
- return 0, errors.New("expected size, got empty string")
- }
-
- numStr := sizeStr[:len(sizeStr)-1]
- var unit int64 = 1
-
- switch sizeStr[len(sizeStr)-1] {
- case 'b', 'B':
- // use initialized values, do nothing here
- case 'k', 'K':
- unit = 1024
- case 'm', 'M':
- unit = 1024 * 1024
- case 'g', 'G':
- unit = 1024 * 1024 * 1024
- case 't', 'T':
- unit = 1024 * 1024 * 1024 * 1024
- default:
- numStr = sizeStr
- }
- value, err := strconv.ParseInt(numStr, 10, 64)
- if err != nil {
- return 0, err
- }
- return value * unit, nil
-}
-
// readExcludePatternsFromFiles reads all exclude files and returns the list of
// exclude patterns. For each line, leading and trailing white space is removed
// and comment lines are ignored. For each remaining pattern, environment
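rejectBySize now delegates size parsing to ui.ParseBytes instead of the local parseSizeStr removed above. Assuming ParseBytes keeps the 1024-based suffix semantics that parseSizeStr implemented (and that the removed tests below encoded), usage looks like:

    maxSize, err := ui.ParseBytes("100K") // assumed: 100 * 1024 == 102400
    if err != nil {
        return nil, err
    }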
diff --git a/cmd/restic/exclude_test.go b/cmd/restic/exclude_test.go
index 050a083e4..9a24418ae 100644
--- a/cmd/restic/exclude_test.go
+++ b/cmd/restic/exclude_test.go
@@ -187,54 +187,6 @@ func TestMultipleIsExcludedByFile(t *testing.T) {
}
}
-func TestParseSizeStr(t *testing.T) {
- sizeStrTests := []struct {
- in string
- expected int64
- }{
- {"1024", 1024},
- {"1024b", 1024},
- {"1024B", 1024},
- {"1k", 1024},
- {"100k", 102400},
- {"100K", 102400},
- {"10M", 10485760},
- {"100m", 104857600},
- {"20G", 21474836480},
- {"10g", 10737418240},
- {"2T", 2199023255552},
- {"2t", 2199023255552},
- }
-
- for _, tt := range sizeStrTests {
- actual, err := parseSizeStr(tt.in)
- test.OK(t, err)
-
- if actual != tt.expected {
- t.Errorf("parseSizeStr(%s) = %d; expected %d", tt.in, actual, tt.expected)
- }
- }
-}
-
-func TestParseInvalidSizeStr(t *testing.T) {
- invalidSizes := []string{
- "",
- " ",
- "foobar",
- "zzz",
- }
-
- for _, s := range invalidSizes {
- v, err := parseSizeStr(s)
- if err == nil {
- t.Errorf("wanted error for invalid value %q, got nil", s)
- }
- if v != 0 {
- t.Errorf("wanted zero for invalid value %q, got: %v", s, v)
- }
- }
-}
-
// TestIsExcludedByFileSize is for testing the instance of
// --exclude-larger-than parameters
func TestIsExcludedByFileSize(t *testing.T) {
diff --git a/cmd/restic/format.go b/cmd/restic/format.go
index 2f14a4575..063cd4e71 100644
--- a/cmd/restic/format.go
+++ b/cmd/restic/format.go
@@ -5,9 +5,10 @@ import (
"os"
"github.com/restic/restic/internal/restic"
+ "github.com/restic/restic/internal/ui"
)
-func formatNode(path string, n *restic.Node, long bool) string {
+func formatNode(path string, n *restic.Node, long bool, human bool) string {
if !long {
return path
}
@@ -15,6 +16,13 @@ func formatNode(path string, n *restic.Node, long bool) string {
var mode os.FileMode
var target string
+ var size string
+ if human {
+ size = ui.FormatBytes(n.Size)
+ } else {
+ size = fmt.Sprintf("%6d", n.Size)
+ }
+
switch n.Type {
case "file":
mode = 0
@@ -33,8 +41,8 @@ func formatNode(path string, n *restic.Node, long bool) string {
mode = os.ModeSocket
}
- return fmt.Sprintf("%s %5d %5d %6d %s %s%s",
- mode|n.Mode, n.UID, n.GID, n.Size,
+ return fmt.Sprintf("%s %5d %5d %s %s %s%s",
+ mode|n.Mode, n.UID, n.GID, size,
n.ModTime.Local().Format(TimeFormat), path,
target)
}
diff --git a/cmd/restic/format_test.go b/cmd/restic/format_test.go
new file mode 100644
index 000000000..689bd27a5
--- /dev/null
+++ b/cmd/restic/format_test.go
@@ -0,0 +1,61 @@
+package main
+
+import (
+ "testing"
+ "time"
+
+ "github.com/restic/restic/internal/restic"
+ rtest "github.com/restic/restic/internal/test"
+)
+
+func TestFormatNode(t *testing.T) {
+ // overwrite time zone to ensure the data is formatted reproducibly
+ tz := time.Local
+ time.Local = time.UTC
+ defer func() {
+ time.Local = tz
+ }()
+
+ testPath := "/test/path"
+ node := restic.Node{
+ Name: "baz",
+ Type: "file",
+ Size: 14680064,
+ UID: 1000,
+ GID: 2000,
+ ModTime: time.Date(2020, 1, 2, 3, 4, 5, 0, time.UTC),
+ }
+
+ for _, c := range []struct {
+ path string
+ restic.Node
+ long bool
+ human bool
+ expect string
+ }{
+ {
+ path: testPath,
+ Node: node,
+ long: false,
+ human: false,
+ expect: testPath,
+ },
+ {
+ path: testPath,
+ Node: node,
+ long: true,
+ human: false,
+ expect: "---------- 1000 2000 14680064 2020-01-02 03:04:05 " + testPath,
+ },
+ {
+ path: testPath,
+ Node: node,
+ long: true,
+ human: true,
+ expect: "---------- 1000 2000 14.000 MiB 2020-01-02 03:04:05 " + testPath,
+ },
+ } {
+ r := formatNode(c.path, &c.Node, c.long, c.human)
+ rtest.Equals(t, c.expect, r)
+ }
+}
diff --git a/cmd/restic/global.go b/cmd/restic/global.go
index 206229d94..63e13c3ae 100644
--- a/cmd/restic/global.go
+++ b/cmd/restic/global.go
@@ -10,7 +10,6 @@ import (
"runtime"
"strconv"
"strings"
- "syscall"
"time"
"github.com/restic/restic/internal/backend"
@@ -20,10 +19,12 @@ import (
"github.com/restic/restic/internal/backend/limiter"
"github.com/restic/restic/internal/backend/local"
"github.com/restic/restic/internal/backend/location"
+ "github.com/restic/restic/internal/backend/logger"
"github.com/restic/restic/internal/backend/rclone"
"github.com/restic/restic/internal/backend/rest"
"github.com/restic/restic/internal/backend/retry"
"github.com/restic/restic/internal/backend/s3"
+ "github.com/restic/restic/internal/backend/sema"
"github.com/restic/restic/internal/backend/sftp"
"github.com/restic/restic/internal/backend/swift"
"github.com/restic/restic/internal/cache"
@@ -42,7 +43,7 @@ import (
"golang.org/x/term"
)
-var version = "0.15.2"
+var version = "0.16.0"
// TimeFormat is the format used for all timestamps printed by restic.
const TimeFormat = "2006-01-02 15:04:05"
@@ -59,6 +60,7 @@ type GlobalOptions struct {
Quiet bool
Verbose int
NoLock bool
+ RetryLock time.Duration
JSON bool
CacheDir string
NoCache bool
@@ -73,6 +75,7 @@ type GlobalOptions struct {
stdout io.Writer
stderr io.Writer
+ backends *location.Registry
backendTestHook, backendInnerTestHook backendWrapper
// verbosity is set as follows:
@@ -96,6 +99,18 @@ var isReadingPassword bool
var internalGlobalCtx context.Context
func init() {
+ backends := location.NewRegistry()
+ backends.Register(azure.NewFactory())
+ backends.Register(b2.NewFactory())
+ backends.Register(gs.NewFactory())
+ backends.Register(local.NewFactory())
+ backends.Register(rclone.NewFactory())
+ backends.Register(rest.NewFactory())
+ backends.Register(s3.NewFactory())
+ backends.Register(sftp.NewFactory())
+ backends.Register(swift.NewFactory())
+ globalOptions.backends = backends
+
var cancel context.CancelFunc
internalGlobalCtx, cancel = context.WithCancel(context.Background())
AddCleanupHandler(func(code int) (int, error) {
@@ -115,14 +130,15 @@ func init() {
// use empty parameter name as `-v, --verbose n` instead of the correct `--verbose=n` is confusing
f.CountVarP(&globalOptions.Verbose, "verbose", "v", "be verbose (specify multiple times or a level using --verbose=n``, max level/times is 2)")
f.BoolVar(&globalOptions.NoLock, "no-lock", false, "do not lock the repository, this allows some operations on read-only repositories")
+ f.DurationVar(&globalOptions.RetryLock, "retry-lock", 0, "retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries)")
f.BoolVarP(&globalOptions.JSON, "json", "", false, "set output mode to JSON for commands that support it")
f.StringVar(&globalOptions.CacheDir, "cache-dir", "", "set the cache `directory`. (default: use system default cache directory)")
f.BoolVar(&globalOptions.NoCache, "no-cache", false, "do not use a local cache")
- f.StringSliceVar(&globalOptions.RootCertFilenames, "cacert", nil, "`file` to load root certificates from (default: use system certificates)")
- f.StringVar(&globalOptions.TLSClientCertKeyFilename, "tls-client-cert", "", "path to a `file` containing PEM encoded TLS client certificate and private key")
+ f.StringSliceVar(&globalOptions.RootCertFilenames, "cacert", nil, "`file` to load root certificates from (default: use system certificates or $RESTIC_CACERT)")
+ f.StringVar(&globalOptions.TLSClientCertKeyFilename, "tls-client-cert", "", "path to a `file` containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT)")
f.BoolVar(&globalOptions.InsecureTLS, "insecure-tls", false, "skip TLS certificate verification when connecting to the repository (insecure)")
f.BoolVar(&globalOptions.CleanupCache, "cleanup-cache", false, "auto remove old cache directories")
- f.Var(&globalOptions.Compression, "compression", "compression mode (only available for repository format version 2), one of (auto|off|max)")
+ f.Var(&globalOptions.Compression, "compression", "compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION)")
f.IntVar(&globalOptions.Limits.UploadKb, "limit-upload", 0, "limits uploads to a maximum `rate` in KiB/s. (default: unlimited)")
f.IntVar(&globalOptions.Limits.DownloadKb, "limit-download", 0, "limits downloads to a maximum `rate` in KiB/s. (default: unlimited)")
f.UintVar(&globalOptions.PackSize, "pack-size", 0, "set target pack `size` in MiB, created pack files may be larger (default: $RESTIC_PACK_SIZE)")
@@ -135,6 +151,10 @@ func init() {
globalOptions.PasswordFile = os.Getenv("RESTIC_PASSWORD_FILE")
globalOptions.KeyHint = os.Getenv("RESTIC_KEY_HINT")
globalOptions.PasswordCommand = os.Getenv("RESTIC_PASSWORD_COMMAND")
+ if os.Getenv("RESTIC_CACERT") != "" {
+ globalOptions.RootCertFilenames = strings.Split(os.Getenv("RESTIC_CACERT"), ",")
+ }
+ globalOptions.TLSClientCertKeyFilename = os.Getenv("RESTIC_TLS_CLIENT_CERT")
comp := os.Getenv("RESTIC_COMPRESSION")
if comp != "" {
// ignore error as there's no good way to handle it
@@ -147,21 +167,6 @@ func init() {
restoreTerminal()
}
-// checkErrno returns nil when err is set to syscall.Errno(0), since this is no
-// error condition.
-func checkErrno(err error) error {
- e, ok := err.(syscall.Errno)
- if !ok {
- return err
- }
-
- if e == 0 {
- return nil
- }
-
- return err
-}
-
func stdinIsTerminal() bool {
return term.IsTerminal(int(os.Stdin.Fd()))
}
@@ -210,7 +215,7 @@ func restoreTerminal() {
if !isReadingPassword {
return code, nil
}
- err := checkErrno(term.Restore(fd, state))
+ err := term.Restore(fd, state)
if err != nil {
fmt.Fprintf(os.Stderr, "unable to restore terminal state: %v\n", err)
}
@@ -280,6 +285,7 @@ func Warnf(format string, args ...interface{}) {
if err != nil {
fmt.Fprintf(os.Stderr, "unable to write to stderr: %v\n", err)
}
+ debug.Log(format, args...)
}
// resolvePassword determines the password to be used for opening the repository.
@@ -451,7 +457,7 @@ func OpenRepository(ctx context.Context, opts GlobalOptions) (*repository.Reposi
PackSize: opts.PackSize * 1024 * 1024,
})
if err != nil {
- return nil, err
+ return nil, errors.Fatal(err.Error())
}
passwordTriesLeft := 1
@@ -546,153 +552,25 @@ func OpenRepository(ctx context.Context, opts GlobalOptions) (*repository.Reposi
}
func parseConfig(loc location.Location, opts options.Options) (interface{}, error) {
+ cfg := loc.Config
+ if cfg, ok := cfg.(restic.ApplyEnvironmenter); ok {
+ cfg.ApplyEnvironment("")
+ }
+
// only apply options for a particular backend here
opts = opts.Extract(loc.Scheme)
-
- switch loc.Scheme {
- case "local":
- cfg := loc.Config.(local.Config)
- if err := opts.Apply(loc.Scheme, &cfg); err != nil {
- return nil, err
- }
-
- debug.Log("opening local repository at %#v", cfg)
- return cfg, nil
-
- case "sftp":
- cfg := loc.Config.(sftp.Config)
- if err := opts.Apply(loc.Scheme, &cfg); err != nil {
- return nil, err
- }
-
- debug.Log("opening sftp repository at %#v", cfg)
- return cfg, nil
-
- case "s3":
- cfg := loc.Config.(s3.Config)
- if cfg.KeyID == "" {
- cfg.KeyID = os.Getenv("AWS_ACCESS_KEY_ID")
- }
-
- if cfg.Secret.String() == "" {
- cfg.Secret = options.NewSecretString(os.Getenv("AWS_SECRET_ACCESS_KEY"))
- }
-
- if cfg.KeyID == "" && cfg.Secret.String() != "" {
- return nil, errors.Fatalf("unable to open S3 backend: Key ID ($AWS_ACCESS_KEY_ID) is empty")
- } else if cfg.KeyID != "" && cfg.Secret.String() == "" {
- return nil, errors.Fatalf("unable to open S3 backend: Secret ($AWS_SECRET_ACCESS_KEY) is empty")
- }
-
- if cfg.Region == "" {
- cfg.Region = os.Getenv("AWS_DEFAULT_REGION")
- }
-
- if err := opts.Apply(loc.Scheme, &cfg); err != nil {
- return nil, err
- }
-
- debug.Log("opening s3 repository at %#v", cfg)
- return cfg, nil
-
- case "gs":
- cfg := loc.Config.(gs.Config)
- if cfg.ProjectID == "" {
- cfg.ProjectID = os.Getenv("GOOGLE_PROJECT_ID")
- }
-
- if err := opts.Apply(loc.Scheme, &cfg); err != nil {
- return nil, err
- }
-
- debug.Log("opening gs repository at %#v", cfg)
- return cfg, nil
-
- case "azure":
- cfg := loc.Config.(azure.Config)
- if cfg.AccountName == "" {
- cfg.AccountName = os.Getenv("AZURE_ACCOUNT_NAME")
- }
-
- if cfg.AccountKey.String() == "" {
- cfg.AccountKey = options.NewSecretString(os.Getenv("AZURE_ACCOUNT_KEY"))
- }
-
- if cfg.AccountSAS.String() == "" {
- cfg.AccountSAS = options.NewSecretString(os.Getenv("AZURE_ACCOUNT_SAS"))
- }
-
- if err := opts.Apply(loc.Scheme, &cfg); err != nil {
- return nil, err
- }
-
- debug.Log("opening gs repository at %#v", cfg)
- return cfg, nil
-
- case "swift":
- cfg := loc.Config.(swift.Config)
-
- if err := swift.ApplyEnvironment("", &cfg); err != nil {
- return nil, err
- }
-
- if err := opts.Apply(loc.Scheme, &cfg); err != nil {
- return nil, err
- }
-
- debug.Log("opening swift repository at %#v", cfg)
- return cfg, nil
-
- case "b2":
- cfg := loc.Config.(b2.Config)
-
- if cfg.AccountID == "" {
- cfg.AccountID = os.Getenv("B2_ACCOUNT_ID")
- }
-
- if cfg.AccountID == "" {
- return nil, errors.Fatalf("unable to open B2 backend: Account ID ($B2_ACCOUNT_ID) is empty")
- }
-
- if cfg.Key.String() == "" {
- cfg.Key = options.NewSecretString(os.Getenv("B2_ACCOUNT_KEY"))
- }
-
- if cfg.Key.String() == "" {
- return nil, errors.Fatalf("unable to open B2 backend: Key ($B2_ACCOUNT_KEY) is empty")
- }
-
- if err := opts.Apply(loc.Scheme, &cfg); err != nil {
- return nil, err
- }
-
- debug.Log("opening b2 repository at %#v", cfg)
- return cfg, nil
- case "rest":
- cfg := loc.Config.(rest.Config)
- if err := opts.Apply(loc.Scheme, &cfg); err != nil {
- return nil, err
- }
-
- debug.Log("opening rest repository at %#v", cfg)
- return cfg, nil
- case "rclone":
- cfg := loc.Config.(rclone.Config)
- if err := opts.Apply(loc.Scheme, &cfg); err != nil {
- return nil, err
- }
-
- debug.Log("opening rest repository at %#v", cfg)
- return cfg, nil
+ if err := opts.Apply(loc.Scheme, cfg); err != nil {
+ return nil, err
}
- return nil, errors.Fatalf("invalid backend: %q", loc.Scheme)
+ debug.Log("opening %v repository at %#v", loc.Scheme, cfg)
+ return cfg, nil
}
// Open the backend specified by a location config.
func open(ctx context.Context, s string, gopts GlobalOptions, opts options.Options) (restic.Backend, error) {
- debug.Log("parsing location %v", location.StripPassword(s))
- loc, err := location.Parse(s)
+ debug.Log("parsing location %v", location.StripPassword(gopts.backends, s))
+ loc, err := location.Parse(gopts.backends, s)
if err != nil {
return nil, errors.Fatalf("parsing repository location failed: %v", err)
}
@@ -706,41 +584,26 @@ func open(ctx context.Context, s string, gopts GlobalOptions, opts options.Optio
rt, err := backend.Transport(globalOptions.TransportOptions)
if err != nil {
- return nil, err
+ return nil, errors.Fatal(err.Error())
}
// wrap the transport so that the throughput via HTTP is limited
lim := limiter.NewStaticLimiter(gopts.Limits)
rt = lim.Transport(rt)
- switch loc.Scheme {
- case "local":
- be, err = local.Open(ctx, cfg.(local.Config))
- case "sftp":
- be, err = sftp.Open(ctx, cfg.(sftp.Config))
- case "s3":
- be, err = s3.Open(ctx, cfg.(s3.Config), rt)
- case "gs":
- be, err = gs.Open(cfg.(gs.Config), rt)
- case "azure":
- be, err = azure.Open(ctx, cfg.(azure.Config), rt)
- case "swift":
- be, err = swift.Open(ctx, cfg.(swift.Config), rt)
- case "b2":
- be, err = b2.Open(ctx, cfg.(b2.Config), rt)
- case "rest":
- be, err = rest.Open(cfg.(rest.Config), rt)
- case "rclone":
- be, err = rclone.Open(cfg.(rclone.Config), lim)
-
- default:
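+ // resolve the backend factory registered for this scheme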
+ factory := gopts.backends.Lookup(loc.Scheme)
+ if factory == nil {
return nil, errors.Fatalf("invalid backend: %q", loc.Scheme)
}
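+ // delegate to the backend-specific Open implementation; both the transport and the limiter are handed over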
+ be, err = factory.Open(ctx, cfg, rt, lim)
if err != nil {
- return nil, errors.Fatalf("unable to open repository at %v: %v", location.StripPassword(s), err)
+ return nil, errors.Fatalf("unable to open repository at %v: %v", location.StripPassword(gopts.backends, s), err)
}
+ // wrap with debug logging and connection limiting
+ be = logger.New(sema.NewBackend(be))
+
// wrap backend if a test specified an inner hook
if gopts.backendInnerTestHook != nil {
be, err = gopts.backendInnerTestHook(be)
@@ -749,15 +612,10 @@ func open(ctx context.Context, s string, gopts GlobalOptions, opts options.Optio
}
}
- if loc.Scheme == "local" || loc.Scheme == "sftp" {
- // wrap the backend in a LimitBackend so that the throughput is limited
- be = limiter.LimitBackend(be, lim)
- }
-
// check if config is there
fi, err := be.Stat(ctx, restic.Handle{Type: restic.ConfigFile})
if err != nil {
- return nil, errors.Fatalf("unable to open config file: %v\nIs there a repository at the following location?\n%v", err, location.StripPassword(s))
+ return nil, errors.Fatalf("unable to open config file: %v\nIs there a repository at the following location?\n%v", err, location.StripPassword(gopts.backends, s))
}
if fi.Size == 0 {
@@ -768,9 +626,9 @@ func open(ctx context.Context, s string, gopts GlobalOptions, opts options.Optio
}
// Create the backend specified by URI.
-func create(ctx context.Context, s string, opts options.Options) (restic.Backend, error) {
- debug.Log("parsing location %v", s)
- loc, err := location.Parse(s)
+func create(ctx context.Context, s string, gopts GlobalOptions, opts options.Options) (restic.Backend, error) {
+ debug.Log("parsing location %v", location.StripPassword(gopts.backends, s))
+ loc, err := location.Parse(gopts.backends, s)
if err != nil {
return nil, err
}
@@ -782,30 +640,18 @@ func create(ctx context.Context, s string, opts options.Options) (restic.Backend
rt, err := backend.Transport(globalOptions.TransportOptions)
if err != nil {
+ return nil, errors.Fatal(err.Error())
+ }
+
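+ // same scheme-to-factory lookup as in open()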
+ factory := gopts.backends.Lookup(loc.Scheme)
+ if factory == nil {
+ return nil, errors.Fatalf("invalid backend: %q", loc.Scheme)
+ }
+
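+ // create via the factory; no bandwidth limiter is passed during repository creation (nil)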
+ be, err := factory.Create(ctx, cfg, rt, nil)
+ if err != nil {
return nil, err
}
- switch loc.Scheme {
- case "local":
- return local.Create(ctx, cfg.(local.Config))
- case "sftp":
- return sftp.Create(ctx, cfg.(sftp.Config))
- case "s3":
- return s3.Create(ctx, cfg.(s3.Config), rt)
- case "gs":
- return gs.Create(cfg.(gs.Config), rt)
- case "azure":
- return azure.Create(ctx, cfg.(azure.Config), rt)
- case "swift":
- return swift.Open(ctx, cfg.(swift.Config), rt)
- case "b2":
- return b2.Create(ctx, cfg.(b2.Config), rt)
- case "rest":
- return rest.Create(ctx, cfg.(rest.Config), rt)
- case "rclone":
- return rclone.Create(ctx, cfg.(rclone.Config))
- }
-
- debug.Log("invalid repository scheme: %v", s)
- return nil, errors.Fatalf("invalid scheme %q", loc.Scheme)
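+ // wrap with debug logging and connection limiting, mirroring open()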
+ return logger.New(sema.NewBackend(be)), nil
}
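
Both open() and create() now resolve the location scheme through
gopts.backends.Lookup and call the factory's Open or Create method; the
registry type itself is defined outside this diff. A minimal sketch of what
such a scheme-to-factory registry could look like (the Factory interface,
Registry type, and package name below are illustrative assumptions, not
restic's actual definitions):

	// registry.go -- hypothetical sketch, not part of this change
	package registry

	import (
		"context"
		"net/http"

		"github.com/restic/restic/internal/limiter"
		"github.com/restic/restic/internal/restic"
	)

	// Factory bundles the per-scheme entry points that open() and
	// create() above invoke as factory.Open(...) / factory.Create(...).
	type Factory interface {
		Open(ctx context.Context, cfg interface{}, rt http.RoundTripper, lim limiter.Limiter) (restic.Backend, error)
		Create(ctx context.Context, cfg interface{}, rt http.RoundTripper, lim limiter.Limiter) (restic.Backend, error)
	}

	// Registry maps a location scheme ("local", "s3", "rclone", ...)
	// to the Factory responsible for it.
	type Registry struct {
		factories map[string]Factory
	}

	func New() *Registry {
		return &Registry{factories: make(map[string]Factory)}
	}

	func (r *Registry) Register(scheme string, f Factory) {
		r.factories[scheme] = f
	}

	// Lookup returns nil for an unknown scheme, which the callers above
	// report as an "invalid backend" fatal error.
	func (r *Registry) Lookup(scheme string) Factory {
		return r.factories[scheme]
	}
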
diff --git a/cmd/restic/global_test.go b/cmd/restic/global_test.go
index 85a9514b9..4f5c29e9a 100644
--- a/cmd/restic/global_test.go
+++ b/cmd/restic/global_test.go
@@ -1,37 +1,29 @@
package main
import (
- "bytes"
"os"
"path/filepath"
"testing"
- "github.com/restic/restic/internal/test"
rtest "github.com/restic/restic/internal/test"
)
func Test_PrintFunctionsRespectsGlobalStdout(t *testing.T) {
- gopts := globalOptions
- defer func() {
- globalOptions = gopts
- }()
-
- buf := bytes.NewBuffer(nil)
- globalOptions.stdout = buf
-
for _, p := range []func(){
func() { Println("message") },
func() { Print("message\n") },
func() { Printf("mes%s\n", "sage") },
} {
- p()
+ buf, _ := withCaptureStdout(func() error {
+ p()
+ return nil
+ })
rtest.Equals(t, "message\n", buf.String())
- buf.Reset()
}
}
func TestReadRepo(t *testing.T) {
- tempDir := test.TempDir(t)
+ tempDir := rtest.TempDir(t)
// test --repo option
var opts GlobalOptions
diff --git a/cmd/restic/integration_filter_pattern_test.go b/cmd/restic/integration_filter_pattern_test.go
index ea5753d20..2eacdeea9 100644
--- a/cmd/restic/integration_filter_pattern_test.go
+++ b/cmd/restic/integration_filter_pattern_test.go
@@ -70,28 +70,28 @@ func TestRestoreFailsWhenUsingInvalidPatterns(t *testing.T) {
var err error
// Test --exclude
- err = testRunRestoreAssumeFailure(t, "latest", RestoreOptions{Exclude: []string{"*[._]log[.-][0-9]", "!*[._]log[.-][0-9]"}}, env.gopts)
+ err = testRunRestoreAssumeFailure("latest", RestoreOptions{Exclude: []string{"*[._]log[.-][0-9]", "!*[._]log[.-][0-9]"}}, env.gopts)
rtest.Equals(t, `Fatal: --exclude: invalid pattern(s) provided:
*[._]log[.-][0-9]
!*[._]log[.-][0-9]`, err.Error())
// Test --iexclude
- err = testRunRestoreAssumeFailure(t, "latest", RestoreOptions{InsensitiveExclude: []string{"*[._]log[.-][0-9]", "!*[._]log[.-][0-9]"}}, env.gopts)
+ err = testRunRestoreAssumeFailure("latest", RestoreOptions{InsensitiveExclude: []string{"*[._]log[.-][0-9]", "!*[._]log[.-][0-9]"}}, env.gopts)
rtest.Equals(t, `Fatal: --iexclude: invalid pattern(s) provided:
*[._]log[.-][0-9]
!*[._]log[.-][0-9]`, err.Error())
// Test --include
- err = testRunRestoreAssumeFailure(t, "latest", RestoreOptions{Include: []string{"*[._]log[.-][0-9]", "!*[._]log[.-][0-9]"}}, env.gopts)
+ err = testRunRestoreAssumeFailure("latest", RestoreOptions{Include: []string{"*[._]log[.-][0-9]", "!*[._]log[.-][0-9]"}}, env.gopts)
rtest.Equals(t, `Fatal: --include: invalid pattern(s) provided:
*[._]log[.-][0-9]
!*[._]log[.-][0-9]`, err.Error())
// Test --iinclude
- err = testRunRestoreAssumeFailure(t, "latest", RestoreOptions{InsensitiveInclude: []string{"*[._]log[.-][0-9]", "!*[._]log[.-][0-9]"}}, env.gopts)
+ err = testRunRestoreAssumeFailure("latest", RestoreOptions{InsensitiveInclude: []string{"*[._]log[.-][0-9]", "!*[._]log[.-][0-9]"}}, env.gopts)
rtest.Equals(t, `Fatal: --iinclude: invalid pattern(s) provided:
*[._]log[.-][0-9]
diff --git a/cmd/restic/integration_helpers_test.go b/cmd/restic/integration_helpers_test.go
index 655aa9335..b7cb5b333 100644
--- a/cmd/restic/integration_helpers_test.go
+++ b/cmd/restic/integration_helpers_test.go
@@ -2,17 +2,23 @@ package main
import (
"bytes"
+ "context"
+ "crypto/rand"
"fmt"
+ "io"
"os"
"path/filepath"
"runtime"
+ "sync"
"testing"
"github.com/restic/restic/internal/backend/retry"
+ "github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/options"
"github.com/restic/restic/internal/repository"
"github.com/restic/restic/internal/restic"
rtest "github.com/restic/restic/internal/test"
+ "github.com/restic/restic/internal/ui/termstatus"
)
type dirEntry struct {
@@ -200,6 +206,8 @@ func withTestEnvironment(t testing.TB) (env *testEnvironment, cleanup func()) {
// replace this hook with "nil" if listing a filetype more than once is necessary
backendTestHook: func(r restic.Backend) (restic.Backend, error) { return newOrderedListOnceBackend(r), nil },
+ // start with default set of backends
+ backends: globalOptions.backends,
}
// always overwrite global options
@@ -215,3 +223,157 @@ func withTestEnvironment(t testing.TB) (env *testEnvironment, cleanup func()) {
return env, cleanup
}
+
+func testSetupBackupData(t testing.TB, env *testEnvironment) string {
+ datafile := filepath.Join("testdata", "backup-data.tar.gz")
+ testRunInit(t, env.gopts)
+ rtest.SetupTarTestFixture(t, env.testdata, datafile)
+ return datafile
+}
+
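+// listPacks returns the set of IDs of all pack files in the repository.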
+func listPacks(gopts GlobalOptions, t *testing.T) restic.IDSet {
+ r, err := OpenRepository(context.TODO(), gopts)
+ rtest.OK(t, err)
+
+ packs := restic.NewIDSet()
+
+ rtest.OK(t, r.List(context.TODO(), restic.PackFile, func(id restic.ID, size int64) error {
+ packs.Insert(id)
+ return nil
+ }))
+ return packs
+}
+
+func removePacks(gopts GlobalOptions, t testing.TB, remove restic.IDSet) {
+ r, err := OpenRepository(context.TODO(), gopts)
+ rtest.OK(t, err)
+
+ for id := range remove {
+ rtest.OK(t, r.Backend().Remove(context.TODO(), restic.Handle{Type: restic.PackFile, Name: id.String()}))
+ }
+}
+
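+// removePacksExcept removes every pack of the selected kind (tree packs if
+// removeTreePacks is set, data packs otherwise) that is not listed in keep.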
+func removePacksExcept(gopts GlobalOptions, t testing.TB, keep restic.IDSet, removeTreePacks bool) {
+ r, err := OpenRepository(context.TODO(), gopts)
+ rtest.OK(t, err)
+
+ // Get all tree packs
+ rtest.OK(t, r.LoadIndex(context.TODO()))
+
+ treePacks := restic.NewIDSet()
+ r.Index().Each(context.TODO(), func(pb restic.PackedBlob) {
+ if pb.Type == restic.TreeBlob {
+ treePacks.Insert(pb.PackID)
+ }
+ })
+
+ // remove all packs containing data blobs
+ rtest.OK(t, r.List(context.TODO(), restic.PackFile, func(id restic.ID, size int64) error {
+ if treePacks.Has(id) != removeTreePacks || keep.Has(id) {
+ return nil
+ }
+ return r.Backend().Remove(context.TODO(), restic.Handle{Type: restic.PackFile, Name: id.String()})
+ }))
+}
+
+func includes(haystack []string, needle string) bool {
+ for _, s := range haystack {
+ if s == needle {
+ return true
+ }
+ }
+
+ return false
+}
+
+func loadSnapshotMap(t testing.TB, gopts GlobalOptions) map[string]struct{} {
+ snapshotIDs := testRunList(t, "snapshots", gopts)
+
+ m := make(map[string]struct{})
+ for _, id := range snapshotIDs {
+ m[id.String()] = struct{}{}
+ }
+
+ return m
+}
+
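+// lastSnapshot returns the first ID found in new but not in old, after
+// adding it to old; the returned ID is "" if there is none.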
+func lastSnapshot(old, new map[string]struct{}) (map[string]struct{}, string) {
+ for k := range new {
+ if _, ok := old[k]; !ok {
+ old[k] = struct{}{}
+ return old, k
+ }
+ }
+
+ return old, ""
+}
+
+func appendRandomData(filename string, bytes uint) error {
+ f, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE, 0666)
+ if err != nil {
+ fmt.Fprint(os.Stderr, err)
+ return err
+ }
+
+ _, err = f.Seek(0, 2)
+ if err != nil {
+ fmt.Fprint(os.Stderr, err)
+ return err
+ }
+
+ _, err = io.Copy(f, io.LimitReader(rand.Reader, int64(bytes)))
+ if err != nil {
+ fmt.Fprint(os.Stderr, err)
+ return err
+ }
+
+ return f.Close()
+}
+
+func testFileSize(filename string, size int64) error {
+ fi, err := os.Stat(filename)
+ if err != nil {
+ return err
+ }
+
+ if fi.Size() != size {
+ return errors.Fatalf("wrong file size for %v: expected %v, got %v", filename, size, fi.Size())
+ }
+
+ return nil
+}
+
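+// withRestoreGlobalOptions runs inner and restores the previous
+// globalOptions afterwards.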
+func withRestoreGlobalOptions(inner func() error) error {
+ gopts := globalOptions
+ defer func() {
+ globalOptions = gopts
+ }()
+ return inner()
+}
+
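+// withCaptureStdout points globalOptions.stdout at a buffer while inner
+// runs and returns the captured output.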
+func withCaptureStdout(inner func() error) (*bytes.Buffer, error) {
+ buf := bytes.NewBuffer(nil)
+ err := withRestoreGlobalOptions(func() error {
+ globalOptions.stdout = buf
+ return inner()
+ })
+
+ return buf, err
+}
+
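+// withTermStatus runs callback with a termstatus.Terminal wired to the
+// test's stdout and stderr, then cancels the terminal and waits for it to
+// finish.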
+func withTermStatus(gopts GlobalOptions, callback func(ctx context.Context, term *termstatus.Terminal) error) error {
+ ctx, cancel := context.WithCancel(context.TODO())
+ var wg sync.WaitGroup
+
+ term := termstatus.New(gopts.stdout, gopts.stderr, gopts.Quiet)
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ term.Run(ctx)
+ }()
+
+ defer wg.Wait()
+ defer cancel()
+
+ return callback(ctx, term)
+}
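
The helpers above centralize repository setup, stdout capture, and terminal
handling that individual tests previously duplicated inline. A hypothetical
test combining them could look as follows (TestSnapshotsOutput and its
assertions are illustrative only, not part of this change):

	func TestSnapshotsOutput(t *testing.T) {
		env, cleanup := withTestEnvironment(t)
		defer cleanup()
		testSetupBackupData(t, env)
		testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, BackupOptions{}, env.gopts)

		// withCaptureStdout swaps globalOptions.stdout for a buffer and
		// restores it afterwards, so the test does not juggle it by hand
		buf, err := withCaptureStdout(func() error {
			return runSnapshots(context.TODO(), SnapshotOptions{}, globalOptions, nil)
		})
		rtest.OK(t, err)
		rtest.Assert(t, buf.Len() > 0, "expected a snapshot listing, got empty output")
	}
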
diff --git a/cmd/restic/integration_test.go b/cmd/restic/integration_test.go
index c87722f02..8ea4d17d9 100644
--- a/cmd/restic/integration_test.go
+++ b/cmd/restic/integration_test.go
@@ -1,1561 +1,18 @@
package main
import (
- "bufio"
- "bytes"
"context"
- "crypto/rand"
- "encoding/json"
"fmt"
"io"
- mrand "math/rand"
"os"
"path/filepath"
- "regexp"
- "runtime"
- "strings"
- "sync"
- "syscall"
"testing"
- "time"
"github.com/restic/restic/internal/errors"
- "github.com/restic/restic/internal/filter"
- "github.com/restic/restic/internal/fs"
- "github.com/restic/restic/internal/index"
- "github.com/restic/restic/internal/repository"
"github.com/restic/restic/internal/restic"
rtest "github.com/restic/restic/internal/test"
- "github.com/restic/restic/internal/ui/termstatus"
- "golang.org/x/sync/errgroup"
)
-func parseIDsFromReader(t testing.TB, rd io.Reader) restic.IDs {
- IDs := restic.IDs{}
- sc := bufio.NewScanner(rd)
-
- for sc.Scan() {
- id, err := restic.ParseID(sc.Text())
- if err != nil {
- t.Logf("parse id %v: %v", sc.Text(), err)
- continue
- }
-
- IDs = append(IDs, id)
- }
-
- return IDs
-}
-
-func testRunInit(t testing.TB, opts GlobalOptions) {
- repository.TestUseLowSecurityKDFParameters(t)
- restic.TestDisableCheckPolynomial(t)
- restic.TestSetLockTimeout(t, 0)
-
- rtest.OK(t, runInit(context.TODO(), InitOptions{}, opts, nil))
- t.Logf("repository initialized at %v", opts.Repo)
-}
-
-func testRunBackupAssumeFailure(t testing.TB, dir string, target []string, opts BackupOptions, gopts GlobalOptions) error {
- ctx, cancel := context.WithCancel(context.TODO())
- defer cancel()
-
- var wg errgroup.Group
- term := termstatus.New(gopts.stdout, gopts.stderr, gopts.Quiet)
- wg.Go(func() error { term.Run(ctx); return nil })
-
- gopts.stdout = io.Discard
- t.Logf("backing up %v in %v", target, dir)
- if dir != "" {
- cleanup := rtest.Chdir(t, dir)
- defer cleanup()
- }
-
- backupErr := runBackup(ctx, opts, gopts, term, target)
-
- cancel()
-
- err := wg.Wait()
- if err != nil {
- t.Fatal(err)
- }
-
- return backupErr
-}
-
-func testRunBackup(t testing.TB, dir string, target []string, opts BackupOptions, gopts GlobalOptions) {
- err := testRunBackupAssumeFailure(t, dir, target, opts, gopts)
- rtest.Assert(t, err == nil, "Error while backing up")
-}
-
-func testRunList(t testing.TB, tpe string, opts GlobalOptions) restic.IDs {
- buf := bytes.NewBuffer(nil)
- globalOptions.stdout = buf
- defer func() {
- globalOptions.stdout = os.Stdout
- }()
-
- rtest.OK(t, runList(context.TODO(), cmdList, opts, []string{tpe}))
- return parseIDsFromReader(t, buf)
-}
-
-func testRunRestore(t testing.TB, opts GlobalOptions, dir string, snapshotID restic.ID) {
- testRunRestoreExcludes(t, opts, dir, snapshotID, nil)
-}
-
-func testRunRestoreLatest(t testing.TB, gopts GlobalOptions, dir string, paths []string, hosts []string) {
- opts := RestoreOptions{
- Target: dir,
- SnapshotFilter: restic.SnapshotFilter{
- Hosts: hosts,
- Paths: paths,
- },
- }
-
- rtest.OK(t, runRestore(context.TODO(), opts, gopts, []string{"latest"}))
-}
-
-func testRunRestoreExcludes(t testing.TB, gopts GlobalOptions, dir string, snapshotID restic.ID, excludes []string) {
- opts := RestoreOptions{
- Target: dir,
- Exclude: excludes,
- }
-
- rtest.OK(t, runRestore(context.TODO(), opts, gopts, []string{snapshotID.String()}))
-}
-
-func testRunRestoreIncludes(t testing.TB, gopts GlobalOptions, dir string, snapshotID restic.ID, includes []string) {
- opts := RestoreOptions{
- Target: dir,
- Include: includes,
- }
-
- rtest.OK(t, runRestore(context.TODO(), opts, gopts, []string{snapshotID.String()}))
-}
-
-func testRunRestoreAssumeFailure(t testing.TB, snapshotID string, opts RestoreOptions, gopts GlobalOptions) error {
- err := runRestore(context.TODO(), opts, gopts, []string{snapshotID})
-
- return err
-}
-
-func testRunCheck(t testing.TB, gopts GlobalOptions) {
- opts := CheckOptions{
- ReadData: true,
- CheckUnused: true,
- }
- rtest.OK(t, runCheck(context.TODO(), opts, gopts, nil))
-}
-
-func testRunCheckOutput(gopts GlobalOptions) (string, error) {
- buf := bytes.NewBuffer(nil)
-
- globalOptions.stdout = buf
- defer func() {
- globalOptions.stdout = os.Stdout
- }()
-
- opts := CheckOptions{
- ReadData: true,
- }
-
- err := runCheck(context.TODO(), opts, gopts, nil)
- return buf.String(), err
-}
-
-func testRunDiffOutput(gopts GlobalOptions, firstSnapshotID string, secondSnapshotID string) (string, error) {
- buf := bytes.NewBuffer(nil)
-
- globalOptions.stdout = buf
- oldStdout := gopts.stdout
- gopts.stdout = buf
- defer func() {
- globalOptions.stdout = os.Stdout
- gopts.stdout = oldStdout
- }()
-
- opts := DiffOptions{
- ShowMetadata: false,
- }
- err := runDiff(context.TODO(), opts, gopts, []string{firstSnapshotID, secondSnapshotID})
- return buf.String(), err
-}
-
-func testRunRebuildIndex(t testing.TB, gopts GlobalOptions) {
- globalOptions.stdout = io.Discard
- defer func() {
- globalOptions.stdout = os.Stdout
- }()
-
- rtest.OK(t, runRebuildIndex(context.TODO(), RebuildIndexOptions{}, gopts))
-}
-
-func testRunLs(t testing.TB, gopts GlobalOptions, snapshotID string) []string {
- buf := bytes.NewBuffer(nil)
- globalOptions.stdout = buf
- quiet := globalOptions.Quiet
- globalOptions.Quiet = true
- defer func() {
- globalOptions.stdout = os.Stdout
- globalOptions.Quiet = quiet
- }()
-
- opts := LsOptions{}
-
- rtest.OK(t, runLs(context.TODO(), opts, gopts, []string{snapshotID}))
-
- return strings.Split(buf.String(), "\n")
-}
-
-func testRunFind(t testing.TB, wantJSON bool, gopts GlobalOptions, pattern string) []byte {
- buf := bytes.NewBuffer(nil)
- globalOptions.stdout = buf
- globalOptions.JSON = wantJSON
- defer func() {
- globalOptions.stdout = os.Stdout
- globalOptions.JSON = false
- }()
-
- opts := FindOptions{}
-
- rtest.OK(t, runFind(context.TODO(), opts, gopts, []string{pattern}))
-
- return buf.Bytes()
-}
-
-func testRunSnapshots(t testing.TB, gopts GlobalOptions) (newest *Snapshot, snapmap map[restic.ID]Snapshot) {
- buf := bytes.NewBuffer(nil)
- globalOptions.stdout = buf
- globalOptions.JSON = true
- defer func() {
- globalOptions.stdout = os.Stdout
- globalOptions.JSON = gopts.JSON
- }()
-
- opts := SnapshotOptions{}
-
- rtest.OK(t, runSnapshots(context.TODO(), opts, globalOptions, []string{}))
-
- snapshots := []Snapshot{}
- rtest.OK(t, json.Unmarshal(buf.Bytes(), &snapshots))
-
- snapmap = make(map[restic.ID]Snapshot, len(snapshots))
- for _, sn := range snapshots {
- sn := sn // copy: taking &sn of the range variable would alias every iteration
- snapmap[*sn.ID] = sn
- if newest == nil || sn.Time.After(newest.Time) {
- newest = &sn
- }
- }
- return
-}
-
-func testRunForget(t testing.TB, gopts GlobalOptions, args ...string) {
- opts := ForgetOptions{}
- rtest.OK(t, runForget(context.TODO(), opts, gopts, args))
-}
-
-func testRunForgetJSON(t testing.TB, gopts GlobalOptions, args ...string) {
- buf := bytes.NewBuffer(nil)
- oldJSON := gopts.JSON
- gopts.stdout = buf
- gopts.JSON = true
- defer func() {
- gopts.stdout = os.Stdout
- gopts.JSON = oldJSON
- }()
-
- opts := ForgetOptions{
- DryRun: true,
- Last: 1,
- }
-
- rtest.OK(t, runForget(context.TODO(), opts, gopts, args))
-
- var forgets []*ForgetGroup
- rtest.OK(t, json.Unmarshal(buf.Bytes(), &forgets))
-
- rtest.Assert(t, len(forgets) == 1,
- "Expected 1 snapshot group, got %v", len(forgets))
- rtest.Assert(t, len(forgets[0].Keep) == 1,
- "Expected 1 snapshot to be kept, got %v", len(forgets[0].Keep))
- rtest.Assert(t, len(forgets[0].Remove) == 2,
- "Expected 2 snapshots to be removed, got %v", len(forgets[0].Remove))
-}
-
-func testRunPrune(t testing.TB, gopts GlobalOptions, opts PruneOptions) {
- oldHook := gopts.backendTestHook
- gopts.backendTestHook = func(r restic.Backend) (restic.Backend, error) { return newListOnceBackend(r), nil }
- defer func() {
- gopts.backendTestHook = oldHook
- }()
- rtest.OK(t, runPrune(context.TODO(), opts, gopts))
-}
-
-func testSetupBackupData(t testing.TB, env *testEnvironment) string {
- datafile := filepath.Join("testdata", "backup-data.tar.gz")
- testRunInit(t, env.gopts)
- rtest.SetupTarTestFixture(t, env.testdata, datafile)
- return datafile
-}
-
-func TestBackup(t *testing.T) {
- testBackup(t, false)
-}
-
-func TestBackupWithFilesystemSnapshots(t *testing.T) {
- if runtime.GOOS == "windows" && fs.HasSufficientPrivilegesForVSS() == nil {
- testBackup(t, true)
- }
-}
-
-func testBackup(t *testing.T, useFsSnapshot bool) {
- env, cleanup := withTestEnvironment(t)
- defer cleanup()
-
- testSetupBackupData(t, env)
- opts := BackupOptions{UseFsSnapshot: useFsSnapshot}
-
- // first backup
- testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts)
- snapshotIDs := testRunList(t, "snapshots", env.gopts)
- rtest.Assert(t, len(snapshotIDs) == 1,
- "expected one snapshot, got %v", snapshotIDs)
-
- testRunCheck(t, env.gopts)
- stat1 := dirStats(env.repo)
-
- // second backup, implicit incremental
- testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts)
- snapshotIDs = testRunList(t, "snapshots", env.gopts)
- rtest.Assert(t, len(snapshotIDs) == 2,
- "expected two snapshots, got %v", snapshotIDs)
-
- stat2 := dirStats(env.repo)
- if stat2.size > stat1.size+stat1.size/10 {
- t.Error("repository size has grown by more than 10 percent")
- }
- t.Logf("repository grown by %d bytes", stat2.size-stat1.size)
-
- testRunCheck(t, env.gopts)
- // third backup, explicit incremental
- opts.Parent = snapshotIDs[0].String()
- testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts)
- snapshotIDs = testRunList(t, "snapshots", env.gopts)
- rtest.Assert(t, len(snapshotIDs) == 3,
- "expected three snapshots, got %v", snapshotIDs)
-
- stat3 := dirStats(env.repo)
- if stat3.size > stat1.size+stat1.size/10 {
- t.Error("repository size has grown by more than 10 percent")
- }
- t.Logf("repository grown by %d bytes", stat3.size-stat2.size)
-
- // restore all backups and compare
- for i, snapshotID := range snapshotIDs {
- restoredir := filepath.Join(env.base, fmt.Sprintf("restore%d", i))
- t.Logf("restoring snapshot %v to %v", snapshotID.Str(), restoredir)
- testRunRestore(t, env.gopts, restoredir, snapshotID)
- diff := directoriesContentsDiff(env.testdata, filepath.Join(restoredir, "testdata"))
- rtest.Assert(t, diff == "", "directories are not equal: %v", diff)
- }
-
- testRunCheck(t, env.gopts)
-}
-
-func TestDryRunBackup(t *testing.T) {
- env, cleanup := withTestEnvironment(t)
- defer cleanup()
-
- testSetupBackupData(t, env)
- opts := BackupOptions{}
- dryOpts := BackupOptions{DryRun: true}
-
- // dry run before first backup
- testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, dryOpts, env.gopts)
- snapshotIDs := testRunList(t, "snapshots", env.gopts)
- rtest.Assert(t, len(snapshotIDs) == 0,
- "expected no snapshot, got %v", snapshotIDs)
- packIDs := testRunList(t, "packs", env.gopts)
- rtest.Assert(t, len(packIDs) == 0,
- "expected no data, got %v", snapshotIDs)
- indexIDs := testRunList(t, "index", env.gopts)
- rtest.Assert(t, len(indexIDs) == 0,
- "expected no index, got %v", snapshotIDs)
-
- // first backup
- testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts)
- snapshotIDs = testRunList(t, "snapshots", env.gopts)
- packIDs = testRunList(t, "packs", env.gopts)
- indexIDs = testRunList(t, "index", env.gopts)
-
- // dry run between backups
- testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, dryOpts, env.gopts)
- snapshotIDsAfter := testRunList(t, "snapshots", env.gopts)
- rtest.Equals(t, snapshotIDs, snapshotIDsAfter)
- dataIDsAfter := testRunList(t, "packs", env.gopts)
- rtest.Equals(t, packIDs, dataIDsAfter)
- indexIDsAfter := testRunList(t, "index", env.gopts)
- rtest.Equals(t, indexIDs, indexIDsAfter)
-
- // second backup, implicit incremental
- testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts)
- snapshotIDs = testRunList(t, "snapshots", env.gopts)
- packIDs = testRunList(t, "packs", env.gopts)
- indexIDs = testRunList(t, "index", env.gopts)
-
- // another dry run
- testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, dryOpts, env.gopts)
- snapshotIDsAfter = testRunList(t, "snapshots", env.gopts)
- rtest.Equals(t, snapshotIDs, snapshotIDsAfter)
- dataIDsAfter = testRunList(t, "packs", env.gopts)
- rtest.Equals(t, packIDs, dataIDsAfter)
- indexIDsAfter = testRunList(t, "index", env.gopts)
- rtest.Equals(t, indexIDs, indexIDsAfter)
-}
-
-func TestBackupNonExistingFile(t *testing.T) {
- env, cleanup := withTestEnvironment(t)
- defer cleanup()
-
- testSetupBackupData(t, env)
- globalOptions.stderr = io.Discard
- defer func() {
- globalOptions.stderr = os.Stderr
- }()
-
- p := filepath.Join(env.testdata, "0", "0", "9")
- dirs := []string{
- filepath.Join(p, "0"),
- filepath.Join(p, "1"),
- filepath.Join(p, "nonexisting"),
- filepath.Join(p, "5"),
- }
-
- opts := BackupOptions{}
-
- testRunBackup(t, "", dirs, opts, env.gopts)
-}
-
-func removePacksExcept(gopts GlobalOptions, t *testing.T, keep restic.IDSet, removeTreePacks bool) {
- r, err := OpenRepository(context.TODO(), gopts)
- rtest.OK(t, err)
-
- // Get all tree packs
- rtest.OK(t, r.LoadIndex(context.TODO()))
-
- treePacks := restic.NewIDSet()
- r.Index().Each(context.TODO(), func(pb restic.PackedBlob) {
- if pb.Type == restic.TreeBlob {
- treePacks.Insert(pb.PackID)
- }
- })
-
- // remove all packs containing data blobs
- rtest.OK(t, r.List(context.TODO(), restic.PackFile, func(id restic.ID, size int64) error {
- if treePacks.Has(id) != removeTreePacks || keep.Has(id) {
- return nil
- }
- return r.Backend().Remove(context.TODO(), restic.Handle{Type: restic.PackFile, Name: id.String()})
- }))
-}
-
-func TestBackupSelfHealing(t *testing.T) {
- env, cleanup := withTestEnvironment(t)
- defer cleanup()
-
- testRunInit(t, env.gopts)
-
- p := filepath.Join(env.testdata, "test/test")
- rtest.OK(t, os.MkdirAll(filepath.Dir(p), 0755))
- rtest.OK(t, appendRandomData(p, 5))
-
- opts := BackupOptions{}
-
- testRunBackup(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts)
- testRunCheck(t, env.gopts)
-
- // remove all data packs
- removePacksExcept(env.gopts, t, restic.NewIDSet(), false)
-
- testRunRebuildIndex(t, env.gopts)
- // now the repo is also missing the data blob in the index; check should report this
- rtest.Assert(t, runCheck(context.TODO(), CheckOptions{}, env.gopts, nil) != nil,
- "check should have reported an error")
-
- // second backup should report an error but "heal" this situation
- err := testRunBackupAssumeFailure(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts)
- rtest.Assert(t, err != nil,
- "backup should have reported an error")
- testRunCheck(t, env.gopts)
-}
-
-func TestBackupTreeLoadError(t *testing.T) {
- env, cleanup := withTestEnvironment(t)
- defer cleanup()
-
- testRunInit(t, env.gopts)
- p := filepath.Join(env.testdata, "test/test")
- rtest.OK(t, os.MkdirAll(filepath.Dir(p), 0755))
- rtest.OK(t, appendRandomData(p, 5))
-
- opts := BackupOptions{}
- // Backup a subdirectory first, such that we can remove the tree pack for the subdirectory
- testRunBackup(t, env.testdata, []string{"test"}, opts, env.gopts)
-
- r, err := OpenRepository(context.TODO(), env.gopts)
- rtest.OK(t, err)
- rtest.OK(t, r.LoadIndex(context.TODO()))
- treePacks := restic.NewIDSet()
- r.Index().Each(context.TODO(), func(pb restic.PackedBlob) {
- if pb.Type == restic.TreeBlob {
- treePacks.Insert(pb.PackID)
- }
- })
-
- testRunBackup(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts)
- testRunCheck(t, env.gopts)
-
- // delete the subdirectory pack first
- for id := range treePacks {
- rtest.OK(t, r.Backend().Remove(context.TODO(), restic.Handle{Type: restic.PackFile, Name: id.String()}))
- }
- testRunRebuildIndex(t, env.gopts)
- // now the repo is missing the tree blob in the index; check should report this
- rtest.Assert(t, runCheck(context.TODO(), CheckOptions{}, env.gopts, nil) != nil, "check should have reported an error")
- // second backup should report an error but "heal" this situation
- err = testRunBackupAssumeFailure(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts)
- rtest.Assert(t, err != nil, "backup should have reported an error for the subdirectory")
- testRunCheck(t, env.gopts)
-
- // remove all tree packs
- removePacksExcept(env.gopts, t, restic.NewIDSet(), true)
- testRunRebuildIndex(t, env.gopts)
- // now the repo is also missing the data blob in the index; check should report this
- rtest.Assert(t, runCheck(context.TODO(), CheckOptions{}, env.gopts, nil) != nil, "check should have reported an error")
- // second backup should report an error but "heal" this situation
- err = testRunBackupAssumeFailure(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts)
- rtest.Assert(t, err != nil, "backup should have reported an error")
- testRunCheck(t, env.gopts)
-}
-
-func includes(haystack []string, needle string) bool {
- for _, s := range haystack {
- if s == needle {
- return true
- }
- }
-
- return false
-}
-
-func loadSnapshotMap(t testing.TB, gopts GlobalOptions) map[string]struct{} {
- snapshotIDs := testRunList(t, "snapshots", gopts)
-
- m := make(map[string]struct{})
- for _, id := range snapshotIDs {
- m[id.String()] = struct{}{}
- }
-
- return m
-}
-
-func lastSnapshot(old, new map[string]struct{}) (map[string]struct{}, string) {
- for k := range new {
- if _, ok := old[k]; !ok {
- old[k] = struct{}{}
- return old, k
- }
- }
-
- return old, ""
-}
-
-var backupExcludeFilenames = []string{
- "testfile1",
- "foo.tar.gz",
- "private/secret/passwords.txt",
- "work/source/test.c",
-}
-
-func TestBackupExclude(t *testing.T) {
- env, cleanup := withTestEnvironment(t)
- defer cleanup()
-
- testRunInit(t, env.gopts)
-
- datadir := filepath.Join(env.base, "testdata")
-
- for _, filename := range backupExcludeFilenames {
- fp := filepath.Join(datadir, filename)
- rtest.OK(t, os.MkdirAll(filepath.Dir(fp), 0755))
-
- f, err := os.Create(fp)
- rtest.OK(t, err)
-
- fmt.Fprint(f, filename)
- rtest.OK(t, f.Close())
- }
-
- snapshots := make(map[string]struct{})
-
- opts := BackupOptions{}
-
- testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts)
- snapshots, snapshotID := lastSnapshot(snapshots, loadSnapshotMap(t, env.gopts))
- files := testRunLs(t, env.gopts, snapshotID)
- rtest.Assert(t, includes(files, "/testdata/foo.tar.gz"),
- "expected file %q in first snapshot, but it's not included", "foo.tar.gz")
-
- opts.Excludes = []string{"*.tar.gz"}
- testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts)
- snapshots, snapshotID = lastSnapshot(snapshots, loadSnapshotMap(t, env.gopts))
- files = testRunLs(t, env.gopts, snapshotID)
- rtest.Assert(t, !includes(files, "/testdata/foo.tar.gz"),
- "expected file %q not in first snapshot, but it's included", "foo.tar.gz")
-
- opts.Excludes = []string{"*.tar.gz", "private/secret"}
- testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts)
- _, snapshotID = lastSnapshot(snapshots, loadSnapshotMap(t, env.gopts))
- files = testRunLs(t, env.gopts, snapshotID)
- rtest.Assert(t, !includes(files, "/testdata/foo.tar.gz"),
- "expected file %q not in first snapshot, but it's included", "foo.tar.gz")
- rtest.Assert(t, !includes(files, "/testdata/private/secret/passwords.txt"),
- "expected file %q not in first snapshot, but it's included", "passwords.txt")
-}
-
-func TestBackupErrors(t *testing.T) {
- if runtime.GOOS == "windows" {
- return
- }
- env, cleanup := withTestEnvironment(t)
- defer cleanup()
-
- testSetupBackupData(t, env)
-
- // Assume failure
- inaccessibleFile := filepath.Join(env.testdata, "0", "0", "9", "0")
- rtest.OK(t, os.Chmod(inaccessibleFile, 0000))
- defer func() {
- rtest.OK(t, os.Chmod(inaccessibleFile, 0644))
- }()
- opts := BackupOptions{}
- gopts := env.gopts
- gopts.stderr = io.Discard
- err := testRunBackupAssumeFailure(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, gopts)
- rtest.Assert(t, err != nil, "Assumed failure, but no error occurred.")
- rtest.Assert(t, err == ErrInvalidSourceData, "Wrong error returned")
- snapshotIDs := testRunList(t, "snapshots", env.gopts)
- rtest.Assert(t, len(snapshotIDs) == 1,
- "expected one snapshot, got %v", snapshotIDs)
-}
-
-const (
- incrementalFirstWrite = 10 * 1042 * 1024
- incrementalSecondWrite = 1 * 1042 * 1024
- incrementalThirdWrite = 1 * 1042 * 1024
-)
-
-func appendRandomData(filename string, bytes uint) error {
- f, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE, 0666)
- if err != nil {
- fmt.Fprint(os.Stderr, err)
- return err
- }
-
- _, err = f.Seek(0, 2)
- if err != nil {
- fmt.Fprint(os.Stderr, err)
- return err
- }
-
- _, err = io.Copy(f, io.LimitReader(rand.Reader, int64(bytes)))
- if err != nil {
- fmt.Fprint(os.Stderr, err)
- return err
- }
-
- return f.Close()
-}
-
-func TestIncrementalBackup(t *testing.T) {
- env, cleanup := withTestEnvironment(t)
- defer cleanup()
-
- testRunInit(t, env.gopts)
-
- datadir := filepath.Join(env.base, "testdata")
- testfile := filepath.Join(datadir, "testfile")
-
- rtest.OK(t, appendRandomData(testfile, incrementalFirstWrite))
-
- opts := BackupOptions{}
-
- testRunBackup(t, "", []string{datadir}, opts, env.gopts)
- testRunCheck(t, env.gopts)
- stat1 := dirStats(env.repo)
-
- rtest.OK(t, appendRandomData(testfile, incrementalSecondWrite))
-
- testRunBackup(t, "", []string{datadir}, opts, env.gopts)
- testRunCheck(t, env.gopts)
- stat2 := dirStats(env.repo)
- if stat2.size-stat1.size > incrementalFirstWrite {
- t.Errorf("repository size has grown by more than %d bytes", incrementalFirstWrite)
- }
- t.Logf("repository grown by %d bytes", stat2.size-stat1.size)
-
- rtest.OK(t, appendRandomData(testfile, incrementalThirdWrite))
-
- testRunBackup(t, "", []string{datadir}, opts, env.gopts)
- testRunCheck(t, env.gopts)
- stat3 := dirStats(env.repo)
- if stat3.size-stat2.size > incrementalFirstWrite {
- t.Errorf("repository size has grown by more than %d bytes", incrementalFirstWrite)
- }
- t.Logf("repository grown by %d bytes", stat3.size-stat2.size)
-}
-
-func TestBackupTags(t *testing.T) {
- env, cleanup := withTestEnvironment(t)
- defer cleanup()
-
- testSetupBackupData(t, env)
- opts := BackupOptions{}
-
- testRunBackup(t, "", []string{env.testdata}, opts, env.gopts)
- testRunCheck(t, env.gopts)
- newest, _ := testRunSnapshots(t, env.gopts)
-
- if newest == nil {
- t.Fatal("expected a backup, got nil")
- }
-
- rtest.Assert(t, len(newest.Tags) == 0,
- "expected no tags, got %v", newest.Tags)
- parent := newest
-
- opts.Tags = restic.TagLists{[]string{"NL"}}
- testRunBackup(t, "", []string{env.testdata}, opts, env.gopts)
- testRunCheck(t, env.gopts)
- newest, _ = testRunSnapshots(t, env.gopts)
-
- if newest == nil {
- t.Fatal("expected a backup, got nil")
- }
-
- rtest.Assert(t, len(newest.Tags) == 1 && newest.Tags[0] == "NL",
- "expected one NL tag, got %v", newest.Tags)
- // Tagged backup should have untagged backup as parent.
- rtest.Assert(t, parent.ID.Equal(*newest.Parent),
- "expected parent to be %v, got %v", parent.ID, newest.Parent)
-}
-
-func testRunCopy(t testing.TB, srcGopts GlobalOptions, dstGopts GlobalOptions) {
- gopts := srcGopts
- gopts.Repo = dstGopts.Repo
- gopts.password = dstGopts.password
- copyOpts := CopyOptions{
- secondaryRepoOptions: secondaryRepoOptions{
- Repo: srcGopts.Repo,
- password: srcGopts.password,
- },
- }
-
- rtest.OK(t, runCopy(context.TODO(), copyOpts, gopts, nil))
-}
-
-func TestCopy(t *testing.T) {
- env, cleanup := withTestEnvironment(t)
- defer cleanup()
- env2, cleanup2 := withTestEnvironment(t)
- defer cleanup2()
-
- testSetupBackupData(t, env)
- opts := BackupOptions{}
- testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9")}, opts, env.gopts)
- testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "2")}, opts, env.gopts)
- testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "3")}, opts, env.gopts)
- testRunCheck(t, env.gopts)
-
- testRunInit(t, env2.gopts)
- testRunCopy(t, env.gopts, env2.gopts)
-
- snapshotIDs := testRunList(t, "snapshots", env.gopts)
- copiedSnapshotIDs := testRunList(t, "snapshots", env2.gopts)
-
- // Check that the copies size seems reasonable
- rtest.Assert(t, len(snapshotIDs) == len(copiedSnapshotIDs), "expected %v snapshots, found %v",
- len(snapshotIDs), len(copiedSnapshotIDs))
- stat := dirStats(env.repo)
- stat2 := dirStats(env2.repo)
- sizeDiff := int64(stat.size) - int64(stat2.size)
- if sizeDiff < 0 {
- sizeDiff = -sizeDiff
- }
- rtest.Assert(t, sizeDiff < int64(stat.size)/50, "expected less than 2%% size difference: %v vs. %v",
- stat.size, stat2.size)
-
- // Check integrity of the copy
- testRunCheck(t, env2.gopts)
-
- // Check that the copied snapshots have the same tree contents as the old ones (= identical tree hash)
- origRestores := make(map[string]struct{})
- for i, snapshotID := range snapshotIDs {
- restoredir := filepath.Join(env.base, fmt.Sprintf("restore%d", i))
- origRestores[restoredir] = struct{}{}
- testRunRestore(t, env.gopts, restoredir, snapshotID)
- }
- for i, snapshotID := range copiedSnapshotIDs {
- restoredir := filepath.Join(env2.base, fmt.Sprintf("restore%d", i))
- testRunRestore(t, env2.gopts, restoredir, snapshotID)
- foundMatch := false
- for cmpdir := range origRestores {
- diff := directoriesContentsDiff(restoredir, cmpdir)
- if diff == "" {
- delete(origRestores, cmpdir)
- foundMatch = true
- }
- }
-
- rtest.Assert(t, foundMatch, "found no counterpart for snapshot %v", snapshotID)
- }
-
- rtest.Assert(t, len(origRestores) == 0, "found snapshots that were not copied")
-}
-
-func TestCopyIncremental(t *testing.T) {
- env, cleanup := withTestEnvironment(t)
- defer cleanup()
- env2, cleanup2 := withTestEnvironment(t)
- defer cleanup2()
-
- testSetupBackupData(t, env)
- opts := BackupOptions{}
- testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9")}, opts, env.gopts)
- testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "2")}, opts, env.gopts)
- testRunCheck(t, env.gopts)
-
- testRunInit(t, env2.gopts)
- testRunCopy(t, env.gopts, env2.gopts)
-
- snapshotIDs := testRunList(t, "snapshots", env.gopts)
- copiedSnapshotIDs := testRunList(t, "snapshots", env2.gopts)
-
- // Check that the copies size seems reasonable
- testRunCheck(t, env2.gopts)
- rtest.Assert(t, len(snapshotIDs) == len(copiedSnapshotIDs), "expected %v snapshots, found %v",
- len(snapshotIDs), len(copiedSnapshotIDs))
-
- // check that no snapshots are copied, as there are no new ones
- testRunCopy(t, env.gopts, env2.gopts)
- testRunCheck(t, env2.gopts)
- copiedSnapshotIDs = testRunList(t, "snapshots", env2.gopts)
- rtest.Assert(t, len(snapshotIDs) == len(copiedSnapshotIDs), "still expected %v snapshots, found %v",
- len(snapshotIDs), len(copiedSnapshotIDs))
-
- // check that only new snapshots are copied
- testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "3")}, opts, env.gopts)
- testRunCopy(t, env.gopts, env2.gopts)
- testRunCheck(t, env2.gopts)
- snapshotIDs = testRunList(t, "snapshots", env.gopts)
- copiedSnapshotIDs = testRunList(t, "snapshots", env2.gopts)
- rtest.Assert(t, len(snapshotIDs) == len(copiedSnapshotIDs), "still expected %v snapshots, found %v",
- len(snapshotIDs), len(copiedSnapshotIDs))
-
- // also test the reverse direction
- testRunCopy(t, env2.gopts, env.gopts)
- testRunCheck(t, env.gopts)
- snapshotIDs = testRunList(t, "snapshots", env.gopts)
- rtest.Assert(t, len(snapshotIDs) == len(copiedSnapshotIDs), "still expected %v snapshots, found %v",
- len(copiedSnapshotIDs), len(snapshotIDs))
-}
-
-func TestCopyUnstableJSON(t *testing.T) {
- env, cleanup := withTestEnvironment(t)
- defer cleanup()
- env2, cleanup2 := withTestEnvironment(t)
- defer cleanup2()
-
- // contains a symlink created using `ln -s '../i/'$'\355\246\361''d/samba' broken-symlink`
- datafile := filepath.Join("testdata", "copy-unstable-json.tar.gz")
- rtest.SetupTarTestFixture(t, env.base, datafile)
-
- testRunInit(t, env2.gopts)
- testRunCopy(t, env.gopts, env2.gopts)
- testRunCheck(t, env2.gopts)
-
- copiedSnapshotIDs := testRunList(t, "snapshots", env2.gopts)
- rtest.Assert(t, 1 == len(copiedSnapshotIDs), "still expected %v snapshot, found %v",
- 1, len(copiedSnapshotIDs))
-}
-
-func TestInitCopyChunkerParams(t *testing.T) {
- env, cleanup := withTestEnvironment(t)
- defer cleanup()
- env2, cleanup2 := withTestEnvironment(t)
- defer cleanup2()
-
- testRunInit(t, env2.gopts)
-
- initOpts := InitOptions{
- secondaryRepoOptions: secondaryRepoOptions{
- Repo: env2.gopts.Repo,
- password: env2.gopts.password,
- },
- }
- rtest.Assert(t, runInit(context.TODO(), initOpts, env.gopts, nil) != nil, "expected invalid init options to fail")
-
- initOpts.CopyChunkerParameters = true
- rtest.OK(t, runInit(context.TODO(), initOpts, env.gopts, nil))
-
- repo, err := OpenRepository(context.TODO(), env.gopts)
- rtest.OK(t, err)
-
- otherRepo, err := OpenRepository(context.TODO(), env2.gopts)
- rtest.OK(t, err)
-
- rtest.Assert(t, repo.Config().ChunkerPolynomial == otherRepo.Config().ChunkerPolynomial,
- "expected equal chunker polynomials, got %v expected %v", repo.Config().ChunkerPolynomial,
- otherRepo.Config().ChunkerPolynomial)
-}
-
-func testRunTag(t testing.TB, opts TagOptions, gopts GlobalOptions) {
- rtest.OK(t, runTag(context.TODO(), opts, gopts, []string{}))
-}
-
-func TestTag(t *testing.T) {
- env, cleanup := withTestEnvironment(t)
- defer cleanup()
-
- testSetupBackupData(t, env)
- testRunBackup(t, "", []string{env.testdata}, BackupOptions{}, env.gopts)
- testRunCheck(t, env.gopts)
- newest, _ := testRunSnapshots(t, env.gopts)
- if newest == nil {
- t.Fatal("expected a new backup, got nil")
- }
-
- rtest.Assert(t, len(newest.Tags) == 0,
- "expected no tags, got %v", newest.Tags)
- rtest.Assert(t, newest.Original == nil,
- "expected original ID to be nil, got %v", newest.Original)
- originalID := *newest.ID
-
- testRunTag(t, TagOptions{SetTags: restic.TagLists{[]string{"NL"}}}, env.gopts)
- testRunCheck(t, env.gopts)
- newest, _ = testRunSnapshots(t, env.gopts)
- if newest == nil {
- t.Fatal("expected a backup, got nil")
- }
- rtest.Assert(t, len(newest.Tags) == 1 && newest.Tags[0] == "NL",
- "set failed, expected one NL tag, got %v", newest.Tags)
- rtest.Assert(t, newest.Original != nil, "expected original snapshot id, got nil")
- rtest.Assert(t, *newest.Original == originalID,
- "expected original ID to be set to the first snapshot id")
-
- testRunTag(t, TagOptions{AddTags: restic.TagLists{[]string{"CH"}}}, env.gopts)
- testRunCheck(t, env.gopts)
- newest, _ = testRunSnapshots(t, env.gopts)
- if newest == nil {
- t.Fatal("expected a backup, got nil")
- }
- rtest.Assert(t, len(newest.Tags) == 2 && newest.Tags[0] == "NL" && newest.Tags[1] == "CH",
- "add failed, expected CH,NL tags, got %v", newest.Tags)
- rtest.Assert(t, newest.Original != nil, "expected original snapshot id, got nil")
- rtest.Assert(t, *newest.Original == originalID,
- "expected original ID to be set to the first snapshot id")
-
- testRunTag(t, TagOptions{RemoveTags: restic.TagLists{[]string{"NL"}}}, env.gopts)
- testRunCheck(t, env.gopts)
- newest, _ = testRunSnapshots(t, env.gopts)
- if newest == nil {
- t.Fatal("expected a backup, got nil")
- }
- rtest.Assert(t, len(newest.Tags) == 1 && newest.Tags[0] == "CH",
- "remove failed, expected one CH tag, got %v", newest.Tags)
- rtest.Assert(t, newest.Original != nil, "expected original snapshot id, got nil")
- rtest.Assert(t, *newest.Original == originalID,
- "expected original ID to be set to the first snapshot id")
-
- testRunTag(t, TagOptions{AddTags: restic.TagLists{[]string{"US", "RU"}}}, env.gopts)
- testRunTag(t, TagOptions{RemoveTags: restic.TagLists{[]string{"CH", "US", "RU"}}}, env.gopts)
- testRunCheck(t, env.gopts)
- newest, _ = testRunSnapshots(t, env.gopts)
- if newest == nil {
- t.Fatal("expected a backup, got nil")
- }
- rtest.Assert(t, len(newest.Tags) == 0,
- "expected no tags, got %v", newest.Tags)
- rtest.Assert(t, newest.Original != nil, "expected original snapshot id, got nil")
- rtest.Assert(t, *newest.Original == originalID,
- "expected original ID to be set to the first snapshot id")
-
- // Check special case of removing all tags.
- testRunTag(t, TagOptions{SetTags: restic.TagLists{[]string{""}}}, env.gopts)
- testRunCheck(t, env.gopts)
- newest, _ = testRunSnapshots(t, env.gopts)
- if newest == nil {
- t.Fatal("expected a backup, got nil")
- }
- rtest.Assert(t, len(newest.Tags) == 0,
- "expected no tags, got %v", newest.Tags)
- rtest.Assert(t, newest.Original != nil, "expected original snapshot id, got nil")
- rtest.Assert(t, *newest.Original == originalID,
- "expected original ID to be set to the first snapshot id")
-}
-
-func testRunKeyListOtherIDs(t testing.TB, gopts GlobalOptions) []string {
- buf := bytes.NewBuffer(nil)
-
- globalOptions.stdout = buf
- defer func() {
- globalOptions.stdout = os.Stdout
- }()
-
- rtest.OK(t, runKey(context.TODO(), gopts, []string{"list"}))
-
- scanner := bufio.NewScanner(buf)
- exp := regexp.MustCompile(`^ ([a-f0-9]+) `)
-
- IDs := []string{}
- for scanner.Scan() {
- if id := exp.FindStringSubmatch(scanner.Text()); id != nil {
- IDs = append(IDs, id[1])
- }
- }
-
- return IDs
-}
-
-func testRunKeyAddNewKey(t testing.TB, newPassword string, gopts GlobalOptions) {
- testKeyNewPassword = newPassword
- defer func() {
- testKeyNewPassword = ""
- }()
-
- rtest.OK(t, runKey(context.TODO(), gopts, []string{"add"}))
-}
-
-func testRunKeyAddNewKeyUserHost(t testing.TB, gopts GlobalOptions) {
- testKeyNewPassword = "john's geheimnis"
- defer func() {
- testKeyNewPassword = ""
- keyUsername = ""
- keyHostname = ""
- }()
-
- rtest.OK(t, cmdKey.Flags().Parse([]string{"--user=john", "--host=example.com"}))
-
- t.Log("adding key for john@example.com")
- rtest.OK(t, runKey(context.TODO(), gopts, []string{"add"}))
-
- repo, err := OpenRepository(context.TODO(), gopts)
- rtest.OK(t, err)
- key, err := repository.SearchKey(context.TODO(), repo, testKeyNewPassword, 2, "")
- rtest.OK(t, err)
-
- rtest.Equals(t, "john", key.Username)
- rtest.Equals(t, "example.com", key.Hostname)
-}
-
-func testRunKeyPasswd(t testing.TB, newPassword string, gopts GlobalOptions) {
- testKeyNewPassword = newPassword
- defer func() {
- testKeyNewPassword = ""
- }()
-
- rtest.OK(t, runKey(context.TODO(), gopts, []string{"passwd"}))
-}
-
-func testRunKeyRemove(t testing.TB, gopts GlobalOptions, IDs []string) {
- t.Logf("remove %d keys: %q\n", len(IDs), IDs)
- for _, id := range IDs {
- rtest.OK(t, runKey(context.TODO(), gopts, []string{"remove", id}))
- }
-}
-
-func TestKeyAddRemove(t *testing.T) {
- passwordList := []string{
- "OnnyiasyatvodsEvVodyawit",
- "raicneirvOjEfEigonOmLasOd",
- }
-
- env, cleanup := withTestEnvironment(t)
- // must list keys more than once
- env.gopts.backendTestHook = nil
- defer cleanup()
-
- testRunInit(t, env.gopts)
-
- testRunKeyPasswd(t, "geheim2", env.gopts)
- env.gopts.password = "geheim2"
- t.Logf("changed password to %q", env.gopts.password)
-
- for _, newPassword := range passwordList {
- testRunKeyAddNewKey(t, newPassword, env.gopts)
- t.Logf("added new password %q", newPassword)
- env.gopts.password = newPassword
- testRunKeyRemove(t, env.gopts, testRunKeyListOtherIDs(t, env.gopts))
- }
-
- env.gopts.password = passwordList[len(passwordList)-1]
- t.Logf("testing access with last password %q\n", env.gopts.password)
- rtest.OK(t, runKey(context.TODO(), env.gopts, []string{"list"}))
- testRunCheck(t, env.gopts)
-
- testRunKeyAddNewKeyUserHost(t, env.gopts)
-}
-
-type emptySaveBackend struct {
- restic.Backend
-}
-
-func (b *emptySaveBackend) Save(ctx context.Context, h restic.Handle, rd restic.RewindReader) error {
- return b.Backend.Save(ctx, h, restic.NewByteReader([]byte{}, nil))
-}
-
-func TestKeyProblems(t *testing.T) {
- env, cleanup := withTestEnvironment(t)
- defer cleanup()
-
- testRunInit(t, env.gopts)
- env.gopts.backendTestHook = func(r restic.Backend) (restic.Backend, error) {
- return &emptySaveBackend{r}, nil
- }
-
- testKeyNewPassword = "geheim2"
- defer func() {
- testKeyNewPassword = ""
- }()
-
- err := runKey(context.TODO(), env.gopts, []string{"passwd"})
- t.Log(err)
- rtest.Assert(t, err != nil, "expected passwd change to fail")
-
- err = runKey(context.TODO(), env.gopts, []string{"add"})
- t.Log(err)
- rtest.Assert(t, err != nil, "expected key adding to fail")
-
- t.Logf("testing access with initial password %q\n", env.gopts.password)
- rtest.OK(t, runKey(context.TODO(), env.gopts, []string{"list"}))
- testRunCheck(t, env.gopts)
-}
-
-func testFileSize(filename string, size int64) error {
- fi, err := os.Stat(filename)
- if err != nil {
- return err
- }
-
- if fi.Size() != size {
- return errors.Fatalf("wrong file size for %v: expected %v, got %v", filename, size, fi.Size())
- }
-
- return nil
-}
-
-func TestRestoreFilter(t *testing.T) {
- testfiles := []struct {
- name string
- size uint
- }{
- {"testfile1.c", 100},
- {"testfile2.exe", 101},
- {"subdir1/subdir2/testfile3.docx", 102},
- {"subdir1/subdir2/testfile4.c", 102},
- }
-
- env, cleanup := withTestEnvironment(t)
- defer cleanup()
-
- testRunInit(t, env.gopts)
-
- for _, testFile := range testfiles {
- p := filepath.Join(env.testdata, testFile.name)
- rtest.OK(t, os.MkdirAll(filepath.Dir(p), 0755))
- rtest.OK(t, appendRandomData(p, testFile.size))
- }
-
- opts := BackupOptions{}
-
- testRunBackup(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts)
- testRunCheck(t, env.gopts)
-
- snapshotID := testRunList(t, "snapshots", env.gopts)[0]
-
- // no restore filter should restore all files
- testRunRestore(t, env.gopts, filepath.Join(env.base, "restore0"), snapshotID)
- for _, testFile := range testfiles {
- rtest.OK(t, testFileSize(filepath.Join(env.base, "restore0", "testdata", testFile.name), int64(testFile.size)))
- }
-
- for i, pat := range []string{"*.c", "*.exe", "*", "*file3*"} {
- base := filepath.Join(env.base, fmt.Sprintf("restore%d", i+1))
- testRunRestoreExcludes(t, env.gopts, base, snapshotID, []string{pat})
- for _, testFile := range testfiles {
- err := testFileSize(filepath.Join(base, "testdata", testFile.name), int64(testFile.size))
- if ok, _ := filter.Match(pat, filepath.Base(testFile.name)); !ok {
- rtest.OK(t, err)
- } else {
- rtest.Assert(t, os.IsNotExist(err),
- "expected %v to not exist in restore step %v, but it exists, err %v", testFile.name, i+1, err)
- }
- }
- }
-}
-
-func TestRestore(t *testing.T) {
- env, cleanup := withTestEnvironment(t)
- defer cleanup()
-
- testRunInit(t, env.gopts)
-
- for i := 0; i < 10; i++ {
- p := filepath.Join(env.testdata, fmt.Sprintf("foo/bar/testfile%v", i))
- rtest.OK(t, os.MkdirAll(filepath.Dir(p), 0755))
- rtest.OK(t, appendRandomData(p, uint(mrand.Intn(2<<21))))
- }
-
- opts := BackupOptions{}
-
- testRunBackup(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts)
- testRunCheck(t, env.gopts)
-
- // Restore latest without any filters
- restoredir := filepath.Join(env.base, "restore")
- testRunRestoreLatest(t, env.gopts, restoredir, nil, nil)
-
- diff := directoriesContentsDiff(env.testdata, filepath.Join(restoredir, filepath.Base(env.testdata)))
- rtest.Assert(t, diff == "", "directories are not equal %v", diff)
-}
-
-func TestRestoreLatest(t *testing.T) {
- env, cleanup := withTestEnvironment(t)
- defer cleanup()
-
- testRunInit(t, env.gopts)
-
- p := filepath.Join(env.testdata, "testfile.c")
- rtest.OK(t, os.MkdirAll(filepath.Dir(p), 0755))
- rtest.OK(t, appendRandomData(p, 100))
-
- opts := BackupOptions{}
-
- // chdir manually here so we can get the current directory. This is not the
- // same as the temp dir returned by os.MkdirTemp() on darwin.
- back := rtest.Chdir(t, filepath.Dir(env.testdata))
- defer back()
-
- curdir, err := os.Getwd()
- if err != nil {
- t.Fatal(err)
- }
-
- testRunBackup(t, "", []string{filepath.Base(env.testdata)}, opts, env.gopts)
- testRunCheck(t, env.gopts)
-
- rtest.OK(t, os.Remove(p))
- rtest.OK(t, appendRandomData(p, 101))
- testRunBackup(t, "", []string{filepath.Base(env.testdata)}, opts, env.gopts)
- testRunCheck(t, env.gopts)
-
- // Restore latest without any filters
- testRunRestoreLatest(t, env.gopts, filepath.Join(env.base, "restore0"), nil, nil)
- rtest.OK(t, testFileSize(filepath.Join(env.base, "restore0", "testdata", "testfile.c"), int64(101)))
-
- // Setup test files in different directories backed up in different snapshots
- p1 := filepath.Join(curdir, filepath.FromSlash("p1/testfile.c"))
-
- rtest.OK(t, os.MkdirAll(filepath.Dir(p1), 0755))
- rtest.OK(t, appendRandomData(p1, 102))
- testRunBackup(t, "", []string{"p1"}, opts, env.gopts)
- testRunCheck(t, env.gopts)
-
- p2 := filepath.Join(curdir, filepath.FromSlash("p2/testfile.c"))
-
- rtest.OK(t, os.MkdirAll(filepath.Dir(p2), 0755))
- rtest.OK(t, appendRandomData(p2, 103))
- testRunBackup(t, "", []string{"p2"}, opts, env.gopts)
- testRunCheck(t, env.gopts)
-
- p1rAbs := filepath.Join(env.base, "restore1", "p1/testfile.c")
- p2rAbs := filepath.Join(env.base, "restore2", "p2/testfile.c")
-
- testRunRestoreLatest(t, env.gopts, filepath.Join(env.base, "restore1"), []string{filepath.Dir(p1)}, nil)
- rtest.OK(t, testFileSize(p1rAbs, int64(102)))
- if _, err := os.Stat(p2rAbs); os.IsNotExist(err) {
- rtest.Assert(t, os.IsNotExist(err),
- "expected %v to not exist in restore, but it exists, err %v", p2rAbs, err)
- }
-
- testRunRestoreLatest(t, env.gopts, filepath.Join(env.base, "restore2"), []string{filepath.Dir(p2)}, nil)
- rtest.OK(t, testFileSize(p2rAbs, int64(103)))
- if _, err := os.Stat(p1rAbs); os.IsNotExist(err) {
- rtest.Assert(t, os.IsNotExist(err),
- "expected %v to not exist in restore, but it exists, err %v", p1rAbs, err)
- }
-}
-
-func TestRestoreWithPermissionFailure(t *testing.T) {
- env, cleanup := withTestEnvironment(t)
- defer cleanup()
-
- datafile := filepath.Join("testdata", "repo-restore-permissions-test.tar.gz")
- rtest.SetupTarTestFixture(t, env.base, datafile)
-
- snapshots := testRunList(t, "snapshots", env.gopts)
- rtest.Assert(t, len(snapshots) > 0,
- "no snapshots found in repo (%v)", datafile)
-
- globalOptions.stderr = io.Discard
- defer func() {
- globalOptions.stderr = os.Stderr
- }()
-
- testRunRestore(t, env.gopts, filepath.Join(env.base, "restore"), snapshots[0])
-
- // make sure that all files have been restored, regardless of any
- // permission errors
- files := testRunLs(t, env.gopts, snapshots[0].String())
- for _, filename := range files {
- fi, err := os.Lstat(filepath.Join(env.base, "restore", filename))
- rtest.OK(t, err)
-
- rtest.Assert(t, !isFile(fi) || fi.Size() > 0,
- "file %v restored, but filesize is 0", filename)
- }
-}
-
-func setZeroModTime(filename string) error {
- var utimes = []syscall.Timespec{
- syscall.NsecToTimespec(0),
- syscall.NsecToTimespec(0),
- }
-
- return syscall.UtimesNano(filename, utimes)
-}
-
-func TestRestoreNoMetadataOnIgnoredIntermediateDirs(t *testing.T) {
- env, cleanup := withTestEnvironment(t)
- defer cleanup()
-
- testRunInit(t, env.gopts)
-
- p := filepath.Join(env.testdata, "subdir1", "subdir2", "subdir3", "file.ext")
- rtest.OK(t, os.MkdirAll(filepath.Dir(p), 0755))
- rtest.OK(t, appendRandomData(p, 200))
- rtest.OK(t, setZeroModTime(filepath.Join(env.testdata, "subdir1", "subdir2")))
-
- opts := BackupOptions{}
-
- testRunBackup(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts)
- testRunCheck(t, env.gopts)
-
- snapshotID := testRunList(t, "snapshots", env.gopts)[0]
-
- // restore with filter "*.ext", this should restore "file.ext", but
- // since the directories are ignored and only created because of
- // "file.ext", no meta data should be restored for them.
- testRunRestoreIncludes(t, env.gopts, filepath.Join(env.base, "restore0"), snapshotID, []string{"*.ext"})
-
- f1 := filepath.Join(env.base, "restore0", "testdata", "subdir1", "subdir2")
- _, err := os.Stat(f1)
- rtest.OK(t, err)
-
- // restore with filter "*", this should restore meta data on everything.
- testRunRestoreIncludes(t, env.gopts, filepath.Join(env.base, "restore1"), snapshotID, []string{"*"})
-
- f2 := filepath.Join(env.base, "restore1", "testdata", "subdir1", "subdir2")
- fi, err := os.Stat(f2)
- rtest.OK(t, err)
-
- rtest.Assert(t, fi.ModTime() == time.Unix(0, 0),
- "meta data of intermediate directory hasn't been restore")
-}
-
-func TestFind(t *testing.T) {
- env, cleanup := withTestEnvironment(t)
- defer cleanup()
-
- datafile := testSetupBackupData(t, env)
- opts := BackupOptions{}
-
- testRunBackup(t, "", []string{env.testdata}, opts, env.gopts)
- testRunCheck(t, env.gopts)
-
- results := testRunFind(t, false, env.gopts, "unexistingfile")
- rtest.Assert(t, len(results) == 0, "unexisting file found in repo (%v)", datafile)
-
- results = testRunFind(t, false, env.gopts, "testfile")
- lines := strings.Split(string(results), "\n")
- rtest.Assert(t, len(lines) == 2, "expected one file found in repo (%v)", datafile)
-
- results = testRunFind(t, false, env.gopts, "testfile*")
- lines = strings.Split(string(results), "\n")
- rtest.Assert(t, len(lines) == 4, "expected three files found in repo (%v)", datafile)
-}
-
-type testMatch struct {
- Path string `json:"path,omitempty"`
- Permissions string `json:"permissions,omitempty"`
- Size uint64 `json:"size,omitempty"`
- Date time.Time `json:"date,omitempty"`
- UID uint32 `json:"uid,omitempty"`
- GID uint32 `json:"gid,omitempty"`
-}
-
-type testMatches struct {
- Hits int `json:"hits,omitempty"`
- SnapshotID string `json:"snapshot,omitempty"`
- Matches []testMatch `json:"matches,omitempty"`
-}
-
-func TestFindJSON(t *testing.T) {
- env, cleanup := withTestEnvironment(t)
- defer cleanup()
-
- datafile := testSetupBackupData(t, env)
- opts := BackupOptions{}
-
- testRunBackup(t, "", []string{env.testdata}, opts, env.gopts)
- testRunCheck(t, env.gopts)
-
- results := testRunFind(t, true, env.gopts, "unexistingfile")
- matches := []testMatches{}
- rtest.OK(t, json.Unmarshal(results, &matches))
- rtest.Assert(t, len(matches) == 0, "expected no match in repo (%v)", datafile)
-
- results = testRunFind(t, true, env.gopts, "testfile")
- rtest.OK(t, json.Unmarshal(results, &matches))
- rtest.Assert(t, len(matches) == 1, "expected a single snapshot in repo (%v)", datafile)
- rtest.Assert(t, len(matches[0].Matches) == 1, "expected a single file to match (%v)", datafile)
- rtest.Assert(t, matches[0].Hits == 1, "expected hits to show 1 match (%v)", datafile)
-
- results = testRunFind(t, true, env.gopts, "testfile*")
- rtest.OK(t, json.Unmarshal(results, &matches))
- rtest.Assert(t, len(matches) == 1, "expected a single snapshot in repo (%v)", datafile)
- rtest.Assert(t, len(matches[0].Matches) == 3, "expected 3 files to match (%v)", datafile)
- rtest.Assert(t, matches[0].Hits == 3, "expected hits to show 3 matches (%v)", datafile)
-}
-
-func testRebuildIndex(t *testing.T, backendTestHook backendWrapper) {
- env, cleanup := withTestEnvironment(t)
- defer cleanup()
-
- datafile := filepath.Join("..", "..", "internal", "checker", "testdata", "duplicate-packs-in-index-test-repo.tar.gz")
- rtest.SetupTarTestFixture(t, env.base, datafile)
-
- out, err := testRunCheckOutput(env.gopts)
- if !strings.Contains(out, "contained in several indexes") {
- t.Fatalf("did not find checker hint for packs in several indexes")
- }
-
- if err != nil {
- t.Fatalf("expected no error from checker for test repository, got %v", err)
- }
-
- if !strings.Contains(out, "restic rebuild-index") {
- t.Fatalf("did not find hint for rebuild-index command")
- }
-
- env.gopts.backendTestHook = backendTestHook
- testRunRebuildIndex(t, env.gopts)
-
- env.gopts.backendTestHook = nil
- out, err = testRunCheckOutput(env.gopts)
- if len(out) != 0 {
- t.Fatalf("expected no output from the checker, got: %v", out)
- }
-
- if err != nil {
- t.Fatalf("expected no error from checker after rebuild-index, got: %v", err)
- }
-}
-
-func TestRebuildIndex(t *testing.T) {
- testRebuildIndex(t, nil)
-}
-
-func TestRebuildIndexAlwaysFull(t *testing.T) {
- indexFull := index.IndexFull
- defer func() {
- index.IndexFull = indexFull
- }()
- index.IndexFull = func(*index.Index, bool) bool { return true }
- testRebuildIndex(t, nil)
-}
-
-// indexErrorBackend modifies the first index after reading.
-type indexErrorBackend struct {
- restic.Backend
- lock sync.Mutex
- hasErred bool
-}
-
-func (b *indexErrorBackend) Load(ctx context.Context, h restic.Handle, length int, offset int64, consumer func(rd io.Reader) error) error {
- return b.Backend.Load(ctx, h, length, offset, func(rd io.Reader) error {
- // protect hasErred
- b.lock.Lock()
- defer b.lock.Unlock()
- if !b.hasErred && h.Type == restic.IndexFile {
- b.hasErred = true
- return consumer(errorReadCloser{rd})
- }
- return consumer(rd)
- })
-}
-
-type errorReadCloser struct {
- io.Reader
-}
-
-func (erd errorReadCloser) Read(p []byte) (int, error) {
- n, err := erd.Reader.Read(p)
- if n > 0 {
- p[0] ^= 1
- }
- return n, err
-}
-
-func TestRebuildIndexDamage(t *testing.T) {
- testRebuildIndex(t, func(r restic.Backend) (restic.Backend, error) {
- return &indexErrorBackend{
- Backend: r,
- }, nil
- })
-}
-
-type appendOnlyBackend struct {
- restic.Backend
-}
-
-// called via repo.Backend().Remove()
-func (b *appendOnlyBackend) Remove(ctx context.Context, h restic.Handle) error {
- return errors.Errorf("Failed to remove %v", h)
-}
-
-func TestRebuildIndexFailsOnAppendOnly(t *testing.T) {
- env, cleanup := withTestEnvironment(t)
- defer cleanup()
-
- datafile := filepath.Join("..", "..", "internal", "checker", "testdata", "duplicate-packs-in-index-test-repo.tar.gz")
- rtest.SetupTarTestFixture(t, env.base, datafile)
-
- globalOptions.stdout = io.Discard
- defer func() {
- globalOptions.stdout = os.Stdout
- }()
-
- env.gopts.backendTestHook = func(r restic.Backend) (restic.Backend, error) {
- return &appendOnlyBackend{r}, nil
- }
- err := runRebuildIndex(context.TODO(), RebuildIndexOptions{}, env.gopts)
- if err == nil {
- t.Error("expected rebuildIndex to fail")
- }
- t.Log(err)
-}
-
func TestCheckRestoreNoLock(t *testing.T) {
env, cleanup := withTestEnvironment(t)
defer cleanup()
@@ -1575,217 +32,10 @@ func TestCheckRestoreNoLock(t *testing.T) {
testRunCheck(t, env.gopts)
- snapshotIDs := testRunList(t, "snapshots", env.gopts)
- if len(snapshotIDs) == 0 {
- t.Fatalf("found no snapshots")
- }
-
+ snapshotIDs := testListSnapshots(t, env.gopts, 4)
testRunRestore(t, env.gopts, filepath.Join(env.base, "restore"), snapshotIDs[0])
}
-func TestPrune(t *testing.T) {
- testPruneVariants(t, false)
- testPruneVariants(t, true)
-}
-
-func testPruneVariants(t *testing.T, unsafeNoSpaceRecovery bool) {
- suffix := ""
- if unsafeNoSpaceRecovery {
- suffix = "-recovery"
- }
- t.Run("0"+suffix, func(t *testing.T) {
- opts := PruneOptions{MaxUnused: "0%", unsafeRecovery: unsafeNoSpaceRecovery}
- checkOpts := CheckOptions{ReadData: true, CheckUnused: true}
- testPrune(t, opts, checkOpts)
- })
-
- t.Run("50"+suffix, func(t *testing.T) {
- opts := PruneOptions{MaxUnused: "50%", unsafeRecovery: unsafeNoSpaceRecovery}
- checkOpts := CheckOptions{ReadData: true}
- testPrune(t, opts, checkOpts)
- })
-
- t.Run("unlimited"+suffix, func(t *testing.T) {
- opts := PruneOptions{MaxUnused: "unlimited", unsafeRecovery: unsafeNoSpaceRecovery}
- checkOpts := CheckOptions{ReadData: true}
- testPrune(t, opts, checkOpts)
- })
-
- t.Run("CachableOnly"+suffix, func(t *testing.T) {
- opts := PruneOptions{MaxUnused: "5%", RepackCachableOnly: true, unsafeRecovery: unsafeNoSpaceRecovery}
- checkOpts := CheckOptions{ReadData: true}
- testPrune(t, opts, checkOpts)
- })
- t.Run("Small", func(t *testing.T) {
- opts := PruneOptions{MaxUnused: "unlimited", RepackSmall: true}
- checkOpts := CheckOptions{ReadData: true, CheckUnused: true}
- testPrune(t, opts, checkOpts)
- })
-}
-
-func createPrunableRepo(t *testing.T, env *testEnvironment) {
- testSetupBackupData(t, env)
- opts := BackupOptions{}
-
- testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9")}, opts, env.gopts)
- firstSnapshot := testRunList(t, "snapshots", env.gopts)
- rtest.Assert(t, len(firstSnapshot) == 1,
- "expected one snapshot, got %v", firstSnapshot)
-
- testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "2")}, opts, env.gopts)
- testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "3")}, opts, env.gopts)
-
- snapshotIDs := testRunList(t, "snapshots", env.gopts)
- rtest.Assert(t, len(snapshotIDs) == 3,
- "expected 3 snapshot, got %v", snapshotIDs)
-
- testRunForgetJSON(t, env.gopts)
- testRunForget(t, env.gopts, firstSnapshot[0].String())
-}
-
-func testPrune(t *testing.T, pruneOpts PruneOptions, checkOpts CheckOptions) {
- env, cleanup := withTestEnvironment(t)
- defer cleanup()
-
- createPrunableRepo(t, env)
- testRunPrune(t, env.gopts, pruneOpts)
- rtest.OK(t, runCheck(context.TODO(), checkOpts, env.gopts, nil))
-}
-
-var pruneDefaultOptions = PruneOptions{MaxUnused: "5%"}
-
-func listPacks(gopts GlobalOptions, t *testing.T) restic.IDSet {
- r, err := OpenRepository(context.TODO(), gopts)
- rtest.OK(t, err)
-
- packs := restic.NewIDSet()
-
- rtest.OK(t, r.List(context.TODO(), restic.PackFile, func(id restic.ID, size int64) error {
- packs.Insert(id)
- return nil
- }))
- return packs
-}
-
-func TestPruneWithDamagedRepository(t *testing.T) {
- env, cleanup := withTestEnvironment(t)
- defer cleanup()
-
- datafile := filepath.Join("testdata", "backup-data.tar.gz")
- testRunInit(t, env.gopts)
-
- rtest.SetupTarTestFixture(t, env.testdata, datafile)
- opts := BackupOptions{}
-
- // create and delete snapshot to create unused blobs
- testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "2")}, opts, env.gopts)
- firstSnapshot := testRunList(t, "snapshots", env.gopts)
- rtest.Assert(t, len(firstSnapshot) == 1,
- "expected one snapshot, got %v", firstSnapshot)
- testRunForget(t, env.gopts, firstSnapshot[0].String())
-
- oldPacks := listPacks(env.gopts, t)
-
- // create new snapshot, but lose all data
- testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "3")}, opts, env.gopts)
- snapshotIDs := testRunList(t, "snapshots", env.gopts)
-
- removePacksExcept(env.gopts, t, oldPacks, false)
-
- rtest.Assert(t, len(snapshotIDs) == 1,
- "expected one snapshot, got %v", snapshotIDs)
-
- oldHook := env.gopts.backendTestHook
- env.gopts.backendTestHook = func(r restic.Backend) (restic.Backend, error) { return newListOnceBackend(r), nil }
- defer func() {
- env.gopts.backendTestHook = oldHook
- }()
- // prune should fail
- rtest.Assert(t, runPrune(context.TODO(), pruneDefaultOptions, env.gopts) == errorPacksMissing,
- "prune should have reported index not complete error")
-}
-
-// Test repos for edge cases
-func TestEdgeCaseRepos(t *testing.T) {
- opts := CheckOptions{}
-
- // repo where index is completely missing
- // => check and prune should fail
- t.Run("no-index", func(t *testing.T) {
- testEdgeCaseRepo(t, "repo-index-missing.tar.gz", opts, pruneDefaultOptions, false, false)
- })
-
- // repo where an existing and used blob is missing from the index
- // => check and prune should fail
- t.Run("index-missing-blob", func(t *testing.T) {
- testEdgeCaseRepo(t, "repo-index-missing-blob.tar.gz", opts, pruneDefaultOptions, false, false)
- })
-
- // repo where a blob is missing
- // => check and prune should fail
- t.Run("missing-data", func(t *testing.T) {
- testEdgeCaseRepo(t, "repo-data-missing.tar.gz", opts, pruneDefaultOptions, false, false)
- })
-
- // repo where blobs which are not needed are missing or in invalid pack files
- // => check should fail and prune should repair this
- t.Run("missing-unused-data", func(t *testing.T) {
- testEdgeCaseRepo(t, "repo-unused-data-missing.tar.gz", opts, pruneDefaultOptions, false, true)
- })
-
- // repo where data exists that is not referenced
- // => check and prune should fully work
- t.Run("unreferenced-data", func(t *testing.T) {
- testEdgeCaseRepo(t, "repo-unreferenced-data.tar.gz", opts, pruneDefaultOptions, true, true)
- })
-
- // repo where an obsolete index still exists
- // => check and prune should fully work
- t.Run("obsolete-index", func(t *testing.T) {
- testEdgeCaseRepo(t, "repo-obsolete-index.tar.gz", opts, pruneDefaultOptions, true, true)
- })
-
- // repo which contains mixed (data/tree) packs
- // => check and prune should fully work
- t.Run("mixed-packs", func(t *testing.T) {
- testEdgeCaseRepo(t, "repo-mixed.tar.gz", opts, pruneDefaultOptions, true, true)
- })
-
- // repo which contains duplicate blobs
- // => checking for unused data should report an error and prune resolves the
- // situation
- opts = CheckOptions{
- ReadData: true,
- CheckUnused: true,
- }
- t.Run("duplicates", func(t *testing.T) {
- testEdgeCaseRepo(t, "repo-duplicates.tar.gz", opts, pruneDefaultOptions, false, true)
- })
-}
-
-func testEdgeCaseRepo(t *testing.T, tarfile string, optionsCheck CheckOptions, optionsPrune PruneOptions, checkOK, pruneOK bool) {
- env, cleanup := withTestEnvironment(t)
- defer cleanup()
-
- datafile := filepath.Join("testdata", tarfile)
- rtest.SetupTarTestFixture(t, env.base, datafile)
-
- if checkOK {
- testRunCheck(t, env.gopts)
- } else {
- rtest.Assert(t, runCheck(context.TODO(), optionsCheck, env.gopts, nil) != nil,
- "check should have reported an error")
- }
-
- if pruneOK {
- testRunPrune(t, env.gopts, optionsPrune)
- testRunCheck(t, env.gopts)
- } else {
- rtest.Assert(t, runPrune(context.TODO(), optionsPrune, env.gopts) != nil,
- "prune should have reported an error")
- }
-}
-
// a listOnceBackend only allows listing once per filetype
// listing filetypes more than once may cause problems with eventually consistent
// backends (e.g. Amazon S3) as the second listing may be inconsistent with what
@@ -1837,301 +87,15 @@ func TestListOnce(t *testing.T) {
testRunPrune(t, env.gopts, pruneOpts)
rtest.OK(t, runCheck(context.TODO(), checkOpts, env.gopts, nil))
- rtest.OK(t, runRebuildIndex(context.TODO(), RebuildIndexOptions{}, env.gopts))
- rtest.OK(t, runRebuildIndex(context.TODO(), RebuildIndexOptions{ReadAllPacks: true}, env.gopts))
-}
-
-func TestHardLink(t *testing.T) {
- // this test assumes a test set with a single directory containing hard linked files
- env, cleanup := withTestEnvironment(t)
- defer cleanup()
-
- datafile := filepath.Join("testdata", "test.hl.tar.gz")
- fd, err := os.Open(datafile)
- if os.IsNotExist(err) {
- t.Skipf("unable to find data file %q, skipping", datafile)
- return
- }
- rtest.OK(t, err)
- rtest.OK(t, fd.Close())
-
- testRunInit(t, env.gopts)
-
- rtest.SetupTarTestFixture(t, env.testdata, datafile)
-
- linkTests := createFileSetPerHardlink(env.testdata)
-
- opts := BackupOptions{}
-
- // first backup
- testRunBackup(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts)
- snapshotIDs := testRunList(t, "snapshots", env.gopts)
- rtest.Assert(t, len(snapshotIDs) == 1,
- "expected one snapshot, got %v", snapshotIDs)
-
- testRunCheck(t, env.gopts)
-
- // restore all backups and compare
- for i, snapshotID := range snapshotIDs {
- restoredir := filepath.Join(env.base, fmt.Sprintf("restore%d", i))
- t.Logf("restoring snapshot %v to %v", snapshotID.Str(), restoredir)
- testRunRestore(t, env.gopts, restoredir, snapshotID)
- diff := directoriesContentsDiff(env.testdata, filepath.Join(restoredir, "testdata"))
- rtest.Assert(t, diff == "", "directories are not equal %v", diff)
-
- linkResults := createFileSetPerHardlink(filepath.Join(restoredir, "testdata"))
- rtest.Assert(t, linksEqual(linkTests, linkResults),
- "links are not equal")
- }
-
- testRunCheck(t, env.gopts)
-}
-
-func linksEqual(source, dest map[uint64][]string) bool {
- for _, vs := range source {
- found := false
- for kd, vd := range dest {
- if linkEqual(vs, vd) {
- delete(dest, kd)
- found = true
- break
- }
- }
- if !found {
- return false
- }
- }
-
- return len(dest) == 0
-}
-
-func linkEqual(source, dest []string) bool {
-	// two slices are equal if they contain the same elements, ignoring order
- if source == nil && dest == nil {
- return true
- }
-
- if source == nil || dest == nil {
- return false
- }
-
- if len(source) != len(dest) {
- return false
- }
-
- for i := range source {
- found := false
- for j := range dest {
- if source[i] == dest[j] {
- found = true
- break
- }
- }
- if !found {
- return false
- }
- }
-
- return true
-}
-
-func TestQuietBackup(t *testing.T) {
- env, cleanup := withTestEnvironment(t)
- defer cleanup()
-
- testSetupBackupData(t, env)
- opts := BackupOptions{}
-
- env.gopts.Quiet = false
- testRunBackup(t, "", []string{env.testdata}, opts, env.gopts)
- snapshotIDs := testRunList(t, "snapshots", env.gopts)
- rtest.Assert(t, len(snapshotIDs) == 1,
- "expected one snapshot, got %v", snapshotIDs)
-
- testRunCheck(t, env.gopts)
-
- env.gopts.Quiet = true
- testRunBackup(t, "", []string{env.testdata}, opts, env.gopts)
- snapshotIDs = testRunList(t, "snapshots", env.gopts)
- rtest.Assert(t, len(snapshotIDs) == 2,
- "expected two snapshots, got %v", snapshotIDs)
-
- testRunCheck(t, env.gopts)
-}
-
-func copyFile(dst string, src string) error {
- srcFile, err := os.Open(src)
- if err != nil {
- return err
- }
-
- dstFile, err := os.Create(dst)
- if err != nil {
- // ignore subsequent errors
- _ = srcFile.Close()
- return err
- }
-
- _, err = io.Copy(dstFile, srcFile)
- if err != nil {
- // ignore subsequent errors
- _ = srcFile.Close()
- _ = dstFile.Close()
- return err
- }
-
- err = srcFile.Close()
- if err != nil {
- // ignore subsequent errors
- _ = dstFile.Close()
- return err
- }
-
- err = dstFile.Close()
- if err != nil {
- return err
- }
-
- return nil
-}
-
-var diffOutputRegexPatterns = []string{
- "-.+modfile",
- "M.+modfile1",
- "\\+.+modfile2",
- "\\+.+modfile3",
- "\\+.+modfile4",
- "-.+submoddir",
- "-.+submoddir.subsubmoddir",
- "\\+.+submoddir2",
- "\\+.+submoddir2.subsubmoddir",
- "Files: +2 new, +1 removed, +1 changed",
- "Dirs: +3 new, +2 removed",
- "Data Blobs: +2 new, +1 removed",
- "Added: +7[0-9]{2}\\.[0-9]{3} KiB",
- "Removed: +2[0-9]{2}\\.[0-9]{3} KiB",
-}
-
-func setupDiffRepo(t *testing.T) (*testEnvironment, func(), string, string) {
- env, cleanup := withTestEnvironment(t)
- testRunInit(t, env.gopts)
-
- datadir := filepath.Join(env.base, "testdata")
- testdir := filepath.Join(datadir, "testdir")
- subtestdir := filepath.Join(testdir, "subtestdir")
- testfile := filepath.Join(testdir, "testfile")
-
- rtest.OK(t, os.Mkdir(testdir, 0755))
- rtest.OK(t, os.Mkdir(subtestdir, 0755))
- rtest.OK(t, appendRandomData(testfile, 256*1024))
-
- moddir := filepath.Join(datadir, "moddir")
- submoddir := filepath.Join(moddir, "submoddir")
- subsubmoddir := filepath.Join(submoddir, "subsubmoddir")
- modfile := filepath.Join(moddir, "modfile")
- rtest.OK(t, os.Mkdir(moddir, 0755))
- rtest.OK(t, os.Mkdir(submoddir, 0755))
- rtest.OK(t, os.Mkdir(subsubmoddir, 0755))
- rtest.OK(t, copyFile(modfile, testfile))
- rtest.OK(t, appendRandomData(modfile+"1", 256*1024))
-
- snapshots := make(map[string]struct{})
- opts := BackupOptions{}
- testRunBackup(t, "", []string{datadir}, opts, env.gopts)
- snapshots, firstSnapshotID := lastSnapshot(snapshots, loadSnapshotMap(t, env.gopts))
-
- rtest.OK(t, os.Rename(modfile, modfile+"3"))
- rtest.OK(t, os.Rename(submoddir, submoddir+"2"))
- rtest.OK(t, appendRandomData(modfile+"1", 256*1024))
- rtest.OK(t, appendRandomData(modfile+"2", 256*1024))
- rtest.OK(t, os.Mkdir(modfile+"4", 0755))
-
- testRunBackup(t, "", []string{datadir}, opts, env.gopts)
- _, secondSnapshotID := lastSnapshot(snapshots, loadSnapshotMap(t, env.gopts))
-
- return env, cleanup, firstSnapshotID, secondSnapshotID
-}
-
-func TestDiff(t *testing.T) {
- env, cleanup, firstSnapshotID, secondSnapshotID := setupDiffRepo(t)
- defer cleanup()
-
- // quiet suppresses the diff output except for the summary
- env.gopts.Quiet = false
- _, err := testRunDiffOutput(env.gopts, "", secondSnapshotID)
- rtest.Assert(t, err != nil, "expected error on invalid snapshot id")
-
- out, err := testRunDiffOutput(env.gopts, firstSnapshotID, secondSnapshotID)
- rtest.OK(t, err)
-
- for _, pattern := range diffOutputRegexPatterns {
- r, err := regexp.Compile(pattern)
- rtest.Assert(t, err == nil, "failed to compile regexp %v", pattern)
- rtest.Assert(t, r.MatchString(out), "expected pattern %v in output, got\n%v", pattern, out)
- }
-
- // check quiet output
- env.gopts.Quiet = true
- outQuiet, err := testRunDiffOutput(env.gopts, firstSnapshotID, secondSnapshotID)
- rtest.OK(t, err)
-
- rtest.Assert(t, len(outQuiet) < len(out), "expected shorter output on quiet mode %v vs. %v", len(outQuiet), len(out))
-}
-
-type typeSniffer struct {
- MessageType string `json:"message_type"`
-}
-
-func TestDiffJSON(t *testing.T) {
- env, cleanup, firstSnapshotID, secondSnapshotID := setupDiffRepo(t)
- defer cleanup()
-
- // quiet suppresses the diff output except for the summary
- env.gopts.Quiet = false
- env.gopts.JSON = true
- out, err := testRunDiffOutput(env.gopts, firstSnapshotID, secondSnapshotID)
- rtest.OK(t, err)
-
- var stat DiffStatsContainer
- var changes int
-
- scanner := bufio.NewScanner(strings.NewReader(out))
- for scanner.Scan() {
- line := scanner.Text()
- var sniffer typeSniffer
- rtest.OK(t, json.Unmarshal([]byte(line), &sniffer))
- switch sniffer.MessageType {
- case "change":
- changes++
- case "statistics":
- rtest.OK(t, json.Unmarshal([]byte(line), &stat))
- default:
- t.Fatalf("unexpected message type %v", sniffer.MessageType)
- }
- }
- rtest.Equals(t, 9, changes)
- rtest.Assert(t, stat.Added.Files == 2 && stat.Added.Dirs == 3 && stat.Added.DataBlobs == 2 &&
- stat.Removed.Files == 1 && stat.Removed.Dirs == 2 && stat.Removed.DataBlobs == 1 &&
- stat.ChangedFiles == 1, "unexpected statistics")
-
- // check quiet output
- env.gopts.Quiet = true
- outQuiet, err := testRunDiffOutput(env.gopts, firstSnapshotID, secondSnapshotID)
- rtest.OK(t, err)
-
- stat = DiffStatsContainer{}
- rtest.OK(t, json.Unmarshal([]byte(outQuiet), &stat))
- rtest.Assert(t, stat.Added.Files == 2 && stat.Added.Dirs == 3 && stat.Added.DataBlobs == 2 &&
- stat.Removed.Files == 1 && stat.Removed.Dirs == 2 && stat.Removed.DataBlobs == 1 &&
- stat.ChangedFiles == 1, "unexpected statistics")
- rtest.Assert(t, stat.SourceSnapshot == firstSnapshotID && stat.TargetSnapshot == secondSnapshotID, "unexpected snapshot ids")
+ rtest.OK(t, runRebuildIndex(context.TODO(), RepairIndexOptions{}, env.gopts))
+ rtest.OK(t, runRebuildIndex(context.TODO(), RepairIndexOptions{ReadAllPacks: true}, env.gopts))
}
type writeToOnly struct {
rd io.Reader
}
-func (r *writeToOnly) Read(p []byte) (n int, err error) {
+func (r *writeToOnly) Read(_ []byte) (n int, err error) {
return 0, fmt.Errorf("should have called WriteTo instead")
}
@@ -2169,9 +133,7 @@ func TestBackendLoadWriteTo(t *testing.T) {
// loading snapshots must still work
env.gopts.NoCache = false
- firstSnapshot := testRunList(t, "snapshots", env.gopts)
- rtest.Assert(t, len(firstSnapshot) == 1,
- "expected one snapshot, got %v", firstSnapshot)
+ testListSnapshots(t, env.gopts, 1)
}
func TestFindListOnce(t *testing.T) {
@@ -2187,9 +149,9 @@ func TestFindListOnce(t *testing.T) {
testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9")}, opts, env.gopts)
testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "2")}, opts, env.gopts)
- secondSnapshot := testRunList(t, "snapshots", env.gopts)
+ secondSnapshot := testListSnapshots(t, env.gopts, 2)
testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "3")}, opts, env.gopts)
- thirdSnapshot := restic.NewIDSet(testRunList(t, "snapshots", env.gopts)...)
+ thirdSnapshot := restic.NewIDSet(testListSnapshots(t, env.gopts, 3)...)
repo, err := OpenRepository(context.TODO(), env.gopts)
rtest.OK(t, err)
diff --git a/cmd/restic/local_layout_test.go b/cmd/restic/local_layout_test.go
deleted file mode 100644
index eb614f1c3..000000000
--- a/cmd/restic/local_layout_test.go
+++ /dev/null
@@ -1,41 +0,0 @@
-package main
-
-import (
- "path/filepath"
- "testing"
-
- rtest "github.com/restic/restic/internal/test"
-)
-
-func TestRestoreLocalLayout(t *testing.T) {
- env, cleanup := withTestEnvironment(t)
- defer cleanup()
-
- var tests = []struct {
- filename string
- layout string
- }{
- {"repo-layout-default.tar.gz", ""},
- {"repo-layout-s3legacy.tar.gz", ""},
- {"repo-layout-default.tar.gz", "default"},
- {"repo-layout-s3legacy.tar.gz", "s3legacy"},
- }
-
- for _, test := range tests {
- datafile := filepath.Join("..", "..", "internal", "backend", "testdata", test.filename)
-
- rtest.SetupTarTestFixture(t, env.base, datafile)
-
- env.gopts.extended["local.layout"] = test.layout
-
- // check the repo
- testRunCheck(t, env.gopts)
-
- // restore latest snapshot
- target := filepath.Join(env.base, "restore")
- testRunRestoreLatest(t, env.gopts, target, nil, nil)
-
- rtest.RemoveAll(t, filepath.Join(env.base, "repo"))
- rtest.RemoveAll(t, target)
- }
-}
diff --git a/cmd/restic/lock.go b/cmd/restic/lock.go
index f39a08db6..11c1ed8f5 100644
--- a/cmd/restic/lock.go
+++ b/cmd/restic/lock.go
@@ -2,6 +2,7 @@ package main
import (
"context"
+ "fmt"
"sync"
"time"
@@ -11,6 +12,7 @@ import (
)
type lockContext struct {
+ lock *restic.Lock
cancel context.CancelFunc
refreshWG sync.WaitGroup
}
@@ -21,17 +23,29 @@ var globalLocks struct {
sync.Once
}
-func lockRepo(ctx context.Context, repo restic.Repository) (*restic.Lock, context.Context, error) {
- return lockRepository(ctx, repo, false)
+func lockRepo(ctx context.Context, repo restic.Repository, retryLock time.Duration, json bool) (*restic.Lock, context.Context, error) {
+ return lockRepository(ctx, repo, false, retryLock, json)
}
-func lockRepoExclusive(ctx context.Context, repo restic.Repository) (*restic.Lock, context.Context, error) {
- return lockRepository(ctx, repo, true)
+func lockRepoExclusive(ctx context.Context, repo restic.Repository, retryLock time.Duration, json bool) (*restic.Lock, context.Context, error) {
+ return lockRepository(ctx, repo, true, retryLock, json)
+}
+
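+// initial and maximum sleep between lock retry attempts; the sleep doubles
+// after each failed attempt, capped at retrySleepMax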
+var (
+ retrySleepStart = 5 * time.Second
+ retrySleepMax = 60 * time.Second
+)
+
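+// minDuration returns the smaller of the two durations.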
+func minDuration(a, b time.Duration) time.Duration {
+ if a <= b {
+ return a
+ }
+ return b
}
// lockRepository wraps the ctx such that it is cancelled when the repository is unlocked
// cancelling the original context also stops the lock refresh
-func lockRepository(ctx context.Context, repo restic.Repository, exclusive bool) (*restic.Lock, context.Context, error) {
+func lockRepository(ctx context.Context, repo restic.Repository, exclusive bool, retryLock time.Duration, json bool) (*restic.Lock, context.Context, error) {
// make sure that a repository is unlocked properly and after cancel() was
// called by the cleanup handler in global.go
globalLocks.Do(func() {
@@ -43,26 +57,65 @@ func lockRepository(ctx context.Context, repo restic.Repository, exclusive bool)
lockFn = restic.NewExclusiveLock
}
- lock, err := lockFn(ctx, repo)
+ var lock *restic.Lock
+ var err error
+
+ retrySleep := minDuration(retrySleepStart, retryLock)
+ retryMessagePrinted := false
+ retryTimeout := time.After(retryLock)
+
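+	// try to acquire the lock, retrying with exponential backoff until the
+	// retry timeout expires or the context is cancelled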
+retryLoop:
+ for {
+ lock, err = lockFn(ctx, repo)
+ if err != nil && restic.IsAlreadyLocked(err) {
+
+ if !retryMessagePrinted {
+ if !json {
+ Verbosef("repo already locked, waiting up to %s for the lock\n", retryLock)
+ }
+ retryMessagePrinted = true
+ }
+
+ debug.Log("repo already locked, retrying in %v", retrySleep)
+ retrySleepCh := time.After(retrySleep)
+
+ select {
+ case <-ctx.Done():
+ return nil, ctx, ctx.Err()
+ case <-retryTimeout:
+ debug.Log("repo already locked, timeout expired")
+ // Last lock attempt
+ lock, err = lockFn(ctx, repo)
+ break retryLoop
+ case <-retrySleepCh:
+ retrySleep = minDuration(retrySleep*2, retrySleepMax)
+ }
+ } else {
+ // anything else, either a successful lock or another error
+ break retryLoop
+ }
+ }
if restic.IsInvalidLock(err) {
return nil, ctx, errors.Fatalf("%v\n\nthe `unlock --remove-all` command can be used to remove invalid locks. Make sure that no other restic process is accessing the repository when running the command", err)
}
if err != nil {
- return nil, ctx, errors.Fatalf("unable to create lock in backend: %v", err)
+ return nil, ctx, fmt.Errorf("unable to create lock in backend: %w", err)
}
debug.Log("create lock %p (exclusive %v)", lock, exclusive)
ctx, cancel := context.WithCancel(ctx)
lockInfo := &lockContext{
+ lock: lock,
cancel: cancel,
}
lockInfo.refreshWG.Add(2)
refreshChan := make(chan struct{})
+ forceRefreshChan := make(chan refreshLockRequest)
globalLocks.Lock()
globalLocks.locks[lock] = lockInfo
- go refreshLocks(ctx, lock, lockInfo, refreshChan)
- go monitorLockRefresh(ctx, lock, lockInfo, refreshChan)
+ go refreshLocks(ctx, repo.Backend(), lockInfo, refreshChan, forceRefreshChan)
+ go monitorLockRefresh(ctx, lockInfo, refreshChan, forceRefreshChan)
globalLocks.Unlock()
return lock, ctx, err
@@ -74,8 +127,13 @@ var refreshInterval = 5 * time.Minute
// the difference allows to compensate for a small time drift between clients.
var refreshabilityTimeout = restic.StaleLockTimeout - refreshInterval*3/2
-func refreshLocks(ctx context.Context, lock *restic.Lock, lockInfo *lockContext, refreshed chan<- struct{}) {
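+// refreshLockRequest asks the refresh goroutine to force an immediate
+// refresh of a stale lock; the outcome is reported on the result channel.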
+type refreshLockRequest struct {
+ result chan bool
+}
+
+func refreshLocks(ctx context.Context, backend restic.Backend, lockInfo *lockContext, refreshed chan<- struct{}, forceRefresh <-chan refreshLockRequest) {
debug.Log("start")
+ lock := lockInfo.lock
ticker := time.NewTicker(refreshInterval)
lastRefresh := lock.Time
@@ -99,6 +157,22 @@ func refreshLocks(ctx context.Context, lock *restic.Lock, lockInfo *lockContext,
case <-ctx.Done():
debug.Log("terminate")
return
+
+ case req := <-forceRefresh:
+ debug.Log("trying to refresh stale lock")
+ // keep on going if our current lock still exists
+ success := tryRefreshStaleLock(ctx, backend, lock, lockInfo.cancel)
+ // inform refresh goroutine about forced refresh
+ select {
+ case <-ctx.Done():
+ case req.result <- success:
+ }
+
+ if success {
+ // update lock refresh time
+ lastRefresh = lock.Time
+ }
+
case <-ticker.C:
if time.Since(lastRefresh) > refreshabilityTimeout {
// the lock is too old, wait until the expiry monitor cancels the context
@@ -111,7 +185,7 @@ func refreshLocks(ctx context.Context, lock *restic.Lock, lockInfo *lockContext,
Warnf("unable to refresh lock: %v\n", err)
} else {
lastRefresh = lock.Time
- // inform monitor gorountine about successful refresh
+ // inform monitor goroutine about successful refresh
select {
case <-ctx.Done():
case refreshed <- struct{}{}:
@@ -121,7 +195,7 @@ func refreshLocks(ctx context.Context, lock *restic.Lock, lockInfo *lockContext,
}
}
-func monitorLockRefresh(ctx context.Context, lock *restic.Lock, lockInfo *lockContext, refreshed <-chan struct{}) {
+func monitorLockRefresh(ctx context.Context, lockInfo *lockContext, refreshed <-chan struct{}, forceRefresh chan<- refreshLockRequest) {
// time.Now() might use a monotonic timer which is paused during standby
// convert to unix time to ensure we compare real time values
lastRefresh := time.Now().UnixNano()
@@ -133,24 +207,47 @@ func monitorLockRefresh(ctx context.Context, lock *restic.Lock, lockInfo *lockCo
// timers are paused during standby, which is a problem as the refresh timeout
// _must_ expire if the host was too long in standby. Thus fall back to periodic checks
// https://github.com/golang/go/issues/35012
- timer := time.NewTimer(pollDuration)
+ ticker := time.NewTicker(pollDuration)
defer func() {
- timer.Stop()
+ ticker.Stop()
lockInfo.cancel()
lockInfo.refreshWG.Done()
}()
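+	// non-nil while a forced refresh of a stale lock is in flight; it
+	// delivers the result of that refresh attempt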
+ var refreshStaleLockResult chan bool
+
for {
select {
case <-ctx.Done():
debug.Log("terminate expiry monitoring")
return
case <-refreshed:
+ if refreshStaleLockResult != nil {
+				// ignore delayed refresh notifications while the stale lock is being refreshed
+ continue
+ }
lastRefresh = time.Now().UnixNano()
- case <-timer.C:
- if time.Now().UnixNano()-lastRefresh < refreshabilityTimeout.Nanoseconds() {
- // restart timer
- timer.Reset(pollDuration)
+ case <-ticker.C:
+ if time.Now().UnixNano()-lastRefresh < refreshabilityTimeout.Nanoseconds() || refreshStaleLockResult != nil {
+ continue
+ }
+
+ debug.Log("trying to refreshStaleLock")
+ // keep on going if our current lock still exists
+ refreshReq := refreshLockRequest{
+ result: make(chan bool),
+ }
+ refreshStaleLockResult = refreshReq.result
+
+ // inform refresh goroutine about forced refresh
+ select {
+ case <-ctx.Done():
+ case forceRefresh <- refreshReq:
+ }
+ case success := <-refreshStaleLockResult:
+ if success {
+ lastRefresh = time.Now().UnixNano()
+ refreshStaleLockResult = nil
continue
}
@@ -160,6 +257,25 @@ func monitorLockRefresh(ctx context.Context, lock *restic.Lock, lockInfo *lockCo
}
}
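+// tryRefreshStaleLock attempts to refresh a lock that has exceeded its normal
+// refresh window. It freezes the backend (if supported) while the lock state
+// is uncertain and cancels the context on failure to stop further repository
+// modifications.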
+func tryRefreshStaleLock(ctx context.Context, backend restic.Backend, lock *restic.Lock, cancel context.CancelFunc) bool {
+ freeze := restic.AsBackend[restic.FreezeBackend](backend)
+ if freeze != nil {
+ debug.Log("freezing backend")
+ freeze.Freeze()
+ defer freeze.Unfreeze()
+ }
+
+ err := lock.RefreshStaleLock(ctx)
+ if err != nil {
+ Warnf("failed to refresh stale lock: %v\n", err)
+ // cancel context while the backend is still frozen to prevent accidental modifications
+ cancel()
+ return false
+ }
+
+ return true
+}
+
func unlockRepo(lock *restic.Lock) {
if lock == nil {
return
diff --git a/cmd/restic/lock_test.go b/cmd/restic/lock_test.go
index c074f15a6..2f8420853 100644
--- a/cmd/restic/lock_test.go
+++ b/cmd/restic/lock_test.go
@@ -3,30 +3,42 @@ package main
import (
"context"
"fmt"
+ "runtime"
+ "strings"
+ "sync"
"testing"
"time"
+ "github.com/restic/restic/internal/backend/location"
+ "github.com/restic/restic/internal/backend/mem"
+ "github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/repository"
"github.com/restic/restic/internal/restic"
- rtest "github.com/restic/restic/internal/test"
+ "github.com/restic/restic/internal/test"
)
-func openTestRepo(t *testing.T, wrapper backendWrapper) (*repository.Repository, func(), *testEnvironment) {
+func openLockTestRepo(t *testing.T, wrapper backendWrapper) (*repository.Repository, func(), *testEnvironment) {
env, cleanup := withTestEnvironment(t)
+
+ reg := location.NewRegistry()
+ reg.Register(mem.NewFactory())
+ env.gopts.backends = reg
+ env.gopts.Repo = "mem:"
+
if wrapper != nil {
env.gopts.backendTestHook = wrapper
}
testRunInit(t, env.gopts)
repo, err := OpenRepository(context.TODO(), env.gopts)
- rtest.OK(t, err)
+ test.OK(t, err)
return repo, cleanup, env
}
-func checkedLockRepo(ctx context.Context, t *testing.T, repo restic.Repository) (*restic.Lock, context.Context) {
- lock, wrappedCtx, err := lockRepo(ctx, repo)
- rtest.OK(t, err)
- rtest.OK(t, wrappedCtx.Err())
+func checkedLockRepo(ctx context.Context, t *testing.T, repo restic.Repository, env *testEnvironment) (*restic.Lock, context.Context) {
+ lock, wrappedCtx, err := lockRepo(ctx, repo, env.gopts.RetryLock, env.gopts.JSON)
+ test.OK(t, err)
+ test.OK(t, wrappedCtx.Err())
if lock.Stale() {
t.Fatal("lock returned stale lock")
}
@@ -34,10 +46,10 @@ func checkedLockRepo(ctx context.Context, t *testing.T, repo restic.Repository)
}
func TestLock(t *testing.T) {
- repo, cleanup, _ := openTestRepo(t, nil)
+ repo, cleanup, env := openLockTestRepo(t, nil)
defer cleanup()
- lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo)
+ lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, env)
unlockRepo(lock)
if wrappedCtx.Err() == nil {
t.Fatal("unlock did not cancel context")
@@ -45,12 +57,12 @@ func TestLock(t *testing.T) {
}
func TestLockCancel(t *testing.T) {
- repo, cleanup, _ := openTestRepo(t, nil)
+ repo, cleanup, env := openLockTestRepo(t, nil)
defer cleanup()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
- lock, wrappedCtx := checkedLockRepo(ctx, t, repo)
+ lock, wrappedCtx := checkedLockRepo(ctx, t, repo, env)
cancel()
if wrappedCtx.Err() == nil {
t.Fatal("canceled parent context did not cancel context")
@@ -61,12 +73,12 @@ func TestLockCancel(t *testing.T) {
}
func TestLockUnlockAll(t *testing.T) {
- repo, cleanup, _ := openTestRepo(t, nil)
+ repo, cleanup, env := openLockTestRepo(t, nil)
defer cleanup()
- lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo)
+ lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, env)
_, err := unlockAll(0)
- rtest.OK(t, err)
+ test.OK(t, err)
if wrappedCtx.Err() == nil {
t.Fatal("canceled parent context did not cancel context")
}
@@ -76,18 +88,19 @@ func TestLockUnlockAll(t *testing.T) {
}
func TestLockConflict(t *testing.T) {
- repo, cleanup, env := openTestRepo(t, nil)
+ repo, cleanup, env := openLockTestRepo(t, nil)
defer cleanup()
repo2, err := OpenRepository(context.TODO(), env.gopts)
- rtest.OK(t, err)
+ test.OK(t, err)
- lock, _, err := lockRepoExclusive(context.Background(), repo)
- rtest.OK(t, err)
+ lock, _, err := lockRepoExclusive(context.Background(), repo, env.gopts.RetryLock, env.gopts.JSON)
+ test.OK(t, err)
defer unlockRepo(lock)
- _, _, err = lockRepo(context.Background(), repo2)
+ _, _, err = lockRepo(context.Background(), repo2, env.gopts.RetryLock, env.gopts.JSON)
if err == nil {
t.Fatal("second lock should have failed")
}
+ test.Assert(t, restic.IsAlreadyLocked(err), "unexpected error %v", err)
}
type writeOnceBackend struct {
@@ -104,7 +117,7 @@ func (b *writeOnceBackend) Save(ctx context.Context, h restic.Handle, rd restic.
}
func TestLockFailedRefresh(t *testing.T) {
- repo, cleanup, _ := openTestRepo(t, func(r restic.Backend) (restic.Backend, error) {
+ repo, cleanup, env := openLockTestRepo(t, func(r restic.Backend) (restic.Backend, error) {
return &writeOnceBackend{Backend: r}, nil
})
defer cleanup()
@@ -117,7 +130,7 @@ func TestLockFailedRefresh(t *testing.T) {
refreshInterval, refreshabilityTimeout = ri, rt
}()
- lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo)
+ lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, env)
select {
case <-wrappedCtx.Done():
@@ -136,11 +149,13 @@ type loggingBackend struct {
func (b *loggingBackend) Save(ctx context.Context, h restic.Handle, rd restic.RewindReader) error {
b.t.Logf("save %v @ %v", h, time.Now())
- return b.Backend.Save(ctx, h, rd)
+ err := b.Backend.Save(ctx, h, rd)
+ b.t.Logf("save finished %v @ %v", h, time.Now())
+ return err
}
func TestLockSuccessfulRefresh(t *testing.T) {
- repo, cleanup, _ := openTestRepo(t, func(r restic.Backend) (restic.Backend, error) {
+ repo, cleanup, env := openLockTestRepo(t, func(r restic.Backend) (restic.Backend, error) {
return &loggingBackend{
Backend: r,
t: t,
@@ -151,20 +166,163 @@ func TestLockSuccessfulRefresh(t *testing.T) {
t.Logf("test for successful lock refresh %v", time.Now())
// reduce locking intervals to be suitable for testing
ri, rt := refreshInterval, refreshabilityTimeout
- refreshInterval = 40 * time.Millisecond
- refreshabilityTimeout = 200 * time.Millisecond
+ refreshInterval = 60 * time.Millisecond
+ refreshabilityTimeout = 500 * time.Millisecond
defer func() {
refreshInterval, refreshabilityTimeout = ri, rt
}()
- lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo)
+ lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, env)
select {
case <-wrappedCtx.Done():
- t.Fatal("lock refresh failed")
+ // don't call t.Fatal to allow the lock to be properly cleaned up
+ t.Error("lock refresh failed", time.Now())
+
+ // Dump full stacktrace
+ buf := make([]byte, 1024*1024)
+ n := runtime.Stack(buf, true)
+ buf = buf[:n]
+ t.Log(string(buf))
+
case <-time.After(2 * refreshabilityTimeout):
// expected lock refresh to work
}
// unlockRepo should not crash
unlockRepo(lock)
}
+
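+// slowBackend artificially delays Save calls; the tests use it to push lock
+// refreshes past the refreshability timeout.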
+type slowBackend struct {
+ restic.Backend
+ m sync.Mutex
+ sleep time.Duration
+}
+
+func (b *slowBackend) Save(ctx context.Context, h restic.Handle, rd restic.RewindReader) error {
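+	// read the configured delay under the mutex, as tests mutate it concurrently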
+ b.m.Lock()
+ sleep := b.sleep
+ b.m.Unlock()
+ time.Sleep(sleep)
+ return b.Backend.Save(ctx, h, rd)
+}
+
+func TestLockSuccessfulStaleRefresh(t *testing.T) {
+ var sb *slowBackend
+ repo, cleanup, env := openLockTestRepo(t, func(r restic.Backend) (restic.Backend, error) {
+ sb = &slowBackend{Backend: r}
+ return sb, nil
+ })
+ defer cleanup()
+
+ t.Logf("test for successful lock refresh %v", time.Now())
+ // reduce locking intervals to be suitable for testing
+ ri, rt := refreshInterval, refreshabilityTimeout
+ refreshInterval = 10 * time.Millisecond
+ refreshabilityTimeout = 50 * time.Millisecond
+ defer func() {
+ refreshInterval, refreshabilityTimeout = ri, rt
+ }()
+
+ lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, env)
+ // delay lock refreshing long enough that the lock would expire
+ sb.m.Lock()
+ sb.sleep = refreshabilityTimeout + refreshInterval
+ sb.m.Unlock()
+
+ select {
+ case <-wrappedCtx.Done():
+ // don't call t.Fatal to allow the lock to be properly cleaned up
+ t.Error("lock refresh failed", time.Now())
+
+ case <-time.After(refreshabilityTimeout):
+ }
+ // reset slow backend
+ sb.m.Lock()
+ sb.sleep = 0
+ sb.m.Unlock()
+ debug.Log("normal lock period has expired")
+
+ select {
+ case <-wrappedCtx.Done():
+ // don't call t.Fatal to allow the lock to be properly cleaned up
+ t.Error("lock refresh failed", time.Now())
+
+ case <-time.After(3 * refreshabilityTimeout):
+ // expected lock refresh to work
+ }
+
+ // unlockRepo should not crash
+ unlockRepo(lock)
+}
+
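+// TestLockWaitTimeout checks that acquiring a lock on an exclusively locked
+// repository fails with an error once the retry timeout expires.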
+func TestLockWaitTimeout(t *testing.T) {
+ repo, cleanup, env := openLockTestRepo(t, nil)
+ defer cleanup()
+
+ elock, _, err := lockRepoExclusive(context.TODO(), repo, env.gopts.RetryLock, env.gopts.JSON)
+ test.OK(t, err)
+
+ retryLock := 200 * time.Millisecond
+
+ start := time.Now()
+ lock, _, err := lockRepo(context.TODO(), repo, retryLock, env.gopts.JSON)
+ duration := time.Since(start)
+
+ test.Assert(t, err != nil,
+ "create normal lock with exclusively locked repo didn't return an error")
+ test.Assert(t, strings.Contains(err.Error(), "repository is already locked exclusively"),
+ "create normal lock with exclusively locked repo didn't return the correct error")
+ test.Assert(t, retryLock <= duration && duration < retryLock*3/2,
+ "create normal lock with exclusively locked repo didn't wait for the specified timeout")
+
+ test.OK(t, lock.Unlock())
+ test.OK(t, elock.Unlock())
+}
+
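+// TestLockWaitCancel checks that cancelling the context aborts the lock
+// retry loop before the retry timeout expires.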
+func TestLockWaitCancel(t *testing.T) {
+ repo, cleanup, env := openLockTestRepo(t, nil)
+ defer cleanup()
+
+ elock, _, err := lockRepoExclusive(context.TODO(), repo, env.gopts.RetryLock, env.gopts.JSON)
+ test.OK(t, err)
+
+ retryLock := 200 * time.Millisecond
+ cancelAfter := 40 * time.Millisecond
+
+ start := time.Now()
+ ctx, cancel := context.WithCancel(context.TODO())
+ time.AfterFunc(cancelAfter, cancel)
+
+ lock, _, err := lockRepo(ctx, repo, retryLock, env.gopts.JSON)
+ duration := time.Since(start)
+
+ test.Assert(t, err != nil,
+ "create normal lock with exclusively locked repo didn't return an error")
+ test.Assert(t, strings.Contains(err.Error(), "context canceled"),
+ "create normal lock with exclusively locked repo didn't return the correct error")
+ test.Assert(t, cancelAfter <= duration && duration < retryLock-10*time.Millisecond,
+ "create normal lock with exclusively locked repo didn't return in time, duration %v", duration)
+
+ test.OK(t, lock.Unlock())
+ test.OK(t, elock.Unlock())
+}
+
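+// TestLockWaitSuccess checks that a waiting lock attempt succeeds once the
+// exclusive lock is released.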
+func TestLockWaitSuccess(t *testing.T) {
+ repo, cleanup, env := openLockTestRepo(t, nil)
+ defer cleanup()
+
+ elock, _, err := lockRepoExclusive(context.TODO(), repo, env.gopts.RetryLock, env.gopts.JSON)
+ test.OK(t, err)
+
+ retryLock := 200 * time.Millisecond
+ unlockAfter := 40 * time.Millisecond
+
+ time.AfterFunc(unlockAfter, func() {
+ test.OK(t, elock.Unlock())
+ })
+
+ lock, _, err := lockRepo(context.TODO(), repo, retryLock, env.gopts.JSON)
+ test.OK(t, err)
+
+ test.OK(t, lock.Unlock())
+}
diff --git a/cmd/restic/main.go b/cmd/restic/main.go
index cfef7c885..17b9c468d 100644
--- a/cmd/restic/main.go
+++ b/cmd/restic/main.go
@@ -7,6 +7,7 @@ import (
"log"
"os"
"runtime"
+ godebug "runtime/debug"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/options"
@@ -24,6 +25,8 @@ var cmdRoot = &cobra.Command{
Long: `
restic is a backup program which allows saving multiple revisions of files and
directories in an encrypted repository stored on different backends.
+
+The full documentation can be found at https://restic.readthedocs.io/ .
`,
SilenceErrors: true,
SilenceUsage: true,
@@ -63,11 +66,7 @@ directories in an encrypted repository stored on different backends.
// run the debug functions for all subcommands (if build tag "debug" is
// enabled)
- if err := runDebug(); err != nil {
- return err
- }
-
- return nil
+ return runDebug()
},
}
@@ -85,7 +84,16 @@ func needsPassword(cmd string) bool {
var logBuffer = bytes.NewBuffer(nil)
+func tweakGoGC() {
+ // lower GOGC from 100 to 50, unless it was manually overwritten by the user
+ oldValue := godebug.SetGCPercent(50)
+ if oldValue != 100 {
+ godebug.SetGCPercent(oldValue)
+ }
+}
+
func main() {
+ tweakGoGC()
// install custom global logger into a buffer, if an error occurs
// we can show the logs
log.SetOutput(logBuffer)