summaryrefslogtreecommitdiff
path: root/cmd
diff options
context:
space:
mode:
authorFélix Sipma <felix+debian@gueux.org>2017-11-27 11:41:45 +0100
committerFélix Sipma <felix+debian@gueux.org>2017-11-27 11:41:45 +0100
commit78bff2c05502559e33db1be73cf6ae383e0f96e6 (patch)
treed9d4f4757508eb2794c6276bddcd2232b4f4b834 /cmd
parent4bb9f95800766be38ccb07ec9a5134e87e5b0316 (diff)
New upstream version 0.8.0
Diffstat (limited to 'cmd')
-rw-r--r--cmd/restic/cleanup.go30
-rw-r--r--cmd/restic/cmd_autocomplete.go37
-rw-r--r--cmd/restic/cmd_backup.go22
-rw-r--r--cmd/restic/cmd_check.go23
-rw-r--r--cmd/restic/cmd_debug.go217
-rw-r--r--cmd/restic/cmd_dump.go248
-rw-r--r--cmd/restic/cmd_forget.go18
-rw-r--r--cmd/restic/cmd_generate.go94
-rw-r--r--cmd/restic/cmd_manpage.go70
-rw-r--r--cmd/restic/cmd_mount.go6
-rw-r--r--cmd/restic/cmd_prune.go26
-rw-r--r--cmd/restic/cmd_snapshots.go55
-rw-r--r--cmd/restic/cmd_tag.go6
-rw-r--r--cmd/restic/exclude.go97
-rw-r--r--cmd/restic/exclude_test.go2
-rw-r--r--cmd/restic/excludes31
-rw-r--r--cmd/restic/global.go74
-rw-r--r--cmd/restic/integration_fuse_test.go62
-rw-r--r--cmd/restic/integration_helpers_test.go35
-rw-r--r--cmd/restic/integration_test.go327
-rw-r--r--cmd/restic/local_layout_test.go8
-rw-r--r--cmd/restic/lock.go3
-rw-r--r--cmd/restic/table.go19
23 files changed, 1013 insertions, 497 deletions
diff --git a/cmd/restic/cleanup.go b/cmd/restic/cleanup.go
index 12dbdc142..04875fe45 100644
--- a/cmd/restic/cleanup.go
+++ b/cmd/restic/cleanup.go
@@ -14,15 +14,27 @@ var cleanupHandlers struct {
sync.Mutex
list []func() error
done bool
+ ch chan os.Signal
}
var stderr = os.Stderr
func init() {
- c := make(chan os.Signal)
- signal.Notify(c, syscall.SIGINT)
+ cleanupHandlers.ch = make(chan os.Signal)
+ go CleanupHandler(cleanupHandlers.ch)
+ InstallSignalHandler()
+}
+
+// InstallSignalHandler listens for SIGINT and SIGPIPE, and triggers the cleanup handlers.
+func InstallSignalHandler() {
+ signal.Notify(cleanupHandlers.ch, syscall.SIGINT)
+ signal.Notify(cleanupHandlers.ch, syscall.SIGPIPE)
+}
- go CleanupHandler(c)
+// SuspendSignalHandler removes the signal handler for SIGINT and SIGPIPE.
+func SuspendSignalHandler() {
+ signal.Reset(syscall.SIGINT)
+ signal.Reset(syscall.SIGPIPE)
}
// AddCleanupHandler adds the function f to the list of cleanup handlers so
@@ -57,12 +69,18 @@ func RunCleanupHandlers() {
cleanupHandlers.list = nil
}
-// CleanupHandler handles the SIGINT signal.
+// CleanupHandler handles the SIGINT and SIGPIPE signals.
func CleanupHandler(c <-chan os.Signal) {
for s := range c {
debug.Log("signal %v received, cleaning up", s)
- fmt.Printf("%sInterrupt received, cleaning up\n", ClearLine())
- Exit(0)
+ fmt.Fprintf(stderr, "%ssignal %v received, cleaning up\n", ClearLine(), s)
+
+ code := 0
+ if s != syscall.SIGINT {
+ code = 1
+ }
+
+ Exit(code)
}
}
diff --git a/cmd/restic/cmd_autocomplete.go b/cmd/restic/cmd_autocomplete.go
deleted file mode 100644
index 643bd96bf..000000000
--- a/cmd/restic/cmd_autocomplete.go
+++ /dev/null
@@ -1,37 +0,0 @@
-package main
-
-import (
- "github.com/spf13/cobra"
-)
-
-var cmdAutocomplete = &cobra.Command{
- Use: "autocomplete",
- Short: "Generate shell autocompletion script",
- Long: `The "autocomplete" command generates a shell autocompletion script.
-
-NOTE: The current version supports Bash only.
- This should work for *nix systems with Bash installed.
-
-By default, the file is written directly to /etc/bash_completion.d
-for convenience, and the command may need superuser rights, e.g.:
-
-$ sudo restic autocomplete`,
-
- DisableAutoGenTag: true,
- RunE: func(cmd *cobra.Command, args []string) error {
- if err := cmdRoot.GenBashCompletionFile(autocompleteTarget); err != nil {
- return err
- }
- return nil
- },
-}
-
-var autocompleteTarget string
-
-func init() {
- cmdRoot.AddCommand(cmdAutocomplete)
-
- cmdAutocomplete.Flags().StringVarP(&autocompleteTarget, "completionfile", "", "/usr/share/bash-completion/completions/restic", "autocompletion file")
- // For bash-completion
- cmdAutocomplete.Flags().SetAnnotation("completionfile", cobra.BashCompFilenameExt, []string{})
-}
diff --git a/cmd/restic/cmd_backup.go b/cmd/restic/cmd_backup.go
index 8a69f5cbf..eb04b6c4c 100644
--- a/cmd/restic/cmd_backup.go
+++ b/cmd/restic/cmd_backup.go
@@ -83,7 +83,7 @@ func init() {
f.BoolVar(&backupOptions.Stdin, "stdin", false, "read backup from stdin")
f.StringVar(&backupOptions.StdinFilename, "stdin-filename", "stdin", "file name to use when reading from stdin")
f.StringArrayVar(&backupOptions.Tags, "tag", nil, "add a `tag` for the new snapshot (can be specified multiple times)")
- f.StringVar(&backupOptions.Hostname, "hostname", "", "set the `hostname` for the snapshot manually")
+ f.StringVar(&backupOptions.Hostname, "hostname", "", "set the `hostname` for the snapshot manually. To prevent an expensive rescan use the \"parent\" flag")
f.StringVar(&backupOptions.FilesFrom, "files-from", "", "read the files to backup from file (can be combined with file args)")
f.StringVar(&backupOptions.TimeStamp, "time", "", "time of the backup (ex. '2012-11-01 22:08:41') (default: now)")
}
@@ -298,9 +298,14 @@ func readLinesFromFile(filename string) ([]string, error) {
scanner := bufio.NewScanner(r)
for scanner.Scan() {
line := scanner.Text()
+ // ignore empty lines
if line == "" {
continue
}
+ // strip comments
+ if strings.HasPrefix(line, "#") {
+ continue
+ }
lines = append(lines, line)
}
@@ -367,8 +372,9 @@ func runBackup(opts BackupOptions, gopts GlobalOptions, args []string) error {
opts.ExcludeIfPresent = append(opts.ExcludeIfPresent, "CACHEDIR.TAG:Signature: 8a477f597d28d172789f06886806bc55")
}
+ rc := &rejectionCache{}
for _, spec := range opts.ExcludeIfPresent {
- f, err := rejectIfPresent(spec)
+ f, err := rejectIfPresent(spec, rc)
if err != nil {
return err
}
@@ -387,6 +393,16 @@ func runBackup(opts BackupOptions, gopts GlobalOptions, args []string) error {
return err
}
+ // exclude restic cache
+ if repo.Cache != nil {
+ f, err := rejectResticCache(repo)
+ if err != nil {
+ return err
+ }
+
+ rejectFuncs = append(rejectFuncs, f)
+ }
+
err = repo.LoadIndex(context.TODO())
if err != nil {
return err
@@ -406,7 +422,7 @@ func runBackup(opts BackupOptions, gopts GlobalOptions, args []string) error {
// Find last snapshot to set it as parent, if not already set
if !opts.Force && parentSnapshotID == nil {
- id, err := restic.FindLatestSnapshot(context.TODO(), repo, target, []restic.TagList{opts.Tags}, opts.Hostname)
+ id, err := restic.FindLatestSnapshot(context.TODO(), repo, target, []restic.TagList{}, opts.Hostname)
if err == nil {
parentSnapshotID = &id
} else if err != restic.ErrNoSnapshotFound {
diff --git a/cmd/restic/cmd_check.go b/cmd/restic/cmd_check.go
index b64429a0e..e1fd1208f 100644
--- a/cmd/restic/cmd_check.go
+++ b/cmd/restic/cmd_check.go
@@ -19,6 +19,9 @@ var cmdCheck = &cobra.Command{
Long: `
The "check" command tests the repository for errors and reports any errors it
finds. It can also be used to read all data and therefore simulate a restore.
+
+By default, the "check" command will always load all data directly from the
+repository and not use a local cache.
`,
DisableAutoGenTag: true,
RunE: func(cmd *cobra.Command, args []string) error {
@@ -30,6 +33,7 @@ finds. It can also be used to read all data and therefore simulate a restore.
type CheckOptions struct {
ReadData bool
CheckUnused bool
+ WithCache bool
}
var checkOptions CheckOptions
@@ -40,6 +44,7 @@ func init() {
f := cmdCheck.Flags()
f.BoolVar(&checkOptions.ReadData, "read-data", false, "read all data blobs")
f.BoolVar(&checkOptions.CheckUnused, "check-unused", false, "find unused blobs")
+ f.BoolVar(&checkOptions.WithCache, "with-cache", false, "use the cache")
}
func newReadProgress(gopts GlobalOptions, todo restic.Stat) *restic.Progress {
@@ -77,13 +82,18 @@ func runCheck(opts CheckOptions, gopts GlobalOptions, args []string) error {
return errors.Fatal("check has no arguments")
}
+ if !opts.WithCache {
+ // do not use a cache for the checker
+ gopts.NoCache = true
+ }
+
repo, err := OpenRepository(gopts)
if err != nil {
return err
}
if !gopts.NoLock {
- Verbosef("Create exclusive lock for repository\n")
+ Verbosef("create exclusive lock for repository\n")
lock, err := lockRepoExclusive(repo)
defer unlockRepo(lock)
if err != nil {
@@ -93,7 +103,7 @@ func runCheck(opts CheckOptions, gopts GlobalOptions, args []string) error {
chkr := checker.New(repo)
- Verbosef("Load indexes\n")
+ Verbosef("load indexes\n")
hints, errs := chkr.LoadIndex(context.TODO())
dupFound := false
@@ -118,7 +128,7 @@ func runCheck(opts CheckOptions, gopts GlobalOptions, args []string) error {
errorsFound := false
errChan := make(chan error)
- Verbosef("Check all packs\n")
+ Verbosef("check all packs\n")
go chkr.Packs(context.TODO(), errChan)
for err := range errChan {
@@ -126,7 +136,7 @@ func runCheck(opts CheckOptions, gopts GlobalOptions, args []string) error {
fmt.Fprintf(os.Stderr, "%v\n", err)
}
- Verbosef("Check snapshots, trees and blobs\n")
+ Verbosef("check snapshots, trees and blobs\n")
errChan = make(chan error)
go chkr.Structure(context.TODO(), errChan)
@@ -150,7 +160,7 @@ func runCheck(opts CheckOptions, gopts GlobalOptions, args []string) error {
}
if opts.ReadData {
- Verbosef("Read all data\n")
+ Verbosef("read all data\n")
p := newReadProgress(gopts, restic.Stat{Blobs: chkr.CountPacks()})
errChan := make(chan error)
@@ -166,5 +176,8 @@ func runCheck(opts CheckOptions, gopts GlobalOptions, args []string) error {
if errorsFound {
return errors.Fatal("repository contains errors")
}
+
+ Verbosef("no errors were found\n")
+
return nil
}
diff --git a/cmd/restic/cmd_debug.go b/cmd/restic/cmd_debug.go
new file mode 100644
index 000000000..6a06e96ed
--- /dev/null
+++ b/cmd/restic/cmd_debug.go
@@ -0,0 +1,217 @@
+// +build debug
+
+package main
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "io"
+ "os"
+
+ "github.com/spf13/cobra"
+
+ "github.com/restic/restic/internal/errors"
+ "github.com/restic/restic/internal/pack"
+ "github.com/restic/restic/internal/repository"
+ "github.com/restic/restic/internal/restic"
+
+ "github.com/restic/restic/internal/worker"
+)
+
+var cmdDebug = &cobra.Command{
+ Use: "debug",
+ Short: "Debug commands",
+}
+
+var cmdDebugDump = &cobra.Command{
+ Use: "dump [indexes|snapshots|all|packs]",
+ Short: "Dump data structures",
+ Long: `
+The "dump" command dumps data structures from the repository as JSON objects. It
+is used for debugging purposes only.`,
+ DisableAutoGenTag: true,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ return runDebugDump(globalOptions, args)
+ },
+}
+
+func init() {
+ cmdRoot.AddCommand(cmdDebug)
+ cmdDebug.AddCommand(cmdDebugDump)
+}
+
+func prettyPrintJSON(wr io.Writer, item interface{}) error {
+ buf, err := json.MarshalIndent(item, "", " ")
+ if err != nil {
+ return err
+ }
+
+ _, err = wr.Write(append(buf, '\n'))
+ return err
+}
+
+func debugPrintSnapshots(repo *repository.Repository, wr io.Writer) error {
+ for id := range repo.List(context.TODO(), restic.SnapshotFile) {
+ snapshot, err := restic.LoadSnapshot(context.TODO(), repo, id)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "LoadSnapshot(%v): %v", id.Str(), err)
+ continue
+ }
+
+ fmt.Fprintf(wr, "snapshot_id: %v\n", id)
+
+ err = prettyPrintJSON(wr, snapshot)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+const dumpPackWorkers = 10
+
+// Pack is the struct used in printPacks.
+type Pack struct {
+ Name string `json:"name"`
+
+ Blobs []Blob `json:"blobs"`
+}
+
+// Blob is the struct used in printPacks.
+type Blob struct {
+ Type restic.BlobType `json:"type"`
+ Length uint `json:"length"`
+ ID restic.ID `json:"id"`
+ Offset uint `json:"offset"`
+}
+
+func printPacks(repo *repository.Repository, wr io.Writer) error {
+ f := func(ctx context.Context, job worker.Job) (interface{}, error) {
+ name := job.Data.(string)
+
+ h := restic.Handle{Type: restic.DataFile, Name: name}
+
+ blobInfo, err := repo.Backend().Stat(ctx, h)
+ if err != nil {
+ return nil, err
+ }
+
+ blobs, err := pack.List(repo.Key(), restic.ReaderAt(repo.Backend(), h), blobInfo.Size)
+ if err != nil {
+ return nil, err
+ }
+
+ return blobs, nil
+ }
+
+ jobCh := make(chan worker.Job)
+ resCh := make(chan worker.Job)
+ wp := worker.New(context.TODO(), dumpPackWorkers, f, jobCh, resCh)
+
+ go func() {
+ for name := range repo.Backend().List(context.TODO(), restic.DataFile) {
+ jobCh <- worker.Job{Data: name}
+ }
+ close(jobCh)
+ }()
+
+ for job := range resCh {
+ name := job.Data.(string)
+
+ if job.Error != nil {
+ fmt.Fprintf(os.Stderr, "error for pack %v: %v\n", name, job.Error)
+ continue
+ }
+
+ entries := job.Result.([]restic.Blob)
+ p := Pack{
+ Name: name,
+ Blobs: make([]Blob, len(entries)),
+ }
+ for i, blob := range entries {
+ p.Blobs[i] = Blob{
+ Type: blob.Type,
+ Length: blob.Length,
+ ID: blob.ID,
+ Offset: blob.Offset,
+ }
+ }
+
+ prettyPrintJSON(os.Stdout, p)
+ }
+
+ wp.Wait()
+
+ return nil
+}
+
+func dumpIndexes(repo restic.Repository) error {
+ for id := range repo.List(context.TODO(), restic.IndexFile) {
+ fmt.Printf("index_id: %v\n", id)
+
+ idx, err := repository.LoadIndex(context.TODO(), repo, id)
+ if err != nil {
+ return err
+ }
+
+ err = idx.Dump(os.Stdout)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func runDebugDump(gopts GlobalOptions, args []string) error {
+ if len(args) != 1 {
+ return errors.Fatal("type not specified")
+ }
+
+ repo, err := OpenRepository(gopts)
+ if err != nil {
+ return err
+ }
+
+ if !gopts.NoLock {
+ lock, err := lockRepo(repo)
+ defer unlockRepo(lock)
+ if err != nil {
+ return err
+ }
+ }
+
+ err = repo.LoadIndex(context.TODO())
+ if err != nil {
+ return err
+ }
+
+ tpe := args[0]
+
+ switch tpe {
+ case "indexes":
+ return dumpIndexes(repo)
+ case "snapshots":
+ return debugPrintSnapshots(repo, os.Stdout)
+ case "packs":
+ return printPacks(repo, os.Stdout)
+ case "all":
+ fmt.Printf("snapshots:\n")
+ err := debugPrintSnapshots(repo, os.Stdout)
+ if err != nil {
+ return err
+ }
+
+ fmt.Printf("\nindexes:\n")
+ err = dumpIndexes(repo)
+ if err != nil {
+ return err
+ }
+
+ return nil
+ default:
+ return errors.Fatalf("no such type %q", tpe)
+ }
+}
diff --git a/cmd/restic/cmd_dump.go b/cmd/restic/cmd_dump.go
index 215de8dad..7d07552d2 100644
--- a/cmd/restic/cmd_dump.go
+++ b/cmd/restic/cmd_dump.go
@@ -1,168 +1,134 @@
-// xbuild debug
-
package main
import (
"context"
- "encoding/json"
"fmt"
- "io"
"os"
+ "path/filepath"
- "github.com/spf13/cobra"
-
+ "github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/errors"
- "github.com/restic/restic/internal/pack"
- "github.com/restic/restic/internal/repository"
"github.com/restic/restic/internal/restic"
- "github.com/restic/restic/internal/worker"
+ "github.com/spf13/cobra"
)
var cmdDump = &cobra.Command{
- Use: "dump [indexes|snapshots|trees|all|packs]",
- Short: "Dump data structures",
+ Use: "dump [flags] snapshotID file",
+ Short: "Print a backed-up file to stdout",
Long: `
-The "dump" command dumps data structures from the repository as JSON objects. It
-is used for debugging purposes only.`,
+The "dump" command extracts a single file from a snapshot from the repository and
+prints its contents to stdout.
+
+The special snapshot "latest" can be used to use the latest snapshot in the
+repository.
+`,
DisableAutoGenTag: true,
RunE: func(cmd *cobra.Command, args []string) error {
- return runDump(globalOptions, args)
+ return runDump(dumpOptions, globalOptions, args)
},
}
+// DumpOptions collects all options for the dump command.
+type DumpOptions struct {
+ Host string
+ Paths []string
+ Tags restic.TagLists
+}
+
+var dumpOptions DumpOptions
+
func init() {
cmdRoot.AddCommand(cmdDump)
+
+ flags := cmdDump.Flags()
+ flags.StringVarP(&dumpOptions.Host, "host", "H", "", `only consider snapshots for this host when the snapshot ID is "latest"`)
+ flags.Var(&dumpOptions.Tags, "tag", "only consider snapshots which include this `taglist` for snapshot ID \"latest\"")
+ flags.StringArrayVar(&dumpOptions.Paths, "path", nil, "only consider snapshots which include this (absolute) `path` for snapshot ID \"latest\"")
}
-func prettyPrintJSON(wr io.Writer, item interface{}) error {
- buf, err := json.MarshalIndent(item, "", " ")
- if err != nil {
- return err
+func splitPath(path string) []string {
+ d, f := filepath.Split(path)
+ if d == "" || d == "/" {
+ return []string{f}
}
-
- _, err = wr.Write(append(buf, '\n'))
- return err
+ s := splitPath(filepath.Clean(d))
+ return append(s, f)
}
-func debugPrintSnapshots(repo *repository.Repository, wr io.Writer) error {
- for id := range repo.List(context.TODO(), restic.SnapshotFile) {
- snapshot, err := restic.LoadSnapshot(context.TODO(), repo, id)
- if err != nil {
- fmt.Fprintf(os.Stderr, "LoadSnapshot(%v): %v", id.Str(), err)
- continue
- }
-
- fmt.Fprintf(wr, "snapshot_id: %v\n", id)
-
- err = prettyPrintJSON(wr, snapshot)
+func dumpNode(ctx context.Context, repo restic.Repository, node *restic.Node) error {
+ var buf []byte
+ for _, id := range node.Content {
+ size, err := repo.LookupBlobSize(id, restic.DataBlob)
if err != nil {
return err
}
- }
-
- return nil
-}
-
-const dumpPackWorkers = 10
-
-// Pack is the struct used in printPacks.
-type Pack struct {
- Name string `json:"name"`
-
- Blobs []Blob `json:"blobs"`
-}
-
-// Blob is the struct used in printPacks.
-type Blob struct {
- Type restic.BlobType `json:"type"`
- Length uint `json:"length"`
- ID restic.ID `json:"id"`
- Offset uint `json:"offset"`
-}
-
-func printPacks(repo *repository.Repository, wr io.Writer) error {
- f := func(ctx context.Context, job worker.Job) (interface{}, error) {
- name := job.Data.(string)
- h := restic.Handle{Type: restic.DataFile, Name: name}
+ buf = buf[:cap(buf)]
+ if len(buf) < restic.CiphertextLength(int(size)) {
+ buf = restic.NewBlobBuffer(int(size))
+ }
- blobInfo, err := repo.Backend().Stat(ctx, h)
+ n, err := repo.LoadBlob(ctx, restic.DataBlob, id, buf)
if err != nil {
- return nil, err
+ return err
}
+ buf = buf[:n]
- blobs, err := pack.List(repo.Key(), restic.ReaderAt(repo.Backend(), h), blobInfo.Size)
+ _, err = os.Stdout.Write(buf)
if err != nil {
- return nil, err
+ return errors.Wrap(err, "Write")
}
-
- return blobs, nil
}
+ return nil
+}
- jobCh := make(chan worker.Job)
- resCh := make(chan worker.Job)
- wp := worker.New(context.TODO(), dumpPackWorkers, f, jobCh, resCh)
-
- go func() {
- for name := range repo.Backend().List(context.TODO(), restic.DataFile) {
- jobCh <- worker.Job{Data: name}
- }
- close(jobCh)
- }()
-
- for job := range resCh {
- name := job.Data.(string)
-
- if job.Error != nil {
- fmt.Fprintf(os.Stderr, "error for pack %v: %v\n", name, job.Error)
- continue
- }
-
- entries := job.Result.([]restic.Blob)
- p := Pack{
- Name: name,
- Blobs: make([]Blob, len(entries)),
- }
- for i, blob := range entries {
- p.Blobs[i] = Blob{
- Type: blob.Type,
- Length: blob.Length,
- ID: blob.ID,
- Offset: blob.Offset,
+func printFromTree(ctx context.Context, tree *restic.Tree, repo restic.Repository, prefix string, pathComponents []string) error {
+ if tree == nil {
+ return fmt.Errorf("called with a nil tree")
+ }
+ if repo == nil {
+ return fmt.Errorf("called with a nil repository")
+ }
+ l := len(pathComponents)
+ if l == 0 {
+ return fmt.Errorf("empty path components")
+ }
+ item := filepath.Join(prefix, pathComponents[0])
+ for _, node := range tree.Nodes {
+ if node.Name == pathComponents[0] {
+ switch {
+ case l == 1 && node.Type == "file":
+ return dumpNode(ctx, repo, node)
+ case l > 1 && node.Type == "dir":
+ subtree, err := repo.LoadTree(ctx, *node.Subtree)
+ if err != nil {
+ return errors.Wrapf(err, "cannot load subtree for %q", item)
+ }
+ return printFromTree(ctx, subtree, repo, item, pathComponents[1:])
+ case l > 1:
+ return fmt.Errorf("%q should be a dir, but s a %q", item, node.Type)
+ case node.Type != "file":
+ return fmt.Errorf("%q should be a file, but is a %q", item, node.Type)
}
}
-
- prettyPrintJSON(os.Stdout, p)
}
-
- wp.Wait()
-
- return nil
+ return fmt.Errorf("path %q not found in snapshot", item)
}
-func dumpIndexes(repo restic.Repository) error {
- for id := range repo.List(context.TODO(), restic.IndexFile) {
- fmt.Printf("index_id: %v\n", id)
-
- idx, err := repository.LoadIndex(context.TODO(), repo, id)
- if err != nil {
- return err
- }
+func runDump(opts DumpOptions, gopts GlobalOptions, args []string) error {
+ ctx := gopts.ctx
- err = idx.Dump(os.Stdout)
- if err != nil {
- return err
- }
+ if len(args) != 2 {
+ return errors.Fatal("no file and no snapshot ID specified")
}
- return nil
-}
+ snapshotIDString := args[0]
+ pathToPrint := args[1]
-func runDump(gopts GlobalOptions, args []string) error {
- if len(args) != 1 {
- return errors.Fatal("type not specified")
- }
+ debug.Log("dump file %q from %q", pathToPrint, snapshotIDString)
+
+ splittedPath := splitPath(pathToPrint)
repo, err := OpenRepository(gopts)
if err != nil {
@@ -177,35 +143,39 @@ func runDump(gopts GlobalOptions, args []string) error {
}
}
- err = repo.LoadIndex(context.TODO())
+ err = repo.LoadIndex(ctx)
if err != nil {
return err
}
- tpe := args[0]
-
- switch tpe {
- case "indexes":
- return dumpIndexes(repo)
- case "snapshots":
- return debugPrintSnapshots(repo, os.Stdout)
- case "packs":
- return printPacks(repo, os.Stdout)
- case "all":
- fmt.Printf("snapshots:\n")
- err := debugPrintSnapshots(repo, os.Stdout)
+ var id restic.ID
+
+ if snapshotIDString == "latest" {
+ id, err = restic.FindLatestSnapshot(ctx, repo, opts.Paths, opts.Tags, opts.Host)
if err != nil {
- return err
+ Exitf(1, "latest snapshot for criteria not found: %v Paths:%v Host:%v", err, opts.Paths, opts.Host)
}
-
- fmt.Printf("\nindexes:\n")
- err = dumpIndexes(repo)
+ } else {
+ id, err = restic.FindSnapshot(repo, snapshotIDString)
if err != nil {
- return err
+ Exitf(1, "invalid id %q: %v", snapshotIDString, err)
}
+ }
- return nil
- default:
- return errors.Fatalf("no such type %q", tpe)
+ sn, err := restic.LoadSnapshot(context.TODO(), repo, id)
+ if err != nil {
+ Exitf(2, "loading snapshot %q failed: %v", snapshotIDString, err)
}
+
+ tree, err := repo.LoadTree(ctx, *sn.Tree)
+ if err != nil {
+ Exitf(2, "loading tree for snapshot %q failed: %v", snapshotIDString, err)
+ }
+
+ err = printFromTree(ctx, tree, repo, "", splittedPath)
+ if err != nil {
+ Exitf(2, "cannot dump file: %v", err)
+ }
+
+ return nil
}
diff --git a/cmd/restic/cmd_forget.go b/cmd/restic/cmd_forget.go
index e26e33696..128b73905 100644
--- a/cmd/restic/cmd_forget.go
+++ b/cmd/restic/cmd_forget.go
@@ -35,9 +35,10 @@ type ForgetOptions struct {
Yearly int
KeepTags restic.TagLists
- Host string
- Tags restic.TagLists
- Paths []string
+ Host string
+ Tags restic.TagLists
+ Paths []string
+ Compact bool
// Grouping
GroupBy string
@@ -65,6 +66,7 @@ func init() {
f.StringVar(&forgetOptions.Host, "hostname", "", "only consider snapshots with the given `hostname` (deprecated)")
f.Var(&forgetOptions.Tags, "tag", "only consider snapshots which include this `taglist` in the format `tag[,tag,...]` (can be specified multiple times)")
f.StringArrayVar(&forgetOptions.Paths, "path", nil, "only consider snapshots which include this (absolute) `path` (can be specified multiple times)")
+ f.BoolVarP(&forgetOptions.Compact, "compact", "c", false, "use compact format")
f.StringVarP(&forgetOptions.GroupBy, "group-by", "g", "host,paths", "string for grouping snapshots by host,paths,tags")
f.BoolVarP(&forgetOptions.DryRun, "dry-run", "n", false, "do not delete anything, just print what would be done")
@@ -114,6 +116,8 @@ func runForget(opts ForgetOptions, gopts GlobalOptions, args []string) error {
}
}
+ removeSnapshots := 0
+
ctx, cancel := context.WithCancel(gopts.ctx)
defer cancel()
for sn := range FindFilteredSnapshots(ctx, repo, opts.Host, opts.Tags, opts.Paths, args) {
@@ -125,11 +129,12 @@ func runForget(opts ForgetOptions, gopts GlobalOptions, args []string) error {
return err
}
Verbosef("removed snapshot %v\n", sn.ID().Str())
+ removeSnapshots++
} else {
Verbosef("would have removed snapshot %v\n", sn.ID().Str())
}
} else {
- // Determing grouping-keys
+ // Determining grouping-keys
var tags []string
var hostname string
var paths []string
@@ -176,7 +181,6 @@ func runForget(opts ForgetOptions, gopts GlobalOptions, args []string) error {
return nil
}
- removeSnapshots := 0
for k, snapshotGroup := range snapshotGroups {
var key key
if json.Unmarshal([]byte(k), &key) != nil {
@@ -204,13 +208,13 @@ func runForget(opts ForgetOptions, gopts GlobalOptions, args []string) error {
if len(keep) != 0 && !gopts.Quiet {
Printf("keep %d snapshots:\n", len(keep))
- PrintSnapshots(globalOptions.stdout, keep, false)
+ PrintSnapshots(globalOptions.stdout, keep, opts.Compact)
Printf("\n")
}
if len(remove) != 0 && !gopts.Quiet {
Printf("remove %d snapshots:\n", len(remove))
- PrintSnapshots(globalOptions.stdout, remove, false)
+ PrintSnapshots(globalOptions.stdout, remove, opts.Compact)
Printf("\n")
}
diff --git a/cmd/restic/cmd_generate.go b/cmd/restic/cmd_generate.go
new file mode 100644
index 000000000..5c42537dc
--- /dev/null
+++ b/cmd/restic/cmd_generate.go
@@ -0,0 +1,94 @@
+package main
+
+import (
+ "time"
+
+ "github.com/restic/restic/internal/errors"
+ "github.com/spf13/cobra"
+ "github.com/spf13/cobra/doc"
+)
+
+var cmdGenerate = &cobra.Command{
+ Use: "generate [command]",
+ Short: "Generate manual pages and auto-completion files (bash, zsh)",
+ Long: `
+The "generate" command writes automatically generated files like the man pages
+and the auto-completion files for bash and zsh).
+`,
+ DisableAutoGenTag: true,
+ RunE: runGenerate,
+}
+
+type generateOptions struct {
+ ManDir string
+ BashCompletionFile string
+ ZSHCompletionFile string
+}
+
+var genOpts generateOptions
+
+func init() {
+ cmdRoot.AddCommand(cmdGenerate)
+ fs := cmdGenerate.Flags()
+ fs.StringVar(&genOpts.ManDir, "man", "", "write man pages to `directory`")
+ fs.StringVar(&genOpts.BashCompletionFile, "bash-completion", "", "write bash completion `file`")
+ fs.StringVar(&genOpts.ZSHCompletionFile, "zsh-completion", "", "write zsh completion `file`")
+}
+
+func writeManpages(dir string) error {
+ // use a fixed date for the man pages so that generating them is deterministic
+ date, err := time.Parse("Jan 2006", "Jan 2017")
+ if err != nil {
+ return err
+ }
+
+ header := &doc.GenManHeader{
+ Title: "restic backup",
+ Section: "1",
+ Source: "generated by `restic generate`",
+ Date: &date,
+ }
+
+ Verbosef("writing man pages to directory %v\n", dir)
+ return doc.GenManTree(cmdRoot, header, dir)
+}
+
+func writeBashCompletion(file string) error {
+ Verbosef("writing bash completion file to %v\n", file)
+ return cmdRoot.GenBashCompletionFile(file)
+}
+
+func writeZSHCompletion(file string) error {
+ Verbosef("writing zsh completion file to %v\n", file)
+ return cmdRoot.GenZshCompletionFile(file)
+}
+
+func runGenerate(cmd *cobra.Command, args []string) error {
+ if genOpts.ManDir != "" {
+ err := writeManpages(genOpts.ManDir)
+ if err != nil {
+ return err
+ }
+ }
+
+ if genOpts.BashCompletionFile != "" {
+ err := writeBashCompletion(genOpts.BashCompletionFile)
+ if err != nil {
+ return err
+ }
+ }
+
+ if genOpts.ZSHCompletionFile != "" {
+ err := writeZSHCompletion(genOpts.ZSHCompletionFile)
+ if err != nil {
+ return err
+ }
+ }
+
+ var empty generateOptions
+ if genOpts == empty {
+ return errors.Fatal("nothing to do, please specify at least one output file/dir")
+ }
+
+ return nil
+}
diff --git a/cmd/restic/cmd_manpage.go b/cmd/restic/cmd_manpage.go
deleted file mode 100644
index 1d39f4242..000000000
--- a/cmd/restic/cmd_manpage.go
+++ /dev/null
@@ -1,70 +0,0 @@
-package main
-
-import (
- "os"
- "time"
-
- "github.com/restic/restic/internal/errors"
- "github.com/spf13/cobra"
- "github.com/spf13/cobra/doc"
-)
-
-var cmdManpage = &cobra.Command{
- Use: "manpage [command]",
- Short: "Generate manual pages",
- Long: `
-The "manpage" command generates a manual page for a single command. It can also
-be used to write all manual pages to a directory. If the output directory is
-set and no command is specified, all manpages are written to the directory.
-`,
- DisableAutoGenTag: true,
- RunE: runManpage,
-}
-
-var manpageOpts = struct {
- OutputDir string
-}{}
-
-func init() {
- cmdRoot.AddCommand(cmdManpage)
- fs := cmdManpage.Flags()
- fs.StringVar(&manpageOpts.OutputDir, "output-dir", "", "write man pages to this `directory`")
-}
-
-func runManpage(cmd *cobra.Command, args []string) error {
- // use a fixed date for the man pages so that generating them is deterministic
- date, err := time.Parse("Jan 2006", "Jan 2017")
- if err != nil {
- return err
- }
-
- header := &doc.GenManHeader{
- Title: "restic backup",
- Section: "1",
- Source: "generated by `restic manpage`",
- Date: &date,
- }
-
- dir := manpageOpts.OutputDir
- if dir != "" {
- Verbosef("writing man pages to directory %v\n", dir)
- return doc.GenManTree(cmdRoot, header, dir)
- }
-
- switch {
- case len(args) == 0:
- return errors.Fatalf("no command given")
- case len(args) > 1:
- return errors.Fatalf("more than one command given: %v", args)
- }
-
- name := args[0]
-
- for _, cmd := range cmdRoot.Commands() {
- if cmd.Name() == name {
- return doc.GenMan(cmd, header, os.Stdout)
- }
- }
-
- return errors.Fatalf("command %q is not known", args)
-}
diff --git a/cmd/restic/cmd_mount.go b/cmd/restic/cmd_mount.go
index 8f24fdcc9..b38d13cdb 100644
--- a/cmd/restic/cmd_mount.go
+++ b/cmd/restic/cmd_mount.go
@@ -67,6 +67,12 @@ func mount(opts MountOptions, gopts GlobalOptions, mountpoint string) error {
return err
}
+ lock, err := lockRepo(repo)
+ defer unlockRepo(lock)
+ if err != nil {
+ return err
+ }
+
err = repo.LoadIndex(context.TODO())
if err != nil {
return err
diff --git a/cmd/restic/cmd_prune.go b/cmd/restic/cmd_prune.go
index 8e3c5b53d..1383d15a4 100644
--- a/cmd/restic/cmd_prune.go
+++ b/cmd/restic/cmd_prune.go
@@ -85,6 +85,25 @@ func runPrune(gopts GlobalOptions) error {
return pruneRepository(gopts, repo)
}
+func mixedBlobs(list []restic.Blob) bool {
+ var tree, data bool
+
+ for _, pb := range list {
+ switch pb.Type {
+ case restic.TreeBlob:
+ tree = true
+ case restic.DataBlob:
+ data = true
+ }
+
+ if tree && data {
+ return true
+ }
+ }
+
+ return false
+}
+
func pruneRepository(gopts GlobalOptions, repo restic.Repository) error {
ctx := gopts.ctx
@@ -122,7 +141,7 @@ func pruneRepository(gopts GlobalOptions, repo restic.Repository) error {
stats.bytes += pack.Size
blobs += len(pack.Entries)
}
- Verbosef("repository contains %v packs (%v blobs) with %v bytes\n",
+ Verbosef("repository contains %v packs (%v blobs) with %v\n",
len(idx.Packs), blobs, formatBytes(uint64(stats.bytes)))
blobCount := make(map[restic.BlobHandle]int)
@@ -191,6 +210,11 @@ func pruneRepository(gopts GlobalOptions, repo restic.Repository) error {
// find packs that need a rewrite
rewritePacks := restic.NewIDSet()
for _, pack := range idx.Packs {
+ if mixedBlobs(pack.Entries) {
+ rewritePacks.Insert(pack.ID)
+ continue
+ }
+
for _, blob := range pack.Entries {
h := restic.BlobHandle{ID: blob.ID, Type: blob.Type}
if !usedBlobs.Has(h) {
diff --git a/cmd/restic/cmd_snapshots.go b/cmd/restic/cmd_snapshots.go
index 7e8530b31..5dfb45e97 100644
--- a/cmd/restic/cmd_snapshots.go
+++ b/cmd/restic/cmd_snapshots.go
@@ -6,6 +6,7 @@ import (
"fmt"
"io"
"sort"
+ "strings"
"github.com/restic/restic/internal/restic"
"github.com/spf13/cobra"
@@ -29,6 +30,7 @@ type SnapshotOptions struct {
Tags restic.TagLists
Paths []string
Compact bool
+ Last bool
}
var snapshotOptions SnapshotOptions
@@ -41,6 +43,7 @@ func init() {
f.Var(&snapshotOptions.Tags, "tag", "only consider snapshots which include this `taglist` (can be specified multiple times)")
f.StringArrayVar(&snapshotOptions.Paths, "path", nil, "only consider snapshots for this `path` (can be specified multiple times)")
f.BoolVarP(&snapshotOptions.Compact, "compact", "c", false, "use compact format")
+ f.BoolVar(&snapshotOptions.Last, "last", false, "only show the last snapshot for each host and path")
}
func runSnapshots(opts SnapshotOptions, gopts GlobalOptions, args []string) error {
@@ -64,6 +67,11 @@ func runSnapshots(opts SnapshotOptions, gopts GlobalOptions, args []string) erro
for sn := range FindFilteredSnapshots(ctx, repo, opts.Host, opts.Tags, opts.Paths, args) {
list = append(list, sn)
}
+
+ if opts.Last {
+ list = FilterLastSnapshots(list)
+ }
+
sort.Sort(sort.Reverse(list))
if gopts.JSON {
@@ -78,9 +86,50 @@ func runSnapshots(opts SnapshotOptions, gopts GlobalOptions, args []string) erro
return nil
}
+// filterLastSnapshotsKey is used by FilterLastSnapshots.
+type filterLastSnapshotsKey struct {
+ Hostname string
+ JoinedPaths string
+}
+
+// newFilterLastSnapshotsKey initializes a filterLastSnapshotsKey from a Snapshot.
+func newFilterLastSnapshotsKey(sn *restic.Snapshot) filterLastSnapshotsKey {
+ // Shallow slice copy
+ var paths = make([]string, len(sn.Paths))
+ copy(paths, sn.Paths)
+ sort.Strings(paths)
+ return filterLastSnapshotsKey{sn.Hostname, strings.Join(paths, "|")}
+}
+
+// FilterLastSnapshots filters a list of snapshots to only return the last
+// entry for each hostname and path. If the snapshot contains multiple paths,
+// they will be joined and treated as one item.
+func FilterLastSnapshots(list restic.Snapshots) restic.Snapshots {
+ // Sort the snapshots so that the newer ones are listed first
+ sort.SliceStable(list, func(i, j int) bool {
+ return list[i].Time.After(list[j].Time)
+ })
+
+ var results restic.Snapshots
+ seen := make(map[filterLastSnapshotsKey]bool)
+ for _, sn := range list {
+ key := newFilterLastSnapshotsKey(sn)
+ if !seen[key] {
+ seen[key] = true
+ results = append(results, sn)
+ }
+ }
+ return results
+}
+
// PrintSnapshots prints a text table of the snapshots in list to stdout.
func PrintSnapshots(stdout io.Writer, list restic.Snapshots, compact bool) {
+ // always sort the snapshots so that the newer ones are listed last
+ sort.SliceStable(list, func(i, j int) bool {
+ return list[i].Time.Before(list[j].Time)
+ })
+
// Determine the max widths for host and tag.
maxHost, maxTag := 10, 6
for _, sn := range list {
@@ -158,6 +207,8 @@ func PrintSnapshots(stdout io.Writer, list restic.Snapshots, compact bool) {
}
}
+ tab.Footer = fmt.Sprintf("%d snapshots", len(list))
+
tab.Write(stdout)
}
@@ -165,7 +216,8 @@ func PrintSnapshots(stdout io.Writer, list restic.Snapshots, compact bool) {
type Snapshot struct {
*restic.Snapshot
- ID *restic.ID `json:"id"`
+ ID *restic.ID `json:"id"`
+ ShortID string `json:"short_id"`
}
// printSnapshotsJSON writes the JSON representation of list to stdout.
@@ -178,6 +230,7 @@ func printSnapshotsJSON(stdout io.Writer, list restic.Snapshots) error {
k := Snapshot{
Snapshot: sn,
ID: sn.ID(),
+ ShortID: sn.ID().Str(),
}
snapshots = append(snapshots, k)
}
diff --git a/cmd/restic/cmd_tag.go b/cmd/restic/cmd_tag.go
index 2c70399e5..a07d627c5 100644
--- a/cmd/restic/cmd_tag.go
+++ b/cmd/restic/cmd_tag.go
@@ -113,7 +113,7 @@ func runTag(opts TagOptions, gopts GlobalOptions, args []string) error {
}
if !gopts.NoLock {
- Verbosef("Create exclusive lock for repository\n")
+ Verbosef("create exclusive lock for repository\n")
lock, err := lockRepoExclusive(repo)
defer unlockRepo(lock)
if err != nil {
@@ -135,9 +135,9 @@ func runTag(opts TagOptions, gopts GlobalOptions, args []string) error {
}
}
if changeCnt == 0 {
- Verbosef("No snapshots were modified\n")
+ Verbosef("no snapshots were modified\n")
} else {
- Verbosef("Modified tags on %v snapshots\n", changeCnt)
+ Verbosef("modified tags on %v snapshots\n", changeCnt)
}
return nil
}
diff --git a/cmd/restic/exclude.go b/cmd/restic/exclude.go
index 369c4df9a..0a6d8bcec 100644
--- a/cmd/restic/exclude.go
+++ b/cmd/restic/exclude.go
@@ -7,13 +7,59 @@ import (
"os"
"path/filepath"
"strings"
+ "sync"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/filter"
"github.com/restic/restic/internal/fs"
+ "github.com/restic/restic/internal/repository"
)
+type rejectionCache struct {
+ m map[string]bool
+ mtx sync.Mutex
+}
+
+// Lock locks the mutex in rc.
+func (rc *rejectionCache) Lock() {
+ if rc != nil {
+ rc.mtx.Lock()
+ }
+}
+
+// Unlock unlocks the mutex in rc.
+func (rc *rejectionCache) Unlock() {
+ if rc != nil {
+ rc.mtx.Unlock()
+ }
+}
+
+// Get returns the last stored value for dir and a second boolean that
+// indicates whether that value was actually written to the cache. It is the
+// caller's responsibility to call rc.Lock and rc.Unlock before using this
+// method, otherwise data races may occur.
+func (rc *rejectionCache) Get(dir string) (bool, bool) {
+ if rc == nil || rc.m == nil {
+ return false, false
+ }
+ v, ok := rc.m[dir]
+ return v, ok
+}
+
+// Store stores a new value for dir. It is the caller's responsibility to call
+// rc.Lock and rc.Unlock before using this method, otherwise data races may
+// occur.
+func (rc *rejectionCache) Store(dir string, rejected bool) {
+ if rc == nil {
+ return
+ }
+ if rc.m == nil {
+ rc.m = make(map[string]bool)
+ }
+ rc.m[dir] = rejected
+}
+
// RejectFunc is a function that takes a filename and os.FileInfo of a
// file that would be included in the backup. The function returns true if it
// should be excluded (rejected) from the backup.
@@ -41,8 +87,10 @@ func rejectByPattern(patterns []string) RejectFunc {
// should be excluded. The RejectFunc considers a file to be excluded when
// it resides in a directory with an exclusion file, that is specified by
// excludeFileSpec in the form "filename[:content]". The returned error is
-// non-nil if the filename component of excludeFileSpec is empty.
-func rejectIfPresent(excludeFileSpec string) (RejectFunc, error) {
+// non-nil if the filename component of excludeFileSpec is empty. If rc is
+// non-nil, it is going to be used in the RejectFunc to expedite the evaluation
+// of a directory based on previous visits.
+func rejectIfPresent(excludeFileSpec string, rc *rejectionCache) (RejectFunc, error) {
if excludeFileSpec == "" {
return nil, errors.New("name for exclusion tagfile is empty")
}
@@ -59,15 +107,17 @@ func rejectIfPresent(excludeFileSpec string) (RejectFunc, error) {
}
debug.Log("using %q as exclusion tagfile", tf)
fn := func(filename string, _ os.FileInfo) bool {
- return isExcludedByFile(filename, tf, tc)
+ return isExcludedByFile(filename, tf, tc, rc)
}
return fn, nil
}
// isExcludedByFile interprets filename as a path and returns true if that file
// is in a excluded directory. A directory is identified as excluded if it contains a
-// tagfile which bears the name specified in tagFilename and starts with header.
-func isExcludedByFile(filename, tagFilename, header string) bool {
+// tagfile which bears the name specified in tagFilename and starts with
+// header. If rc is non-nil, it is used to expedite the evaluation of a
+// directory based on previous visits.
+func isExcludedByFile(filename, tagFilename, header string, rc *rejectionCache) bool {
if tagFilename == "" {
return false
}
@@ -75,6 +125,19 @@ func isExcludedByFile(filename, tagFilename, header string) bool {
if base == tagFilename {
return false // do not exclude the tagfile itself
}
+ rc.Lock()
+ defer rc.Unlock()
+
+ rejected, visited := rc.Get(dir)
+ if visited {
+ return rejected
+ }
+ rejected = isDirExcludedByFile(dir, tagFilename, header)
+ rc.Store(dir, rejected)
+ return rejected
+}
+
+func isDirExcludedByFile(dir, tagFilename, header string) bool {
tf := filepath.Join(dir, tagFilename)
_, err := fs.Lstat(tf)
if os.IsNotExist(err) {
@@ -177,3 +240,27 @@ func rejectByDevice(samples []string) (RejectFunc, error) {
panic(fmt.Sprintf("item %v, device id %v not found, allowedDevs: %v", item, id, allowed))
}, nil
}
+
+// rejectResticCache returns a RejectFunc that rejects the restic cache
+// directory (if set).
+func rejectResticCache(repo *repository.Repository) (RejectFunc, error) {
+ if repo.Cache == nil {
+ return func(string, os.FileInfo) bool {
+ return false
+ }, nil
+ }
+ cacheBase := repo.Cache.BaseDir()
+
+ if cacheBase == "" {
+ return nil, errors.New("cacheBase is empty string")
+ }
+
+ return func(item string, _ os.FileInfo) bool {
+ if fs.HasPathPrefix(cacheBase, item) {
+ debug.Log("rejecting restic cache directory %v", item)
+ return true
+ }
+
+ return false
+ }, nil
+}
diff --git a/cmd/restic/exclude_test.go b/cmd/restic/exclude_test.go
index ec9b0bade..5cadf6e9b 100644
--- a/cmd/restic/exclude_test.go
+++ b/cmd/restic/exclude_test.go
@@ -76,7 +76,7 @@ func TestIsExcludedByFile(t *testing.T) {
if tc.content == "" {
h = ""
}
- if got := isExcludedByFile(foo, tagFilename, h); tc.want != got {
+ if got := isExcludedByFile(foo, tagFilename, h, nil); tc.want != got {
t.Fatalf("expected %v, got %v", tc.want, got)
}
})
diff --git a/cmd/restic/excludes b/cmd/restic/excludes
new file mode 100644
index 000000000..ab2f4fd31
--- /dev/null
+++ b/cmd/restic/excludes
@@ -0,0 +1,31 @@
+/boot
+/dev
+/etc
+/home
+/lost+found
+/mnt
+/proc
+/root
+/run
+/sys
+/tmp
+/usr
+/var
+/opt/android-sdk
+/opt/bullet
+/opt/dex2jar
+/opt/jameica
+/opt/google
+/opt/JDownloader
+/opt/JDownloaderScripts
+/opt/opencascade
+/opt/vagrant
+/opt/visual-studio-code
+/opt/vtk6
+/bin
+/fonts*
+/srv/ftp
+/srv/http
+/sbin
+/lib
+/lib64
diff --git a/cmd/restic/global.go b/cmd/restic/global.go
index ccfe1b9c0..70d031289 100644
--- a/cmd/restic/global.go
+++ b/cmd/restic/global.go
@@ -9,7 +9,9 @@ import (
"runtime"
"strings"
"syscall"
+ "time"
+ "github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/backend/azure"
"github.com/restic/restic/internal/backend/b2"
"github.com/restic/restic/internal/backend/gs"
@@ -19,7 +21,9 @@ import (
"github.com/restic/restic/internal/backend/s3"
"github.com/restic/restic/internal/backend/sftp"
"github.com/restic/restic/internal/backend/swift"
+ "github.com/restic/restic/internal/cache"
"github.com/restic/restic/internal/debug"
+ "github.com/restic/restic/internal/limiter"
"github.com/restic/restic/internal/options"
"github.com/restic/restic/internal/repository"
"github.com/restic/restic/internal/restic"
@@ -38,6 +42,12 @@ type GlobalOptions struct {
Quiet bool
NoLock bool
JSON bool
+ CacheDir string
+ NoCache bool
+ CACerts []string
+
+ LimitUploadKb int
+ LimitDownloadKb int
ctx context.Context
password string
@@ -68,7 +78,11 @@ func init() {
f.BoolVarP(&globalOptions.Quiet, "quiet", "q", false, "do not output comprehensive progress report")
f.BoolVar(&globalOptions.NoLock, "no-lock", false, "do not lock the repo, this allows some operations on read-only repos")
f.BoolVarP(&globalOptions.JSON, "json", "", false, "set output mode to JSON for commands that support it")
-
+ f.StringVar(&globalOptions.CacheDir, "cache-dir", "", "set the cache directory")
+ f.BoolVar(&globalOptions.NoCache, "no-cache", false, "do not use a local cache")
+ f.StringSliceVar(&globalOptions.CACerts, "cacert", nil, "path to load root certificates from (default: use system certificates)")
+ f.IntVar(&globalOptions.LimitUploadKb, "limit-upload", 0, "limits uploads to a maximum rate in KiB/s. (default: unlimited)")
+ f.IntVar(&globalOptions.LimitDownloadKb, "limit-download", 0, "limits downloads to a maximum rate in KiB/s. (default: unlimited)")
f.StringSliceVarP(&globalOptions.Options, "option", "o", []string{}, "set extended option (`key=value`, can be specified multiple times)")
restoreTerminal()
@@ -310,6 +324,15 @@ func OpenRepository(opts GlobalOptions) (*repository.Repository, error) {
return nil, err
}
+ if opts.LimitUploadKb > 0 || opts.LimitDownloadKb > 0 {
+ debug.Log("rate limiting backend to %d KiB/s upload and %d KiB/s download", opts.LimitUploadKb, opts.LimitDownloadKb)
+ be = limiter.LimitBackend(be, limiter.NewStaticLimiter(opts.LimitUploadKb, opts.LimitDownloadKb))
+ }
+
+ be = backend.NewRetryBackend(be, 10, func(msg string, err error, d time.Duration) {
+ Warnf("%v returned error, retrying after %v: %v\n", msg, d, err)
+ })
+
s := repository.New(be)
opts.password, err = ReadPassword(opts, "enter password for repository: ")
@@ -322,6 +345,21 @@ func OpenRepository(opts GlobalOptions) (*repository.Repository, error) {
return nil, err
}
+ if stdoutIsTerminal() {
+ Verbosef("password is correct\n")
+ }
+
+ if opts.NoCache {
+ return s, nil
+ }
+
+ cache, err := cache.New(s.Config().ID, opts.CacheDir)
+ if err != nil {
+ Warnf("unable to open cache: %v\n", err)
+ } else {
+ s.UseCache(cache)
+ }
+
return s, nil
}
@@ -466,23 +504,28 @@ func open(s string, opts options.Options) (restic.Backend, error) {
return nil, err
}
+ rt, err := backend.Transport(globalOptions.CACerts)
+ if err != nil {
+ return nil, err
+ }
+
switch loc.Scheme {
case "local":
be, err = local.Open(cfg.(local.Config))
case "sftp":
- be, err = sftp.Open(cfg.(sftp.Config))
+ be, err = sftp.Open(cfg.(sftp.Config), SuspendSignalHandler, InstallSignalHandler)
case "s3":
- be, err = s3.Open(cfg.(s3.Config))
+ be, err = s3.Open(cfg.(s3.Config), rt)
case "gs":
be, err = gs.Open(cfg.(gs.Config))
case "azure":
- be, err = azure.Open(cfg.(azure.Config))
+ be, err = azure.Open(cfg.(azure.Config), rt)
case "swift":
- be, err = swift.Open(cfg.(swift.Config))
+ be, err = swift.Open(cfg.(swift.Config), rt)
case "b2":
- be, err = b2.Open(cfg.(b2.Config))
+ be, err = b2.Open(cfg.(b2.Config), rt)
case "rest":
- be, err = rest.Open(cfg.(rest.Config))
+ be, err = rest.Open(cfg.(rest.Config), rt)
default:
return nil, errors.Fatalf("invalid backend: %q", loc.Scheme)
@@ -518,23 +561,28 @@ func create(s string, opts options.Options) (restic.Backend, error) {
return nil, err
}
+ rt, err := backend.Transport(globalOptions.CACerts)
+ if err != nil {
+ return nil, err
+ }
+
switch loc.Scheme {
case "local":
return local.Create(cfg.(local.Config))
case "sftp":
- return sftp.Create(cfg.(sftp.Config))
+ return sftp.Create(cfg.(sftp.Config), SuspendSignalHandler, InstallSignalHandler)
case "s3":
- return s3.Create(cfg.(s3.Config))
+ return s3.Create(cfg.(s3.Config), rt)
case "gs":
return gs.Create(cfg.(gs.Config))
case "azure":
- return azure.Create(cfg.(azure.Config))
+ return azure.Create(cfg.(azure.Config), rt)
case "swift":
- return swift.Open(cfg.(swift.Config))
+ return swift.Open(cfg.(swift.Config), rt)
case "b2":
- return b2.Create(cfg.(b2.Config))
+ return b2.Create(cfg.(b2.Config), rt)
case "rest":
- return rest.Create(cfg.(rest.Config))
+ return rest.Create(cfg.(rest.Config), rt)
}
debug.Log("invalid repository scheme: %v", s)
diff --git a/cmd/restic/integration_fuse_test.go b/cmd/restic/integration_fuse_test.go
index 4d70212a5..d4922b8b9 100644
--- a/cmd/restic/integration_fuse_test.go
+++ b/cmd/restic/integration_fuse_test.go
@@ -13,7 +13,7 @@ import (
"github.com/restic/restic/internal/repository"
"github.com/restic/restic/internal/restic"
- . "github.com/restic/restic/internal/test"
+ rtest "github.com/restic/restic/internal/test"
)
const (
@@ -56,7 +56,7 @@ func waitForMount(t testing.TB, dir string) {
func testRunMount(t testing.TB, gopts GlobalOptions, dir string) {
opts := MountOptions{}
- OK(t, runMount(opts, gopts, []string{dir}))
+ rtest.OK(t, runMount(opts, gopts, []string{dir}))
}
func testRunUmount(t testing.TB, gopts GlobalOptions, dir string) {
@@ -75,14 +75,14 @@ func testRunUmount(t testing.TB, gopts GlobalOptions, dir string) {
func listSnapshots(t testing.TB, dir string) []string {
snapshotsDir, err := os.Open(filepath.Join(dir, "snapshots"))
- OK(t, err)
+ rtest.OK(t, err)
names, err := snapshotsDir.Readdirnames(-1)
- OK(t, err)
- OK(t, snapshotsDir.Close())
+ rtest.OK(t, err)
+ rtest.OK(t, snapshotsDir.Close())
return names
}
-func checkSnapshots(t testing.TB, global GlobalOptions, repo *repository.Repository, mountpoint, repodir string, snapshotIDs restic.IDs) {
+func checkSnapshots(t testing.TB, global GlobalOptions, repo *repository.Repository, mountpoint, repodir string, snapshotIDs restic.IDs, expectedSnapshotsInFuseDir int) {
t.Logf("checking for %d snapshots: %v", len(snapshotIDs), snapshotIDs)
go testRunMount(t, global, mountpoint)
@@ -98,18 +98,28 @@ func checkSnapshots(t testing.TB, global GlobalOptions, repo *repository.Reposit
namesInSnapshots := listSnapshots(t, mountpoint)
t.Logf("found %v snapshots in fuse mount: %v", len(namesInSnapshots), namesInSnapshots)
- Assert(t,
- len(namesInSnapshots) == len(snapshotIDs),
- "Invalid number of snapshots: expected %d, got %d", len(snapshotIDs), len(namesInSnapshots))
+ rtest.Assert(t,
+ expectedSnapshotsInFuseDir == len(namesInSnapshots),
+ "Invalid number of snapshots: expected %d, got %d", expectedSnapshotsInFuseDir, len(namesInSnapshots))
namesMap := make(map[string]bool)
for _, name := range namesInSnapshots {
namesMap[name] = false
}
+ // Is "latest" present?
+ if len(namesMap) != 0 {
+ _, ok := namesMap["latest"]
+ if !ok {
+ t.Errorf("Symlink latest isn't present in fuse dir")
+ } else {
+ namesMap["latest"] = true
+ }
+ }
+
for _, id := range snapshotIDs {
snapshot, err := restic.LoadSnapshot(context.TODO(), repo, id)
- OK(t, err)
+ rtest.OK(t, err)
ts := snapshot.Time.Format(time.RFC3339)
present, ok := namesMap[ts]
@@ -133,12 +143,12 @@ func checkSnapshots(t testing.TB, global GlobalOptions, repo *repository.Reposit
}
for name, present := range namesMap {
- Assert(t, present, "Directory %s is present in fuse dir but is not a snapshot", name)
+ rtest.Assert(t, present, "Directory %s is present in fuse dir but is not a snapshot", name)
}
}
func TestMount(t *testing.T) {
- if !RunFuseTest {
+ if !rtest.RunFuseTest {
t.Skip("Skipping fuse tests")
}
@@ -148,53 +158,53 @@ func TestMount(t *testing.T) {
testRunInit(t, env.gopts)
repo, err := OpenRepository(env.gopts)
- OK(t, err)
+ rtest.OK(t, err)
// We remove the mountpoint now to check that cmdMount creates it
- RemoveAll(t, env.mountpoint)
+ rtest.RemoveAll(t, env.mountpoint)
- checkSnapshots(t, env.gopts, repo, env.mountpoint, env.repo, []restic.ID{})
+ checkSnapshots(t, env.gopts, repo, env.mountpoint, env.repo, []restic.ID{}, 0)
- SetupTarTestFixture(t, env.testdata, filepath.Join("testdata", "backup-data.tar.gz"))
+ rtest.SetupTarTestFixture(t, env.testdata, filepath.Join("testdata", "backup-data.tar.gz"))
// first backup
testRunBackup(t, []string{env.testdata}, BackupOptions{}, env.gopts)
snapshotIDs := testRunList(t, "snapshots", env.gopts)
- Assert(t, len(snapshotIDs) == 1,
+ rtest.Assert(t, len(snapshotIDs) == 1,
"expected one snapshot, got %v", snapshotIDs)
- checkSnapshots(t, env.gopts, repo, env.mountpoint, env.repo, snapshotIDs)
+ checkSnapshots(t, env.gopts, repo, env.mountpoint, env.repo, snapshotIDs, 2)
// second backup, implicit incremental
testRunBackup(t, []string{env.testdata}, BackupOptions{}, env.gopts)
snapshotIDs = testRunList(t, "snapshots", env.gopts)
- Assert(t, len(snapshotIDs) == 2,
+ rtest.Assert(t, len(snapshotIDs) == 2,
"expected two snapshots, got %v", snapshotIDs)
- checkSnapshots(t, env.gopts, repo, env.mountpoint, env.repo, snapshotIDs)
+ checkSnapshots(t, env.gopts, repo, env.mountpoint, env.repo, snapshotIDs, 3)
// third backup, explicit incremental
bopts := BackupOptions{Parent: snapshotIDs[0].String()}
testRunBackup(t, []string{env.testdata}, bopts, env.gopts)
snapshotIDs = testRunList(t, "snapshots", env.gopts)
- Assert(t, len(snapshotIDs) == 3,
+ rtest.Assert(t, len(snapshotIDs) == 3,
"expected three snapshots, got %v", snapshotIDs)
- checkSnapshots(t, env.gopts, repo, env.mountpoint, env.repo, snapshotIDs)
+ checkSnapshots(t, env.gopts, repo, env.mountpoint, env.repo, snapshotIDs, 4)
}
func TestMountSameTimestamps(t *testing.T) {
- if !RunFuseTest {
+ if !rtest.RunFuseTest {
t.Skip("Skipping fuse tests")
}
env, cleanup := withTestEnvironment(t)
defer cleanup()
- SetupTarTestFixture(t, env.base, filepath.Join("testdata", "repo-same-timestamps.tar.gz"))
+ rtest.SetupTarTestFixture(t, env.base, filepath.Join("testdata", "repo-same-timestamps.tar.gz"))
repo, err := OpenRepository(env.gopts)
- OK(t, err)
+ rtest.OK(t, err)
ids := []restic.ID{
restic.TestParseID("280303689e5027328889a06d718b729e96a1ce6ae9ef8290bff550459ae611ee"),
@@ -202,5 +212,5 @@ func TestMountSameTimestamps(t *testing.T) {
restic.TestParseID("5fd0d8b2ef0fa5d23e58f1e460188abb0f525c0f0c4af8365a1280c807a80a1b"),
}
- checkSnapshots(t, env.gopts, repo, env.mountpoint, env.repo, ids)
+ checkSnapshots(t, env.gopts, repo, env.mountpoint, env.repo, ids, 4)
}
diff --git a/cmd/restic/integration_helpers_test.go b/cmd/restic/integration_helpers_test.go
index 3d0ad51ba..2fb026512 100644
--- a/cmd/restic/integration_helpers_test.go
+++ b/cmd/restic/integration_helpers_test.go
@@ -11,7 +11,7 @@ import (
"github.com/restic/restic/internal/options"
"github.com/restic/restic/internal/repository"
- . "github.com/restic/restic/internal/test"
+ rtest "github.com/restic/restic/internal/test"
)
type dirEntry struct {
@@ -71,7 +71,17 @@ func sameModTime(fi1, fi2 os.FileInfo) bool {
}
}
- return fi1.ModTime().Equal(fi2.ModTime())
+ same := fi1.ModTime().Equal(fi2.ModTime())
+ if !same && (runtime.GOOS == "darwin" || runtime.GOOS == "openbsd") {
+ // Allow up to 1μs difference, because macOS <10.13 cannot restore
+ // with nanosecond precision and the current version of Go (1.9.2)
+ // does not yet support the new syscall. (#1087)
+ mt1 := fi1.ModTime()
+ mt2 := fi2.ModTime()
+ usecDiff := (mt1.Nanosecond()-mt2.Nanosecond())/1000 + (mt1.Second()-mt2.Second())*1000000
+ same = usecDiff <= 1 && usecDiff >= -1
+ }
+ return same
}
// directoriesEqualContents checks if both directories contain exactly the same
@@ -174,14 +184,14 @@ type testEnvironment struct {
// withTestEnvironment creates a test environment and returns a cleanup
// function which removes it.
func withTestEnvironment(t testing.TB) (env *testEnvironment, cleanup func()) {
- if !RunIntegrationTest {
+ if !rtest.RunIntegrationTest {
t.Skip("integration tests disabled")
}
repository.TestUseLowSecurityKDFParameters(t)
- tempdir, err := ioutil.TempDir(TestTempDir, "restic-test-")
- OK(t, err)
+ tempdir, err := ioutil.TempDir(rtest.TestTempDir, "restic-test-")
+ rtest.OK(t, err)
env = &testEnvironment{
base: tempdir,
@@ -191,16 +201,17 @@ func withTestEnvironment(t testing.TB) (env *testEnvironment, cleanup func()) {
mountpoint: filepath.Join(tempdir, "mount"),
}
- OK(t, os.MkdirAll(env.mountpoint, 0700))
- OK(t, os.MkdirAll(env.testdata, 0700))
- OK(t, os.MkdirAll(env.cache, 0700))
- OK(t, os.MkdirAll(env.repo, 0700))
+ rtest.OK(t, os.MkdirAll(env.mountpoint, 0700))
+ rtest.OK(t, os.MkdirAll(env.testdata, 0700))
+ rtest.OK(t, os.MkdirAll(env.cache, 0700))
+ rtest.OK(t, os.MkdirAll(env.repo, 0700))
env.gopts = GlobalOptions{
Repo: env.repo,
Quiet: true,
+ CacheDir: env.cache,
ctx: context.Background(),
- password: TestPassword,
+ password: rtest.TestPassword,
stdout: os.Stdout,
stderr: os.Stderr,
extended: make(options.Options),
@@ -210,11 +221,11 @@ func withTestEnvironment(t testing.TB) (env *testEnvironment, cleanup func()) {
globalOptions = env.gopts
cleanup = func() {
- if !TestCleanupTempDirs {
+ if !rtest.TestCleanupTempDirs {
t.Logf("leaving temporary directory %v used for test", tempdir)
return
}
- RemoveAll(t, tempdir)
+ rtest.RemoveAll(t, tempdir)
}
return env, cleanup
diff --git a/cmd/restic/integration_test.go b/cmd/restic/integration_test.go
index 449b1c93e..dbc48703e 100644
--- a/cmd/restic/integration_test.go
+++ b/cmd/restic/integration_test.go
@@ -17,13 +17,12 @@ import (
"testing"
"time"
- "github.com/restic/restic/internal/errors"
- "github.com/restic/restic/internal/restic"
-
"github.com/restic/restic/internal/debug"
+ "github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/filter"
"github.com/restic/restic/internal/repository"
- . "github.com/restic/restic/internal/test"
+ "github.com/restic/restic/internal/restic"
+ rtest "github.com/restic/restic/internal/test"
)
func parseIDsFromReader(t testing.TB, rd io.Reader) restic.IDs {
@@ -47,13 +46,13 @@ func testRunInit(t testing.TB, opts GlobalOptions) {
repository.TestUseLowSecurityKDFParameters(t)
restic.TestSetLockTimeout(t, 0)
- OK(t, runInit(opts, nil))
+ rtest.OK(t, runInit(opts, nil))
t.Logf("repository initialized at %v", opts.Repo)
}
func testRunBackup(t testing.TB, target []string, opts BackupOptions, gopts GlobalOptions) {
t.Logf("backing up %v", target)
- OK(t, runBackup(opts, gopts, target))
+ rtest.OK(t, runBackup(opts, gopts, target))
}
func testRunList(t testing.TB, tpe string, opts GlobalOptions) restic.IDs {
@@ -63,7 +62,7 @@ func testRunList(t testing.TB, tpe string, opts GlobalOptions) restic.IDs {
globalOptions.stdout = os.Stdout
}()
- OK(t, runList(opts, []string{tpe}))
+ rtest.OK(t, runList(opts, []string{tpe}))
return parseIDsFromReader(t, buf)
}
@@ -78,7 +77,7 @@ func testRunRestoreLatest(t testing.TB, gopts GlobalOptions, dir string, paths [
Paths: paths,
}
- OK(t, runRestore(opts, gopts, []string{"latest"}))
+ rtest.OK(t, runRestore(opts, gopts, []string{"latest"}))
}
func testRunRestoreExcludes(t testing.TB, gopts GlobalOptions, dir string, snapshotID restic.ID, excludes []string) {
@@ -87,7 +86,7 @@ func testRunRestoreExcludes(t testing.TB, gopts GlobalOptions, dir string, snaps
Exclude: excludes,
}
- OK(t, runRestore(opts, gopts, []string{snapshotID.String()}))
+ rtest.OK(t, runRestore(opts, gopts, []string{snapshotID.String()}))
}
func testRunRestoreIncludes(t testing.TB, gopts GlobalOptions, dir string, snapshotID restic.ID, includes []string) {
@@ -96,7 +95,7 @@ func testRunRestoreIncludes(t testing.TB, gopts GlobalOptions, dir string, snaps
Include: includes,
}
- OK(t, runRestore(opts, gopts, []string{snapshotID.String()}))
+ rtest.OK(t, runRestore(opts, gopts, []string{snapshotID.String()}))
}
func testRunCheck(t testing.TB, gopts GlobalOptions) {
@@ -104,7 +103,7 @@ func testRunCheck(t testing.TB, gopts GlobalOptions) {
ReadData: true,
CheckUnused: true,
}
- OK(t, runCheck(opts, gopts, nil))
+ rtest.OK(t, runCheck(opts, gopts, nil))
}
func testRunCheckOutput(gopts GlobalOptions) (string, error) {
@@ -129,7 +128,7 @@ func testRunRebuildIndex(t testing.TB, gopts GlobalOptions) {
globalOptions.stdout = os.Stdout
}()
- OK(t, runRebuildIndex(gopts))
+ rtest.OK(t, runRebuildIndex(gopts))
}
func testRunLs(t testing.TB, gopts GlobalOptions, snapshotID string) []string {
@@ -144,7 +143,7 @@ func testRunLs(t testing.TB, gopts GlobalOptions, snapshotID string) []string {
opts := LsOptions{}
- OK(t, runLs(opts, gopts, []string{snapshotID}))
+ rtest.OK(t, runLs(opts, gopts, []string{snapshotID}))
return strings.Split(string(buf.Bytes()), "\n")
}
@@ -160,7 +159,7 @@ func testRunFind(t testing.TB, wantJSON bool, gopts GlobalOptions, pattern strin
opts := FindOptions{}
- OK(t, runFind(opts, gopts, []string{pattern}))
+ rtest.OK(t, runFind(opts, gopts, []string{pattern}))
return buf.Bytes()
}
@@ -176,10 +175,10 @@ func testRunSnapshots(t testing.TB, gopts GlobalOptions) (newest *Snapshot, snap
opts := SnapshotOptions{}
- OK(t, runSnapshots(opts, globalOptions, []string{}))
+ rtest.OK(t, runSnapshots(opts, globalOptions, []string{}))
snapshots := []Snapshot{}
- OK(t, json.Unmarshal(buf.Bytes(), &snapshots))
+ rtest.OK(t, json.Unmarshal(buf.Bytes(), &snapshots))
snapmap = make(map[restic.ID]Snapshot, len(snapshots))
for _, sn := range snapshots {
@@ -193,11 +192,11 @@ func testRunSnapshots(t testing.TB, gopts GlobalOptions) (newest *Snapshot, snap
func testRunForget(t testing.TB, gopts GlobalOptions, args ...string) {
opts := ForgetOptions{}
- OK(t, runForget(opts, gopts, args))
+ rtest.OK(t, runForget(opts, gopts, args))
}
func testRunPrune(t testing.TB, gopts GlobalOptions) {
- OK(t, runPrune(gopts))
+ rtest.OK(t, runPrune(gopts))
}
func TestBackup(t *testing.T) {
@@ -210,18 +209,18 @@ func TestBackup(t *testing.T) {
t.Skipf("unable to find data file %q, skipping", datafile)
return
}
- OK(t, err)
- OK(t, fd.Close())
+ rtest.OK(t, err)
+ rtest.OK(t, fd.Close())
testRunInit(t, env.gopts)
- SetupTarTestFixture(t, env.testdata, datafile)
+ rtest.SetupTarTestFixture(t, env.testdata, datafile)
opts := BackupOptions{}
// first backup
testRunBackup(t, []string{env.testdata}, opts, env.gopts)
snapshotIDs := testRunList(t, "snapshots", env.gopts)
- Assert(t, len(snapshotIDs) == 1,
+ rtest.Assert(t, len(snapshotIDs) == 1,
"expected one snapshot, got %v", snapshotIDs)
testRunCheck(t, env.gopts)
@@ -230,7 +229,7 @@ func TestBackup(t *testing.T) {
// second backup, implicit incremental
testRunBackup(t, []string{env.testdata}, opts, env.gopts)
snapshotIDs = testRunList(t, "snapshots", env.gopts)
- Assert(t, len(snapshotIDs) == 2,
+ rtest.Assert(t, len(snapshotIDs) == 2,
"expected two snapshots, got %v", snapshotIDs)
stat2 := dirStats(env.repo)
@@ -244,7 +243,7 @@ func TestBackup(t *testing.T) {
opts.Parent = snapshotIDs[0].String()
testRunBackup(t, []string{env.testdata}, opts, env.gopts)
snapshotIDs = testRunList(t, "snapshots", env.gopts)
- Assert(t, len(snapshotIDs) == 3,
+ rtest.Assert(t, len(snapshotIDs) == 3,
"expected three snapshots, got %v", snapshotIDs)
stat3 := dirStats(env.repo)
@@ -258,7 +257,7 @@ func TestBackup(t *testing.T) {
restoredir := filepath.Join(env.base, fmt.Sprintf("restore%d", i))
t.Logf("restoring snapshot %v to %v", snapshotID.Str(), restoredir)
testRunRestore(t, env.gopts, restoredir, snapshotIDs[0])
- Assert(t, directoriesEqualContents(env.testdata, filepath.Join(restoredir, "testdata")),
+ rtest.Assert(t, directoriesEqualContents(env.testdata, filepath.Join(restoredir, "testdata")),
"directories are not equal")
}
@@ -275,10 +274,10 @@ func TestBackupNonExistingFile(t *testing.T) {
t.Skipf("unable to find data file %q, skipping", datafile)
return
}
- OK(t, err)
- OK(t, fd.Close())
+ rtest.OK(t, err)
+ rtest.OK(t, fd.Close())
- SetupTarTestFixture(t, env.testdata, datafile)
+ rtest.SetupTarTestFixture(t, env.testdata, datafile)
testRunInit(t, env.gopts)
globalOptions.stderr = ioutil.Discard
@@ -309,10 +308,10 @@ func TestBackupMissingFile1(t *testing.T) {
t.Skipf("unable to find data file %q, skipping", datafile)
return
}
- OK(t, err)
- OK(t, fd.Close())
+ rtest.OK(t, err)
+ rtest.OK(t, fd.Close())
- SetupTarTestFixture(t, env.testdata, datafile)
+ rtest.SetupTarTestFixture(t, env.testdata, datafile)
testRunInit(t, env.gopts)
globalOptions.stderr = ioutil.Discard
@@ -331,7 +330,7 @@ func TestBackupMissingFile1(t *testing.T) {
t.Logf("in hook, removing test file testdata/0/0/9/37")
ranHook = true
- OK(t, os.Remove(filepath.Join(env.testdata, "0", "0", "9", "37")))
+ rtest.OK(t, os.Remove(filepath.Join(env.testdata, "0", "0", "9", "37")))
})
opts := BackupOptions{}
@@ -339,7 +338,7 @@ func TestBackupMissingFile1(t *testing.T) {
testRunBackup(t, []string{env.testdata}, opts, env.gopts)
testRunCheck(t, env.gopts)
- Assert(t, ranHook, "hook did not run")
+ rtest.Assert(t, ranHook, "hook did not run")
debug.RemoveHook("pipe.walk1")
}
@@ -353,10 +352,10 @@ func TestBackupMissingFile2(t *testing.T) {
t.Skipf("unable to find data file %q, skipping", datafile)
return
}
- OK(t, err)
- OK(t, fd.Close())
+ rtest.OK(t, err)
+ rtest.OK(t, fd.Close())
- SetupTarTestFixture(t, env.testdata, datafile)
+ rtest.SetupTarTestFixture(t, env.testdata, datafile)
testRunInit(t, env.gopts)
@@ -376,7 +375,7 @@ func TestBackupMissingFile2(t *testing.T) {
t.Logf("in hook, removing test file testdata/0/0/9/37")
ranHook = true
- OK(t, os.Remove(filepath.Join(env.testdata, "0", "0", "9", "37")))
+ rtest.OK(t, os.Remove(filepath.Join(env.testdata, "0", "0", "9", "37")))
})
opts := BackupOptions{}
@@ -384,7 +383,7 @@ func TestBackupMissingFile2(t *testing.T) {
testRunBackup(t, []string{env.testdata}, opts, env.gopts)
testRunCheck(t, env.gopts)
- Assert(t, ranHook, "hook did not run")
+ rtest.Assert(t, ranHook, "hook did not run")
debug.RemoveHook("pipe.walk2")
}
@@ -398,10 +397,10 @@ func TestBackupChangedFile(t *testing.T) {
t.Skipf("unable to find data file %q, skipping", datafile)
return
}
- OK(t, err)
- OK(t, fd.Close())
+ rtest.OK(t, err)
+ rtest.OK(t, fd.Close())
- SetupTarTestFixture(t, env.testdata, datafile)
+ rtest.SetupTarTestFixture(t, env.testdata, datafile)
testRunInit(t, env.gopts)
@@ -423,7 +422,7 @@ func TestBackupChangedFile(t *testing.T) {
t.Logf("in hook, modifying test file %v", modFile)
ranHook = true
- OK(t, ioutil.WriteFile(modFile, []byte("modified"), 0600))
+ rtest.OK(t, ioutil.WriteFile(modFile, []byte("modified"), 0600))
})
opts := BackupOptions{}
@@ -431,7 +430,7 @@ func TestBackupChangedFile(t *testing.T) {
testRunBackup(t, []string{env.testdata}, opts, env.gopts)
testRunCheck(t, env.gopts)
- Assert(t, ranHook, "hook did not run")
+ rtest.Assert(t, ranHook, "hook did not run")
debug.RemoveHook("archiver.SaveFile")
}
@@ -445,10 +444,10 @@ func TestBackupDirectoryError(t *testing.T) {
t.Skipf("unable to find data file %q, skipping", datafile)
return
}
- OK(t, err)
- OK(t, fd.Close())
+ rtest.OK(t, err)
+ rtest.OK(t, fd.Close())
- SetupTarTestFixture(t, env.testdata, datafile)
+ rtest.SetupTarTestFixture(t, env.testdata, datafile)
testRunInit(t, env.gopts)
@@ -472,22 +471,22 @@ func TestBackupDirectoryError(t *testing.T) {
t.Logf("in hook, removing test file %v", testdir)
ranHook = true
- OK(t, os.RemoveAll(testdir))
+ rtest.OK(t, os.RemoveAll(testdir))
})
testRunBackup(t, []string{filepath.Join(env.testdata, "0", "0")}, BackupOptions{}, env.gopts)
testRunCheck(t, env.gopts)
- Assert(t, ranHook, "hook did not run")
+ rtest.Assert(t, ranHook, "hook did not run")
debug.RemoveHook("pipe.walk2")
snapshots := testRunList(t, "snapshots", env.gopts)
- Assert(t, len(snapshots) > 0,
+ rtest.Assert(t, len(snapshots) > 0,
"no snapshots found in repo (%v)", datafile)
files := testRunLs(t, env.gopts, snapshots[0].String())
- Assert(t, len(files) > 1, "snapshot is empty")
+ rtest.Assert(t, len(files) > 1, "snapshot is empty")
}
func includes(haystack []string, needle string) bool {
@@ -539,13 +538,13 @@ func TestBackupExclude(t *testing.T) {
for _, filename := range backupExcludeFilenames {
fp := filepath.Join(datadir, filename)
- OK(t, os.MkdirAll(filepath.Dir(fp), 0755))
+ rtest.OK(t, os.MkdirAll(filepath.Dir(fp), 0755))
f, err := os.Create(fp)
- OK(t, err)
+ rtest.OK(t, err)
fmt.Fprintf(f, filename)
- OK(t, f.Close())
+ rtest.OK(t, f.Close())
}
snapshots := make(map[string]struct{})
@@ -555,23 +554,23 @@ func TestBackupExclude(t *testing.T) {
testRunBackup(t, []string{datadir}, opts, env.gopts)
snapshots, snapshotID := lastSnapshot(snapshots, loadSnapshotMap(t, env.gopts))
files := testRunLs(t, env.gopts, snapshotID)
- Assert(t, includes(files, filepath.Join(string(filepath.Separator), "testdata", "foo.tar.gz")),
+ rtest.Assert(t, includes(files, filepath.Join(string(filepath.Separator), "testdata", "foo.tar.gz")),
"expected file %q in first snapshot, but it's not included", "foo.tar.gz")
opts.Excludes = []string{"*.tar.gz"}
testRunBackup(t, []string{datadir}, opts, env.gopts)
snapshots, snapshotID = lastSnapshot(snapshots, loadSnapshotMap(t, env.gopts))
files = testRunLs(t, env.gopts, snapshotID)
- Assert(t, !includes(files, filepath.Join(string(filepath.Separator), "testdata", "foo.tar.gz")),
+ rtest.Assert(t, !includes(files, filepath.Join(string(filepath.Separator), "testdata", "foo.tar.gz")),
"expected file %q not in first snapshot, but it's included", "foo.tar.gz")
opts.Excludes = []string{"*.tar.gz", "private/secret"}
testRunBackup(t, []string{datadir}, opts, env.gopts)
_, snapshotID = lastSnapshot(snapshots, loadSnapshotMap(t, env.gopts))
files = testRunLs(t, env.gopts, snapshotID)
- Assert(t, !includes(files, filepath.Join(string(filepath.Separator), "testdata", "foo.tar.gz")),
+ rtest.Assert(t, !includes(files, filepath.Join(string(filepath.Separator), "testdata", "foo.tar.gz")),
"expected file %q not in first snapshot, but it's included", "foo.tar.gz")
- Assert(t, !includes(files, filepath.Join(string(filepath.Separator), "testdata", "private", "secret", "passwords.txt")),
+ rtest.Assert(t, !includes(files, filepath.Join(string(filepath.Separator), "testdata", "private", "secret", "passwords.txt")),
"expected file %q not in first snapshot, but it's included", "passwords.txt")
}
@@ -612,7 +611,7 @@ func TestIncrementalBackup(t *testing.T) {
datadir := filepath.Join(env.base, "testdata")
testfile := filepath.Join(datadir, "testfile")
- OK(t, appendRandomData(testfile, incrementalFirstWrite))
+ rtest.OK(t, appendRandomData(testfile, incrementalFirstWrite))
opts := BackupOptions{}
@@ -620,7 +619,7 @@ func TestIncrementalBackup(t *testing.T) {
testRunCheck(t, env.gopts)
stat1 := dirStats(env.repo)
- OK(t, appendRandomData(testfile, incrementalSecondWrite))
+ rtest.OK(t, appendRandomData(testfile, incrementalSecondWrite))
testRunBackup(t, []string{datadir}, opts, env.gopts)
testRunCheck(t, env.gopts)
@@ -630,7 +629,7 @@ func TestIncrementalBackup(t *testing.T) {
}
t.Logf("repository grown by %d bytes", stat2.size-stat1.size)
- OK(t, appendRandomData(testfile, incrementalThirdWrite))
+ rtest.OK(t, appendRandomData(testfile, incrementalThirdWrite))
testRunBackup(t, []string{datadir}, opts, env.gopts)
testRunCheck(t, env.gopts)
@@ -647,28 +646,32 @@ func TestBackupTags(t *testing.T) {
datafile := filepath.Join("testdata", "backup-data.tar.gz")
testRunInit(t, env.gopts)
- SetupTarTestFixture(t, env.testdata, datafile)
+ rtest.SetupTarTestFixture(t, env.testdata, datafile)
opts := BackupOptions{}
testRunBackup(t, []string{env.testdata}, opts, env.gopts)
testRunCheck(t, env.gopts)
newest, _ := testRunSnapshots(t, env.gopts)
- Assert(t, newest != nil, "expected a new backup, got nil")
- Assert(t, len(newest.Tags) == 0,
+ rtest.Assert(t, newest != nil, "expected a new backup, got nil")
+ rtest.Assert(t, len(newest.Tags) == 0,
"expected no tags, got %v", newest.Tags)
+ parent := newest
opts.Tags = []string{"NL"}
testRunBackup(t, []string{env.testdata}, opts, env.gopts)
testRunCheck(t, env.gopts)
newest, _ = testRunSnapshots(t, env.gopts)
- Assert(t, newest != nil, "expected a new backup, got nil")
- Assert(t, len(newest.Tags) == 1 && newest.Tags[0] == "NL",
+ rtest.Assert(t, newest != nil, "expected a new backup, got nil")
+ rtest.Assert(t, len(newest.Tags) == 1 && newest.Tags[0] == "NL",
"expected one NL tag, got %v", newest.Tags)
+ // Tagged backup should have untagged backup as parent.
+ rtest.Assert(t, parent.ID.Equal(*newest.Parent),
+ "expected parent to be %v, got %v", parent.ID, newest.Parent)
}
func testRunTag(t testing.TB, opts TagOptions, gopts GlobalOptions) {
- OK(t, runTag(opts, gopts, []string{}))
+ rtest.OK(t, runTag(opts, gopts, []string{}))
}
func TestTag(t *testing.T) {
@@ -677,68 +680,68 @@ func TestTag(t *testing.T) {
datafile := filepath.Join("testdata", "backup-data.tar.gz")
testRunInit(t, env.gopts)
- SetupTarTestFixture(t, env.testdata, datafile)
+ rtest.SetupTarTestFixture(t, env.testdata, datafile)
testRunBackup(t, []string{env.testdata}, BackupOptions{}, env.gopts)
testRunCheck(t, env.gopts)
newest, _ := testRunSnapshots(t, env.gopts)
- Assert(t, newest != nil, "expected a new backup, got nil")
- Assert(t, len(newest.Tags) == 0,
+ rtest.Assert(t, newest != nil, "expected a new backup, got nil")
+ rtest.Assert(t, len(newest.Tags) == 0,
"expected no tags, got %v", newest.Tags)
- Assert(t, newest.Original == nil,
+ rtest.Assert(t, newest.Original == nil,
"expected original ID to be nil, got %v", newest.Original)
originalID := *newest.ID
testRunTag(t, TagOptions{SetTags: []string{"NL"}}, env.gopts)
testRunCheck(t, env.gopts)
newest, _ = testRunSnapshots(t, env.gopts)
- Assert(t, newest != nil, "expected a new backup, got nil")
- Assert(t, len(newest.Tags) == 1 && newest.Tags[0] == "NL",
+ rtest.Assert(t, newest != nil, "expected a new backup, got nil")
+ rtest.Assert(t, len(newest.Tags) == 1 && newest.Tags[0] == "NL",
"set failed, expected one NL tag, got %v", newest.Tags)
- Assert(t, newest.Original != nil, "expected original snapshot id, got nil")
- Assert(t, *newest.Original == originalID,
+ rtest.Assert(t, newest.Original != nil, "expected original snapshot id, got nil")
+ rtest.Assert(t, *newest.Original == originalID,
"expected original ID to be set to the first snapshot id")
testRunTag(t, TagOptions{AddTags: []string{"CH"}}, env.gopts)
testRunCheck(t, env.gopts)
newest, _ = testRunSnapshots(t, env.gopts)
- Assert(t, newest != nil, "expected a new backup, got nil")
- Assert(t, len(newest.Tags) == 2 && newest.Tags[0] == "NL" && newest.Tags[1] == "CH",
+ rtest.Assert(t, newest != nil, "expected a new backup, got nil")
+ rtest.Assert(t, len(newest.Tags) == 2 && newest.Tags[0] == "NL" && newest.Tags[1] == "CH",
"add failed, expected CH,NL tags, got %v", newest.Tags)
- Assert(t, newest.Original != nil, "expected original snapshot id, got nil")
- Assert(t, *newest.Original == originalID,
+ rtest.Assert(t, newest.Original != nil, "expected original snapshot id, got nil")
+ rtest.Assert(t, *newest.Original == originalID,
"expected original ID to be set to the first snapshot id")
testRunTag(t, TagOptions{RemoveTags: []string{"NL"}}, env.gopts)
testRunCheck(t, env.gopts)
newest, _ = testRunSnapshots(t, env.gopts)
- Assert(t, newest != nil, "expected a new backup, got nil")
- Assert(t, len(newest.Tags) == 1 && newest.Tags[0] == "CH",
+ rtest.Assert(t, newest != nil, "expected a new backup, got nil")
+ rtest.Assert(t, len(newest.Tags) == 1 && newest.Tags[0] == "CH",
"remove failed, expected one CH tag, got %v", newest.Tags)
- Assert(t, newest.Original != nil, "expected original snapshot id, got nil")
- Assert(t, *newest.Original == originalID,
+ rtest.Assert(t, newest.Original != nil, "expected original snapshot id, got nil")
+ rtest.Assert(t, *newest.Original == originalID,
"expected original ID to be set to the first snapshot id")
testRunTag(t, TagOptions{AddTags: []string{"US", "RU"}}, env.gopts)
testRunTag(t, TagOptions{RemoveTags: []string{"CH", "US", "RU"}}, env.gopts)
testRunCheck(t, env.gopts)
newest, _ = testRunSnapshots(t, env.gopts)
- Assert(t, newest != nil, "expected a new backup, got nil")
- Assert(t, len(newest.Tags) == 0,
+ rtest.Assert(t, newest != nil, "expected a new backup, got nil")
+ rtest.Assert(t, len(newest.Tags) == 0,
"expected no tags, got %v", newest.Tags)
- Assert(t, newest.Original != nil, "expected original snapshot id, got nil")
- Assert(t, *newest.Original == originalID,
+ rtest.Assert(t, newest.Original != nil, "expected original snapshot id, got nil")
+ rtest.Assert(t, *newest.Original == originalID,
"expected original ID to be set to the first snapshot id")
// Check special case of removing all tags.
testRunTag(t, TagOptions{SetTags: []string{""}}, env.gopts)
testRunCheck(t, env.gopts)
newest, _ = testRunSnapshots(t, env.gopts)
- Assert(t, newest != nil, "expected a new backup, got nil")
- Assert(t, len(newest.Tags) == 0,
+ rtest.Assert(t, newest != nil, "expected a new backup, got nil")
+ rtest.Assert(t, len(newest.Tags) == 0,
"expected no tags, got %v", newest.Tags)
- Assert(t, newest.Original != nil, "expected original snapshot id, got nil")
- Assert(t, *newest.Original == originalID,
+ rtest.Assert(t, newest.Original != nil, "expected original snapshot id, got nil")
+ rtest.Assert(t, *newest.Original == originalID,
"expected original ID to be set to the first snapshot id")
}
@@ -750,7 +753,7 @@ func testRunKeyListOtherIDs(t testing.TB, gopts GlobalOptions) []string {
globalOptions.stdout = os.Stdout
}()
- OK(t, runKey(gopts, []string{"list"}))
+ rtest.OK(t, runKey(gopts, []string{"list"}))
scanner := bufio.NewScanner(buf)
exp := regexp.MustCompile(`^ ([a-f0-9]+) `)
@@ -771,7 +774,7 @@ func testRunKeyAddNewKey(t testing.TB, newPassword string, gopts GlobalOptions)
testKeyNewPassword = ""
}()
- OK(t, runKey(gopts, []string{"add"}))
+ rtest.OK(t, runKey(gopts, []string{"add"}))
}
func testRunKeyPasswd(t testing.TB, newPassword string, gopts GlobalOptions) {
@@ -780,13 +783,13 @@ func testRunKeyPasswd(t testing.TB, newPassword string, gopts GlobalOptions) {
testKeyNewPassword = ""
}()
- OK(t, runKey(gopts, []string{"passwd"}))
+ rtest.OK(t, runKey(gopts, []string{"passwd"}))
}
func testRunKeyRemove(t testing.TB, gopts GlobalOptions, IDs []string) {
t.Logf("remove %d keys: %q\n", len(IDs), IDs)
for _, id := range IDs {
- OK(t, runKey(gopts, []string{"remove", id}))
+ rtest.OK(t, runKey(gopts, []string{"remove", id}))
}
}
@@ -814,7 +817,7 @@ func TestKeyAddRemove(t *testing.T) {
env.gopts.password = passwordList[len(passwordList)-1]
t.Logf("testing access with last password %q\n", env.gopts.password)
- OK(t, runKey(env.gopts, []string{"list"}))
+ rtest.OK(t, runKey(env.gopts, []string{"list"}))
testRunCheck(t, env.gopts)
}
@@ -847,10 +850,10 @@ func TestRestoreFilter(t *testing.T) {
testRunInit(t, env.gopts)
- for _, test := range testfiles {
- p := filepath.Join(env.testdata, test.name)
- OK(t, os.MkdirAll(filepath.Dir(p), 0755))
- OK(t, appendRandomData(p, test.size))
+ for _, testFile := range testfiles {
+ p := filepath.Join(env.testdata, testFile.name)
+ rtest.OK(t, os.MkdirAll(filepath.Dir(p), 0755))
+ rtest.OK(t, appendRandomData(p, testFile.size))
}
opts := BackupOptions{}
@@ -862,20 +865,20 @@ func TestRestoreFilter(t *testing.T) {
// no restore filter should restore all files
testRunRestore(t, env.gopts, filepath.Join(env.base, "restore0"), snapshotID)
- for _, test := range testfiles {
- OK(t, testFileSize(filepath.Join(env.base, "restore0", "testdata", test.name), int64(test.size)))
+ for _, testFile := range testfiles {
+ rtest.OK(t, testFileSize(filepath.Join(env.base, "restore0", "testdata", testFile.name), int64(testFile.size)))
}
for i, pat := range []string{"*.c", "*.exe", "*", "*file3*"} {
base := filepath.Join(env.base, fmt.Sprintf("restore%d", i+1))
testRunRestoreExcludes(t, env.gopts, base, snapshotID, []string{pat})
- for _, test := range testfiles {
- err := testFileSize(filepath.Join(base, "testdata", test.name), int64(test.size))
- if ok, _ := filter.Match(pat, filepath.Base(test.name)); !ok {
- OK(t, err)
+ for _, testFile := range testfiles {
+ err := testFileSize(filepath.Join(base, "testdata", testFile.name), int64(testFile.size))
+ if ok, _ := filter.Match(pat, filepath.Base(testFile.name)); !ok {
+ rtest.OK(t, err)
} else {
- Assert(t, os.IsNotExist(errors.Cause(err)),
- "expected %v to not exist in restore step %v, but it exists, err %v", test.name, i+1, err)
+ rtest.Assert(t, os.IsNotExist(errors.Cause(err)),
+ "expected %v to not exist in restore step %v, but it exists, err %v", testFile.name, i+1, err)
}
}
}
@@ -889,8 +892,8 @@ func TestRestore(t *testing.T) {
for i := 0; i < 10; i++ {
p := filepath.Join(env.testdata, fmt.Sprintf("foo/bar/testfile%v", i))
- OK(t, os.MkdirAll(filepath.Dir(p), 0755))
- OK(t, appendRandomData(p, uint(mrand.Intn(5<<21))))
+ rtest.OK(t, os.MkdirAll(filepath.Dir(p), 0755))
+ rtest.OK(t, appendRandomData(p, uint(mrand.Intn(5<<21))))
}
opts := BackupOptions{}
@@ -902,7 +905,7 @@ func TestRestore(t *testing.T) {
restoredir := filepath.Join(env.base, "restore")
testRunRestoreLatest(t, env.gopts, restoredir, nil, "")
- Assert(t, directoriesEqualContents(env.testdata, filepath.Join(restoredir, filepath.Base(env.testdata))),
+ rtest.Assert(t, directoriesEqualContents(env.testdata, filepath.Join(restoredir, filepath.Base(env.testdata))),
"directories are not equal")
}
@@ -913,8 +916,8 @@ func TestRestoreLatest(t *testing.T) {
testRunInit(t, env.gopts)
p := filepath.Join(env.testdata, "testfile.c")
- OK(t, os.MkdirAll(filepath.Dir(p), 0755))
- OK(t, appendRandomData(p, 100))
+ rtest.OK(t, os.MkdirAll(filepath.Dir(p), 0755))
+ rtest.OK(t, appendRandomData(p, 100))
opts := BackupOptions{}
@@ -922,24 +925,24 @@ func TestRestoreLatest(t *testing.T) {
testRunCheck(t, env.gopts)
os.Remove(p)
- OK(t, appendRandomData(p, 101))
+ rtest.OK(t, appendRandomData(p, 101))
testRunBackup(t, []string{env.testdata}, opts, env.gopts)
testRunCheck(t, env.gopts)
// Restore latest without any filters
testRunRestoreLatest(t, env.gopts, filepath.Join(env.base, "restore0"), nil, "")
- OK(t, testFileSize(filepath.Join(env.base, "restore0", "testdata", "testfile.c"), int64(101)))
+ rtest.OK(t, testFileSize(filepath.Join(env.base, "restore0", "testdata", "testfile.c"), int64(101)))
// Setup test files in different directories backed up in different snapshots
p1 := filepath.Join(env.testdata, "p1/testfile.c")
- OK(t, os.MkdirAll(filepath.Dir(p1), 0755))
- OK(t, appendRandomData(p1, 102))
+ rtest.OK(t, os.MkdirAll(filepath.Dir(p1), 0755))
+ rtest.OK(t, appendRandomData(p1, 102))
testRunBackup(t, []string{filepath.Dir(p1)}, opts, env.gopts)
testRunCheck(t, env.gopts)
p2 := filepath.Join(env.testdata, "p2/testfile.c")
- OK(t, os.MkdirAll(filepath.Dir(p2), 0755))
- OK(t, appendRandomData(p2, 103))
+ rtest.OK(t, os.MkdirAll(filepath.Dir(p2), 0755))
+ rtest.OK(t, appendRandomData(p2, 103))
testRunBackup(t, []string{filepath.Dir(p2)}, opts, env.gopts)
testRunCheck(t, env.gopts)
@@ -947,16 +950,16 @@ func TestRestoreLatest(t *testing.T) {
p2rAbs := filepath.Join(env.base, "restore2", "p2/testfile.c")
testRunRestoreLatest(t, env.gopts, filepath.Join(env.base, "restore1"), []string{filepath.Dir(p1)}, "")
- OK(t, testFileSize(p1rAbs, int64(102)))
+ rtest.OK(t, testFileSize(p1rAbs, int64(102)))
if _, err := os.Stat(p2rAbs); os.IsNotExist(errors.Cause(err)) {
- Assert(t, os.IsNotExist(errors.Cause(err)),
+ rtest.Assert(t, os.IsNotExist(errors.Cause(err)),
"expected %v to not exist in restore, but it exists, err %v", p2rAbs, err)
}
testRunRestoreLatest(t, env.gopts, filepath.Join(env.base, "restore2"), []string{filepath.Dir(p2)}, "")
- OK(t, testFileSize(p2rAbs, int64(103)))
+ rtest.OK(t, testFileSize(p2rAbs, int64(103)))
if _, err := os.Stat(p1rAbs); os.IsNotExist(errors.Cause(err)) {
- Assert(t, os.IsNotExist(errors.Cause(err)),
+ rtest.Assert(t, os.IsNotExist(errors.Cause(err)),
"expected %v to not exist in restore, but it exists, err %v", p1rAbs, err)
}
}
@@ -966,10 +969,10 @@ func TestRestoreWithPermissionFailure(t *testing.T) {
defer cleanup()
datafile := filepath.Join("testdata", "repo-restore-permissions-test.tar.gz")
- SetupTarTestFixture(t, env.base, datafile)
+ rtest.SetupTarTestFixture(t, env.base, datafile)
snapshots := testRunList(t, "snapshots", env.gopts)
- Assert(t, len(snapshots) > 0,
+ rtest.Assert(t, len(snapshots) > 0,
"no snapshots found in repo (%v)", datafile)
globalOptions.stderr = ioutil.Discard
@@ -984,9 +987,9 @@ func TestRestoreWithPermissionFailure(t *testing.T) {
files := testRunLs(t, env.gopts, snapshots[0].String())
for _, filename := range files {
fi, err := os.Lstat(filepath.Join(env.base, "restore", filename))
- OK(t, err)
+ rtest.OK(t, err)
- Assert(t, !isFile(fi) || fi.Size() > 0,
+ rtest.Assert(t, !isFile(fi) || fi.Size() > 0,
"file %v restored, but filesize is 0", filename)
}
}
@@ -1007,9 +1010,9 @@ func TestRestoreNoMetadataOnIgnoredIntermediateDirs(t *testing.T) {
testRunInit(t, env.gopts)
p := filepath.Join(env.testdata, "subdir1", "subdir2", "subdir3", "file.ext")
- OK(t, os.MkdirAll(filepath.Dir(p), 0755))
- OK(t, appendRandomData(p, 200))
- OK(t, setZeroModTime(filepath.Join(env.testdata, "subdir1", "subdir2")))
+ rtest.OK(t, os.MkdirAll(filepath.Dir(p), 0755))
+ rtest.OK(t, appendRandomData(p, 200))
+ rtest.OK(t, setZeroModTime(filepath.Join(env.testdata, "subdir1", "subdir2")))
opts := BackupOptions{}
@@ -1025,9 +1028,9 @@ func TestRestoreNoMetadataOnIgnoredIntermediateDirs(t *testing.T) {
f1 := filepath.Join(env.base, "restore0", "testdata", "subdir1", "subdir2")
fi, err := os.Stat(f1)
- OK(t, err)
+ rtest.OK(t, err)
- Assert(t, fi.ModTime() != time.Unix(0, 0),
+ rtest.Assert(t, fi.ModTime() != time.Unix(0, 0),
"meta data of intermediate directory has been restore although it was ignored")
// restore with filter "*", this should restore meta data on everything.
@@ -1035,9 +1038,9 @@ func TestRestoreNoMetadataOnIgnoredIntermediateDirs(t *testing.T) {
f2 := filepath.Join(env.base, "restore1", "testdata", "subdir1", "subdir2")
fi, err = os.Stat(f2)
- OK(t, err)
+ rtest.OK(t, err)
- Assert(t, fi.ModTime() == time.Unix(0, 0),
+ rtest.Assert(t, fi.ModTime() == time.Unix(0, 0),
"meta data of intermediate directory hasn't been restore")
}
@@ -1047,7 +1050,7 @@ func TestFind(t *testing.T) {
datafile := filepath.Join("testdata", "backup-data.tar.gz")
testRunInit(t, env.gopts)
- SetupTarTestFixture(t, env.testdata, datafile)
+ rtest.SetupTarTestFixture(t, env.testdata, datafile)
opts := BackupOptions{}
@@ -1055,15 +1058,15 @@ func TestFind(t *testing.T) {
testRunCheck(t, env.gopts)
results := testRunFind(t, false, env.gopts, "unexistingfile")
- Assert(t, len(results) == 0, "unexisting file found in repo (%v)", datafile)
+ rtest.Assert(t, len(results) == 0, "unexisting file found in repo (%v)", datafile)
results = testRunFind(t, false, env.gopts, "testfile")
lines := strings.Split(string(results), "\n")
- Assert(t, len(lines) == 2, "expected one file found in repo (%v)", datafile)
+ rtest.Assert(t, len(lines) == 2, "expected one file found in repo (%v)", datafile)
results = testRunFind(t, false, env.gopts, "testfile*")
lines = strings.Split(string(results), "\n")
- Assert(t, len(lines) == 4, "expected three files found in repo (%v)", datafile)
+ rtest.Assert(t, len(lines) == 4, "expected three files found in repo (%v)", datafile)
}
type testMatch struct {
@@ -1087,7 +1090,7 @@ func TestFindJSON(t *testing.T) {
datafile := filepath.Join("testdata", "backup-data.tar.gz")
testRunInit(t, env.gopts)
- SetupTarTestFixture(t, env.testdata, datafile)
+ rtest.SetupTarTestFixture(t, env.testdata, datafile)
opts := BackupOptions{}
@@ -1096,20 +1099,20 @@ func TestFindJSON(t *testing.T) {
results := testRunFind(t, true, env.gopts, "unexistingfile")
matches := []testMatches{}
- OK(t, json.Unmarshal(results, &matches))
- Assert(t, len(matches) == 0, "expected no match in repo (%v)", datafile)
+ rtest.OK(t, json.Unmarshal(results, &matches))
+ rtest.Assert(t, len(matches) == 0, "expected no match in repo (%v)", datafile)
results = testRunFind(t, true, env.gopts, "testfile")
- OK(t, json.Unmarshal(results, &matches))
- Assert(t, len(matches) == 1, "expected a single snapshot in repo (%v)", datafile)
- Assert(t, len(matches[0].Matches) == 1, "expected a single file to match (%v)", datafile)
- Assert(t, matches[0].Hits == 1, "expected hits to show 1 match (%v)", datafile)
+ rtest.OK(t, json.Unmarshal(results, &matches))
+ rtest.Assert(t, len(matches) == 1, "expected a single snapshot in repo (%v)", datafile)
+ rtest.Assert(t, len(matches[0].Matches) == 1, "expected a single file to match (%v)", datafile)
+ rtest.Assert(t, matches[0].Hits == 1, "expected hits to show 1 match (%v)", datafile)
results = testRunFind(t, true, env.gopts, "testfile*")
- OK(t, json.Unmarshal(results, &matches))
- Assert(t, len(matches) == 1, "expected a single snapshot in repo (%v)", datafile)
- Assert(t, len(matches[0].Matches) == 3, "expected 3 files to match (%v)", datafile)
- Assert(t, matches[0].Hits == 3, "expected hits to show 3 matches (%v)", datafile)
+ rtest.OK(t, json.Unmarshal(results, &matches))
+ rtest.Assert(t, len(matches) == 1, "expected a single snapshot in repo (%v)", datafile)
+ rtest.Assert(t, len(matches[0].Matches) == 3, "expected 3 files to match (%v)", datafile)
+ rtest.Assert(t, matches[0].Hits == 3, "expected hits to show 3 matches (%v)", datafile)
}
func TestRebuildIndex(t *testing.T) {
@@ -1117,7 +1120,7 @@ func TestRebuildIndex(t *testing.T) {
defer cleanup()
datafile := filepath.Join("..", "..", "internal", "checker", "testdata", "duplicate-packs-in-index-test-repo.tar.gz")
- SetupTarTestFixture(t, env.base, datafile)
+ rtest.SetupTarTestFixture(t, env.base, datafile)
out, err := testRunCheckOutput(env.gopts)
if !strings.Contains(out, "contained in several indexes") {
@@ -1154,7 +1157,7 @@ func TestCheckRestoreNoLock(t *testing.T) {
defer cleanup()
datafile := filepath.Join("testdata", "small-repo.tar.gz")
- SetupTarTestFixture(t, env.base, datafile)
+ rtest.SetupTarTestFixture(t, env.base, datafile)
err := filepath.Walk(env.repo, func(p string, fi os.FileInfo, e error) error {
if e != nil {
@@ -1162,7 +1165,7 @@ func TestCheckRestoreNoLock(t *testing.T) {
}
return os.Chmod(p, fi.Mode() & ^(os.FileMode(0222)))
})
- OK(t, err)
+ rtest.OK(t, err)
env.gopts.NoLock = true
@@ -1186,24 +1189,24 @@ func TestPrune(t *testing.T) {
t.Skipf("unable to find data file %q, skipping", datafile)
return
}
- OK(t, err)
- OK(t, fd.Close())
+ rtest.OK(t, err)
+ rtest.OK(t, fd.Close())
testRunInit(t, env.gopts)
- SetupTarTestFixture(t, env.testdata, datafile)
+ rtest.SetupTarTestFixture(t, env.testdata, datafile)
opts := BackupOptions{}
testRunBackup(t, []string{filepath.Join(env.testdata, "0", "0")}, opts, env.gopts)
firstSnapshot := testRunList(t, "snapshots", env.gopts)
- Assert(t, len(firstSnapshot) == 1,
+ rtest.Assert(t, len(firstSnapshot) == 1,
"expected one snapshot, got %v", firstSnapshot)
testRunBackup(t, []string{filepath.Join(env.testdata, "0", "0", "2")}, opts, env.gopts)
testRunBackup(t, []string{filepath.Join(env.testdata, "0", "0", "3")}, opts, env.gopts)
snapshotIDs := testRunList(t, "snapshots", env.gopts)
- Assert(t, len(snapshotIDs) == 3,
+ rtest.Assert(t, len(snapshotIDs) == 3,
"expected 3 snapshot, got %v", snapshotIDs)
testRunForget(t, env.gopts, firstSnapshot[0].String())
@@ -1222,12 +1225,12 @@ func TestHardLink(t *testing.T) {
t.Skipf("unable to find data file %q, skipping", datafile)
return
}
- OK(t, err)
- OK(t, fd.Close())
+ rtest.OK(t, err)
+ rtest.OK(t, fd.Close())
testRunInit(t, env.gopts)
- SetupTarTestFixture(t, env.testdata, datafile)
+ rtest.SetupTarTestFixture(t, env.testdata, datafile)
linkTests := createFileSetPerHardlink(env.testdata)
@@ -1236,7 +1239,7 @@ func TestHardLink(t *testing.T) {
// first backup
testRunBackup(t, []string{env.testdata}, opts, env.gopts)
snapshotIDs := testRunList(t, "snapshots", env.gopts)
- Assert(t, len(snapshotIDs) == 1,
+ rtest.Assert(t, len(snapshotIDs) == 1,
"expected one snapshot, got %v", snapshotIDs)
testRunCheck(t, env.gopts)
@@ -1246,11 +1249,11 @@ func TestHardLink(t *testing.T) {
restoredir := filepath.Join(env.base, fmt.Sprintf("restore%d", i))
t.Logf("restoring snapshot %v to %v", snapshotID.Str(), restoredir)
testRunRestore(t, env.gopts, restoredir, snapshotIDs[0])
- Assert(t, directoriesEqualContents(env.testdata, filepath.Join(restoredir, "testdata")),
+ rtest.Assert(t, directoriesEqualContents(env.testdata, filepath.Join(restoredir, "testdata")),
"directories are not equal")
linkResults := createFileSetPerHardlink(filepath.Join(restoredir, "testdata"))
- Assert(t, linksEqual(linkTests, linkResults),
+ rtest.Assert(t, linksEqual(linkTests, linkResults),
"links are not equal")
}
diff --git a/cmd/restic/local_layout_test.go b/cmd/restic/local_layout_test.go
index 4d22342ee..c76112e00 100644
--- a/cmd/restic/local_layout_test.go
+++ b/cmd/restic/local_layout_test.go
@@ -4,7 +4,7 @@ import (
"path/filepath"
"testing"
- . "github.com/restic/restic/internal/test"
+ rtest "github.com/restic/restic/internal/test"
)
func TestRestoreLocalLayout(t *testing.T) {
@@ -24,7 +24,7 @@ func TestRestoreLocalLayout(t *testing.T) {
for _, test := range tests {
datafile := filepath.Join("..", "..", "internal", "backend", "testdata", test.filename)
- SetupTarTestFixture(t, env.base, datafile)
+ rtest.SetupTarTestFixture(t, env.base, datafile)
env.gopts.extended["local.layout"] = test.layout
@@ -35,7 +35,7 @@ func TestRestoreLocalLayout(t *testing.T) {
target := filepath.Join(env.base, "restore")
testRunRestoreLatest(t, env.gopts, target, nil, "")
- RemoveAll(t, filepath.Join(env.base, "repo"))
- RemoveAll(t, target)
+ rtest.RemoveAll(t, filepath.Join(env.base, "repo"))
+ rtest.RemoveAll(t, target)
}
}
diff --git a/cmd/restic/lock.go b/cmd/restic/lock.go
index c147a9cb5..d0f44ee77 100644
--- a/cmd/restic/lock.go
+++ b/cmd/restic/lock.go
@@ -8,6 +8,7 @@ import (
"time"
"github.com/restic/restic/internal/debug"
+ "github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/repository"
"github.com/restic/restic/internal/restic"
)
@@ -35,7 +36,7 @@ func lockRepository(repo *repository.Repository, exclusive bool) (*restic.Lock,
lock, err := lockFn(context.TODO(), repo)
if err != nil {
- return nil, err
+ return nil, errors.Fatalf("unable to create lock in backend: %v", err)
}
debug.Log("create lock %p (exclusive %v)", lock, exclusive)
diff --git a/cmd/restic/table.go b/cmd/restic/table.go
index 7a5d17a53..b2fa772f8 100644
--- a/cmd/restic/table.go
+++ b/cmd/restic/table.go
@@ -10,6 +10,7 @@ import (
type Table struct {
Header string
Rows [][]interface{}
+ Footer string
RowFormat string
}
@@ -21,13 +22,19 @@ func NewTable() Table {
}
}
+func (t Table) printSeparationLine(w io.Writer) error {
+ _, err := fmt.Fprintln(w, strings.Repeat("-", 70))
+ return err
+}
+
// Write prints the table to w.
func (t Table) Write(w io.Writer) error {
_, err := fmt.Fprintln(w, t.Header)
if err != nil {
return err
}
- _, err = fmt.Fprintln(w, strings.Repeat("-", 70))
+
+ err = t.printSeparationLine(w)
if err != nil {
return err
}
@@ -39,6 +46,16 @@ func (t Table) Write(w io.Writer) error {
}
}
+ err = t.printSeparationLine(w)
+ if err != nil {
+ return err
+ }
+
+ _, err = fmt.Fprintln(w, t.Footer)
+ if err != nil {
+ return err
+ }
+
return nil
}