summaryrefslogtreecommitdiff
path: root/cmd
diff options
context:
space:
mode:
authorFélix Sipma <felix.sipma@no-log.org>2017-09-26 10:17:15 +0200
committerFélix Sipma <felix.sipma@no-log.org>2017-09-26 10:17:15 +0200
commit4bb9f95800766be38ccb07ec9a5134e87e5b0316 (patch)
tree6b813c4525e774cd33ec625cd30ad923dc853ce6 /cmd
parent43c168d49256c4546a6c6679b47e9e2795dbb74b (diff)
New upstream version 0.7.3+debian0
Diffstat (limited to 'cmd')
-rw-r--r--cmd/restic/.gitignore1
-rw-r--r--cmd/restic/background.go9
-rw-r--r--cmd/restic/background_linux.go21
-rw-r--r--cmd/restic/cleanup.go74
-rw-r--r--cmd/restic/cmd_autocomplete.go37
-rw-r--r--cmd/restic/cmd_backup.go504
-rw-r--r--cmd/restic/cmd_cat.go191
-rw-r--r--cmd/restic/cmd_check.go170
-rw-r--r--cmd/restic/cmd_dump.go211
-rw-r--r--cmd/restic/cmd_find.go308
-rw-r--r--cmd/restic/cmd_forget.go238
-rw-r--r--cmd/restic/cmd_init.go59
-rw-r--r--cmd/restic/cmd_key.go180
-rw-r--r--cmd/restic/cmd_list.go82
-rw-r--r--cmd/restic/cmd_ls.go92
-rw-r--r--cmd/restic/cmd_manpage.go70
-rw-r--r--cmd/restic/cmd_migrate.go108
-rw-r--r--cmd/restic/cmd_mount.go150
-rw-r--r--cmd/restic/cmd_options.go29
-rw-r--r--cmd/restic/cmd_prune.go279
-rw-r--r--cmd/restic/cmd_rebuild_index.go86
-rw-r--r--cmd/restic/cmd_restore.go157
-rw-r--r--cmd/restic/cmd_snapshots.go186
-rw-r--r--cmd/restic/cmd_tag.go143
-rw-r--r--cmd/restic/cmd_unlock.go53
-rw-r--r--cmd/restic/cmd_version.go26
-rw-r--r--cmd/restic/doc.go2
-rw-r--r--cmd/restic/exclude.go179
-rw-r--r--cmd/restic/exclude_test.go84
-rw-r--r--cmd/restic/find.go70
-rw-r--r--cmd/restic/flags_test.go24
-rw-r--r--cmd/restic/format.go84
-rw-r--r--cmd/restic/global.go542
-rw-r--r--cmd/restic/global_debug.go75
-rw-r--r--cmd/restic/global_release.go6
-rw-r--r--cmd/restic/integration_fuse_test.go206
-rw-r--r--cmd/restic/integration_helpers_test.go221
-rw-r--r--cmd/restic/integration_helpers_unix_test.go70
-rw-r--r--cmd/restic/integration_helpers_windows_test.go49
-rw-r--r--cmd/restic/integration_test.go1310
-rw-r--r--cmd/restic/local_layout_test.go41
-rw-r--r--cmd/restic/lock.go127
-rw-r--r--cmd/restic/main.go94
-rw-r--r--cmd/restic/table.go46
-rw-r--r--cmd/restic/testdata/backup-data.tar.gzbin0 -> 177734 bytes
-rw-r--r--cmd/restic/testdata/old-index-repo.tar.gzbin0 -> 6307407 bytes
-rw-r--r--cmd/restic/testdata/repo-restore-permissions-test.tar.gzbin0 -> 4174 bytes
-rw-r--r--cmd/restic/testdata/repo-same-timestamps.tar.gzbin0 -> 3243 bytes
-rw-r--r--cmd/restic/testdata/small-repo.tar.gzbin0 -> 11747 bytes
-rw-r--r--cmd/restic/testdata/test.hl.tar.gzbin0 -> 198 bytes
50 files changed, 6694 insertions, 0 deletions
diff --git a/cmd/restic/.gitignore b/cmd/restic/.gitignore
new file mode 100644
index 000000000..aee2e4ce1
--- /dev/null
+++ b/cmd/restic/.gitignore
@@ -0,0 +1 @@
+config.mk
diff --git a/cmd/restic/background.go b/cmd/restic/background.go
new file mode 100644
index 000000000..2f115adfd
--- /dev/null
+++ b/cmd/restic/background.go
@@ -0,0 +1,9 @@
+// +build !linux
+
+package main
+
+// IsProcessBackground should return true if it is running in the background or false if not
+func IsProcessBackground() bool {
+ //TODO: Check if the process are running in the background in other OS than linux
+ return false
+}
diff --git a/cmd/restic/background_linux.go b/cmd/restic/background_linux.go
new file mode 100644
index 000000000..b9a2a2f00
--- /dev/null
+++ b/cmd/restic/background_linux.go
@@ -0,0 +1,21 @@
+package main
+
+import (
+ "syscall"
+ "unsafe"
+
+ "github.com/restic/restic/internal/debug"
+)
+
+// IsProcessBackground returns true if it is running in the background or false if not
+func IsProcessBackground() bool {
+ var pid int
+ _, _, err := syscall.Syscall(syscall.SYS_IOCTL, uintptr(syscall.Stdin), syscall.TIOCGPGRP, uintptr(unsafe.Pointer(&pid)))
+
+ if err != 0 {
+ debug.Log("Can't check if we are in the background. Using default behaviour. Error: %s\n", err.Error())
+ return false
+ }
+
+ return pid != syscall.Getpgrp()
+}
diff --git a/cmd/restic/cleanup.go b/cmd/restic/cleanup.go
new file mode 100644
index 000000000..12dbdc142
--- /dev/null
+++ b/cmd/restic/cleanup.go
@@ -0,0 +1,74 @@
+package main
+
+import (
+ "fmt"
+ "os"
+ "os/signal"
+ "sync"
+ "syscall"
+
+ "github.com/restic/restic/internal/debug"
+)
+
+var cleanupHandlers struct {
+ sync.Mutex
+ list []func() error
+ done bool
+}
+
+var stderr = os.Stderr
+
+func init() {
+ c := make(chan os.Signal)
+ signal.Notify(c, syscall.SIGINT)
+
+ go CleanupHandler(c)
+}
+
+// AddCleanupHandler adds the function f to the list of cleanup handlers so
+// that it is executed when all the cleanup handlers are run, e.g. when SIGINT
+// is received.
+func AddCleanupHandler(f func() error) {
+ cleanupHandlers.Lock()
+ defer cleanupHandlers.Unlock()
+
+ // reset the done flag for integration tests
+ cleanupHandlers.done = false
+
+ cleanupHandlers.list = append(cleanupHandlers.list, f)
+}
+
+// RunCleanupHandlers runs all registered cleanup handlers
+func RunCleanupHandlers() {
+ cleanupHandlers.Lock()
+ defer cleanupHandlers.Unlock()
+
+ if cleanupHandlers.done {
+ return
+ }
+ cleanupHandlers.done = true
+
+ for _, f := range cleanupHandlers.list {
+ err := f()
+ if err != nil {
+ fmt.Fprintf(stderr, "error in cleanup handler: %v\n", err)
+ }
+ }
+ cleanupHandlers.list = nil
+}
+
+// CleanupHandler handles the SIGINT signal.
+func CleanupHandler(c <-chan os.Signal) {
+ for s := range c {
+ debug.Log("signal %v received, cleaning up", s)
+ fmt.Printf("%sInterrupt received, cleaning up\n", ClearLine())
+ Exit(0)
+ }
+}
+
+// Exit runs the cleanup handlers and then terminates the process with the
+// given exit code.
+func Exit(code int) {
+ RunCleanupHandlers()
+ os.Exit(code)
+}
diff --git a/cmd/restic/cmd_autocomplete.go b/cmd/restic/cmd_autocomplete.go
new file mode 100644
index 000000000..643bd96bf
--- /dev/null
+++ b/cmd/restic/cmd_autocomplete.go
@@ -0,0 +1,37 @@
+package main
+
+import (
+ "github.com/spf13/cobra"
+)
+
+var cmdAutocomplete = &cobra.Command{
+ Use: "autocomplete",
+ Short: "Generate shell autocompletion script",
+ Long: `The "autocomplete" command generates a shell autocompletion script.
+
+NOTE: The current version supports Bash only.
+ This should work for *nix systems with Bash installed.
+
+By default, the file is written directly to /etc/bash_completion.d
+for convenience, and the command may need superuser rights, e.g.:
+
+$ sudo restic autocomplete`,
+
+ DisableAutoGenTag: true,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ if err := cmdRoot.GenBashCompletionFile(autocompleteTarget); err != nil {
+ return err
+ }
+ return nil
+ },
+}
+
+var autocompleteTarget string
+
+func init() {
+ cmdRoot.AddCommand(cmdAutocomplete)
+
+ cmdAutocomplete.Flags().StringVarP(&autocompleteTarget, "completionfile", "", "/usr/share/bash-completion/completions/restic", "autocompletion file")
+ // For bash-completion
+ cmdAutocomplete.Flags().SetAnnotation("completionfile", cobra.BashCompFilenameExt, []string{})
+}
diff --git a/cmd/restic/cmd_backup.go b/cmd/restic/cmd_backup.go
new file mode 100644
index 000000000..8a69f5cbf
--- /dev/null
+++ b/cmd/restic/cmd_backup.go
@@ -0,0 +1,504 @@
+package main
+
+import (
+ "bufio"
+ "context"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "strings"
+ "time"
+
+ "github.com/spf13/cobra"
+
+ "github.com/restic/restic/internal/archiver"
+ "github.com/restic/restic/internal/debug"
+ "github.com/restic/restic/internal/errors"
+ "github.com/restic/restic/internal/fs"
+ "github.com/restic/restic/internal/restic"
+)
+
+var cmdBackup = &cobra.Command{
+ Use: "backup [flags] FILE/DIR [FILE/DIR] ...",
+ Short: "Create a new backup of files and/or directories",
+ Long: `
+The "backup" command creates a new snapshot and saves the files and directories
+given as the arguments.
+`,
+ PreRun: func(cmd *cobra.Command, args []string) {
+ if backupOptions.Hostname == "" {
+ hostname, err := os.Hostname()
+ if err != nil {
+ debug.Log("os.Hostname() returned err: %v", err)
+ return
+ }
+ backupOptions.Hostname = hostname
+ }
+ },
+ DisableAutoGenTag: true,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ if backupOptions.Stdin && backupOptions.FilesFrom == "-" {
+ return errors.Fatal("cannot use both `--stdin` and `--files-from -`")
+ }
+
+ if backupOptions.Stdin {
+ return readBackupFromStdin(backupOptions, globalOptions, args)
+ }
+
+ return runBackup(backupOptions, globalOptions, args)
+ },
+}
+
+// BackupOptions bundles all options for the backup command.
+type BackupOptions struct {
+ Parent string
+ Force bool
+ Excludes []string
+ ExcludeFiles []string
+ ExcludeOtherFS bool
+ ExcludeIfPresent []string
+ ExcludeCaches bool
+ Stdin bool
+ StdinFilename string
+ Tags []string
+ Hostname string
+ FilesFrom string
+ TimeStamp string
+}
+
+var backupOptions BackupOptions
+
+func init() {
+ cmdRoot.AddCommand(cmdBackup)
+
+ f := cmdBackup.Flags()
+ f.StringVar(&backupOptions.Parent, "parent", "", "use this parent snapshot (default: last snapshot in the repo that has the same target files/directories)")
+ f.BoolVarP(&backupOptions.Force, "force", "f", false, `force re-reading the target files/directories (overrides the "parent" flag)`)
+ f.StringArrayVarP(&backupOptions.Excludes, "exclude", "e", nil, "exclude a `pattern` (can be specified multiple times)")
+ f.StringArrayVar(&backupOptions.ExcludeFiles, "exclude-file", nil, "read exclude patterns from a `file` (can be specified multiple times)")
+ f.BoolVarP(&backupOptions.ExcludeOtherFS, "one-file-system", "x", false, "exclude other file systems")
+ f.StringArrayVar(&backupOptions.ExcludeIfPresent, "exclude-if-present", nil, "takes filename[:header], exclude contents of directories containing filename (except filename itself) if header of that file is as provided (can be specified multiple times)")
+ f.BoolVar(&backupOptions.ExcludeCaches, "exclude-caches", false, `excludes cache directories that are marked with a CACHEDIR.TAG file`)
+ f.BoolVar(&backupOptions.Stdin, "stdin", false, "read backup from stdin")
+ f.StringVar(&backupOptions.StdinFilename, "stdin-filename", "stdin", "file name to use when reading from stdin")
+ f.StringArrayVar(&backupOptions.Tags, "tag", nil, "add a `tag` for the new snapshot (can be specified multiple times)")
+ f.StringVar(&backupOptions.Hostname, "hostname", "", "set the `hostname` for the snapshot manually")
+ f.StringVar(&backupOptions.FilesFrom, "files-from", "", "read the files to backup from file (can be combined with file args)")
+ f.StringVar(&backupOptions.TimeStamp, "time", "", "time of the backup (ex. '2012-11-01 22:08:41') (default: now)")
+}
+
+func newScanProgress(gopts GlobalOptions) *restic.Progress {
+ if gopts.Quiet {
+ return nil
+ }
+
+ p := restic.NewProgress()
+ p.OnUpdate = func(s restic.Stat, d time.Duration, ticker bool) {
+ if IsProcessBackground() {
+ return
+ }
+
+ PrintProgress("[%s] %d directories, %d files, %s", formatDuration(d), s.Dirs, s.Files, formatBytes(s.Bytes))
+ }
+
+ p.OnDone = func(s restic.Stat, d time.Duration, ticker bool) {
+ PrintProgress("scanned %d directories, %d files in %s\n", s.Dirs, s.Files, formatDuration(d))
+ }
+
+ return p
+}
+
+func newArchiveProgress(gopts GlobalOptions, todo restic.Stat) *restic.Progress {
+ if gopts.Quiet {
+ return nil
+ }
+
+ archiveProgress := restic.NewProgress()
+
+ var bps, eta uint64
+ itemsTodo := todo.Files + todo.Dirs
+
+ archiveProgress.OnUpdate = func(s restic.Stat, d time.Duration, ticker bool) {
+ if IsProcessBackground() {
+ return
+ }
+
+ sec := uint64(d / time.Second)
+ if todo.Bytes > 0 && sec > 0 && ticker {
+ bps = s.Bytes / sec
+ if s.Bytes >= todo.Bytes {
+ eta = 0
+ } else if bps > 0 {
+ eta = (todo.Bytes - s.Bytes) / bps
+ }
+ }
+
+ itemsDone := s.Files + s.Dirs
+
+ status1 := fmt.Sprintf("[%s] %s %s/s %s / %s %d / %d items %d errors ",
+ formatDuration(d),
+ formatPercent(s.Bytes, todo.Bytes),
+ formatBytes(bps),
+ formatBytes(s.Bytes), formatBytes(todo.Bytes),
+ itemsDone, itemsTodo,
+ s.Errors)
+ status2 := fmt.Sprintf("ETA %s ", formatSeconds(eta))
+
+ if w := stdoutTerminalWidth(); w > 0 {
+ maxlen := w - len(status2) - 1
+
+ if maxlen < 4 {
+ status1 = ""
+ } else if len(status1) > maxlen {
+ status1 = status1[:maxlen-4]
+ status1 += "... "
+ }
+ }
+
+ PrintProgress("%s%s", status1, status2)
+ }
+
+ archiveProgress.OnDone = func(s restic.Stat, d time.Duration, ticker bool) {
+ fmt.Printf("\nduration: %s, %s\n", formatDuration(d), formatRate(todo.Bytes, d))
+ }
+
+ return archiveProgress
+}
+
+func newArchiveStdinProgress(gopts GlobalOptions) *restic.Progress {
+ if gopts.Quiet {
+ return nil
+ }
+
+ archiveProgress := restic.NewProgress()
+
+ var bps uint64
+
+ archiveProgress.OnUpdate = func(s restic.Stat, d time.Duration, ticker bool) {
+ if IsProcessBackground() {
+ return
+ }
+
+ sec := uint64(d / time.Second)
+ if s.Bytes > 0 && sec > 0 && ticker {
+ bps = s.Bytes / sec
+ }
+
+ status1 := fmt.Sprintf("[%s] %s %s/s", formatDuration(d),
+ formatBytes(s.Bytes),
+ formatBytes(bps))
+
+ if w := stdoutTerminalWidth(); w > 0 {
+ maxlen := w - len(status1)
+
+ if maxlen < 4 {
+ status1 = ""
+ } else if len(status1) > maxlen {
+ status1 = status1[:maxlen-4]
+ status1 += "... "
+ }
+ }
+
+ PrintProgress("%s", status1)
+ }
+
+ archiveProgress.OnDone = func(s restic.Stat, d time.Duration, ticker bool) {
+ fmt.Printf("\nduration: %s, %s\n", formatDuration(d), formatRate(s.Bytes, d))
+ }
+
+ return archiveProgress
+}
+
+// filterExisting returns a slice of all existing items, or an error if no
+// items exist at all.
+func filterExisting(items []string) (result []string, err error) {
+ for _, item := range items {
+ _, err := fs.Lstat(item)
+ if err != nil && os.IsNotExist(errors.Cause(err)) {
+ Warnf("%v does not exist, skipping\n", item)
+ continue
+ }
+
+ result = append(result, item)
+ }
+
+ if len(result) == 0 {
+ return nil, errors.Fatal("all target directories/files do not exist")
+ }
+
+ return
+}
+
+func readBackupFromStdin(opts BackupOptions, gopts GlobalOptions, args []string) error {
+ if len(args) != 0 {
+ return errors.Fatal("when reading from stdin, no additional files can be specified")
+ }
+
+ if opts.StdinFilename == "" {
+ return errors.Fatal("filename for backup from stdin must not be empty")
+ }
+
+ if gopts.password == "" {
+ return errors.Fatal("unable to read password from stdin when data is to be read from stdin, use --password-file or $RESTIC_PASSWORD")
+ }
+
+ repo, err := OpenRepository(gopts)
+ if err != nil {
+ return err
+ }
+
+ lock, err := lockRepo(repo)
+ defer unlockRepo(lock)
+ if err != nil {
+ return err
+ }
+
+ err = repo.LoadIndex(context.TODO())
+ if err != nil {
+ return err
+ }
+
+ r := &archiver.Reader{
+ Repository: repo,
+ Tags: opts.Tags,
+ Hostname: opts.Hostname,
+ }
+
+ _, id, err := r.Archive(context.TODO(), opts.StdinFilename, os.Stdin, newArchiveStdinProgress(gopts))
+ if err != nil {
+ return err
+ }
+
+ Verbosef("archived as %v\n", id.Str())
+ return nil
+}
+
+// readFromFile will read all lines from the given filename and write them to a
+// string array, if filename is empty readFromFile returns and empty string
+// array. If filename is a dash (-), readFromFile will read the lines from
+// the standard input.
+func readLinesFromFile(filename string) ([]string, error) {
+ if filename == "" {
+ return nil, nil
+ }
+
+ var r io.Reader = os.Stdin
+ if filename != "-" {
+ f, err := os.Open(filename)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+ r = f
+ }
+
+ var lines []string
+
+ scanner := bufio.NewScanner(r)
+ for scanner.Scan() {
+ line := scanner.Text()
+ if line == "" {
+ continue
+ }
+ lines = append(lines, line)
+ }
+
+ if err := scanner.Err(); err != nil {
+ return nil, err
+ }
+
+ return lines, nil
+}
+
+func runBackup(opts BackupOptions, gopts GlobalOptions, args []string) error {
+ if opts.FilesFrom == "-" && gopts.password == "" {
+ return errors.Fatal("unable to read password from stdin when data is to be read from stdin, use --password-file or $RESTIC_PASSWORD")
+ }
+
+ fromfile, err := readLinesFromFile(opts.FilesFrom)
+ if err != nil {
+ return err
+ }
+
+ // merge files from files-from into normal args so we can reuse the normal
+ // args checks and have the ability to use both files-from and args at the
+ // same time
+ args = append(args, fromfile...)
+ if len(args) == 0 {
+ return errors.Fatal("nothing to backup, please specify target files/dirs")
+ }
+
+ target := make([]string, 0, len(args))
+ for _, d := range args {
+ if a, err := filepath.Abs(d); err == nil {
+ d = a
+ }
+ target = append(target, d)
+ }
+
+ target, err = filterExisting(target)
+ if err != nil {
+ return err
+ }
+
+ // rejectFuncs collect functions that can reject items from the backup
+ var rejectFuncs []RejectFunc
+
+ // allowed devices
+ if opts.ExcludeOtherFS {
+ f, err := rejectByDevice(target)
+ if err != nil {
+ return err
+ }
+ rejectFuncs = append(rejectFuncs, f)
+ }
+
+ // add patterns from file
+ if len(opts.ExcludeFiles) > 0 {
+ opts.Excludes = append(opts.Excludes, readExcludePatternsFromFiles(opts.ExcludeFiles)...)
+ }
+
+ if len(opts.Excludes) > 0 {
+ rejectFuncs = append(rejectFuncs, rejectByPattern(opts.Excludes))
+ }
+
+ if opts.ExcludeCaches {
+ opts.ExcludeIfPresent = append(opts.ExcludeIfPresent, "CACHEDIR.TAG:Signature: 8a477f597d28d172789f06886806bc55")
+ }
+
+ for _, spec := range opts.ExcludeIfPresent {
+ f, err := rejectIfPresent(spec)
+ if err != nil {
+ return err
+ }
+
+ rejectFuncs = append(rejectFuncs, f)
+ }
+
+ repo, err := OpenRepository(gopts)
+ if err != nil {
+ return err
+ }
+
+ lock, err := lockRepo(repo)
+ defer unlockRepo(lock)
+ if err != nil {
+ return err
+ }
+
+ err = repo.LoadIndex(context.TODO())
+ if err != nil {
+ return err
+ }
+
+ var parentSnapshotID *restic.ID
+
+ // Force using a parent
+ if !opts.Force && opts.Parent != "" {
+ id, err := restic.FindSnapshot(repo, opts.Parent)
+ if err != nil {
+ return errors.Fatalf("invalid id %q: %v", opts.Parent, err)
+ }
+
+ parentSnapshotID = &id
+ }
+
+ // Find last snapshot to set it as parent, if not already set
+ if !opts.Force && parentSnapshotID == nil {
+ id, err := restic.FindLatestSnapshot(context.TODO(), repo, target, []restic.TagList{opts.Tags}, opts.Hostname)
+ if err == nil {
+ parentSnapshotID = &id
+ } else if err != restic.ErrNoSnapshotFound {
+ return err
+ }
+ }
+
+ if parentSnapshotID != nil {
+ Verbosef("using parent snapshot %v\n", parentSnapshotID.Str())
+ }
+
+ Verbosef("scan %v\n", target)
+
+ selectFilter := func(item string, fi os.FileInfo) bool {
+ for _, reject := range rejectFuncs {
+ if reject(item, fi) {
+ return false
+ }
+ }
+ return true
+ }
+
+ stat, err := archiver.Scan(target, selectFilter, newScanProgress(gopts))
+ if err != nil {
+ return err
+ }
+
+ arch := archiver.New(repo)
+ arch.Excludes = opts.Excludes
+ arch.SelectFilter = selectFilter
+
+ arch.Warn = func(dir string, fi os.FileInfo, err error) {
+ // TODO: make ignoring errors configurable
+ Warnf("%s\rwarning for %s: %v\n", ClearLine(), dir, err)
+ }
+
+ timeStamp := time.Now()
+ if opts.TimeStamp != "" {
+ timeStamp, err = time.Parse(TimeFormat, opts.TimeStamp)
+ if err != nil {
+ return errors.Fatalf("error in time option: %v\n", err)
+ }
+ }
+
+ _, id, err := arch.Snapshot(context.TODO(), newArchiveProgress(gopts, stat), target, opts.Tags, opts.Hostname, parentSnapshotID, timeStamp)
+ if err != nil {
+ return err
+ }
+
+ Verbosef("snapshot %s saved\n", id.Str())
+
+ return nil
+}
+
+func readExcludePatternsFromFiles(excludeFiles []string) []string {
+ var excludes []string
+ for _, filename := range excludeFiles {
+ err := func() (err error) {
+ file, err := fs.Open(filename)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ // return pre-close error if there was one
+ if errClose := file.Close(); err == nil {
+ err = errClose
+ }
+ }()
+
+ scanner := bufio.NewScanner(file)
+ for scanner.Scan() {
+ line := strings.TrimSpace(scanner.Text())
+
+ // ignore empty lines
+ if line == "" {
+ continue
+ }
+
+ // strip comments
+ if strings.HasPrefix(line, "#") {
+ continue
+ }
+
+ line = os.ExpandEnv(line)
+ excludes = append(excludes, line)
+ }
+ return scanner.Err()
+ }()
+ if err != nil {
+ Warnf("error reading exclude patterns: %v:", err)
+ return nil
+ }
+ }
+ return excludes
+}
diff --git a/cmd/restic/cmd_cat.go b/cmd/restic/cmd_cat.go
new file mode 100644
index 000000000..8926f11a5
--- /dev/null
+++ b/cmd/restic/cmd_cat.go
@@ -0,0 +1,191 @@
+package main
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "os"
+
+ "github.com/spf13/cobra"
+
+ "github.com/restic/restic/internal/backend"
+ "github.com/restic/restic/internal/errors"
+ "github.com/restic/restic/internal/repository"
+ "github.com/restic/restic/internal/restic"
+)
+
+var cmdCat = &cobra.Command{
+ Use: "cat [flags] [pack|blob|snapshot|index|key|masterkey|config|lock] ID",
+ Short: "Print internal objects to stdout",
+ Long: `
+The "cat" command is used to print internal objects to stdout.
+`,
+ DisableAutoGenTag: true,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ return runCat(globalOptions, args)
+ },
+}
+
+func init() {
+ cmdRoot.AddCommand(cmdCat)
+}
+
+func runCat(gopts GlobalOptions, args []string) error {
+ if len(args) < 1 || (args[0] != "masterkey" && args[0] != "config" && len(args) != 2) {
+ return errors.Fatal("type or ID not specified")
+ }
+
+ repo, err := OpenRepository(gopts)
+ if err != nil {
+ return err
+ }
+
+ lock, err := lockRepo(repo)
+ defer unlockRepo(lock)
+ if err != nil {
+ return err
+ }
+
+ tpe := args[0]
+
+ var id restic.ID
+ if tpe != "masterkey" && tpe != "config" {
+ id, err = restic.ParseID(args[1])
+ if err != nil {
+ if tpe != "snapshot" {
+ return errors.Fatalf("unable to parse ID: %v\n", err)
+ }
+
+ // find snapshot id with prefix
+ id, err = restic.FindSnapshot(repo, args[1])
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ // handle all types that don't need an index
+ switch tpe {
+ case "config":
+ buf, err := json.MarshalIndent(repo.Config(), "", " ")
+ if err != nil {
+ return err
+ }
+
+ fmt.Println(string(buf))
+ return nil
+ case "index":
+ buf, err := repo.LoadAndDecrypt(context.TODO(), restic.IndexFile, id)
+ if err != nil {
+ return err
+ }
+
+ _, err = os.Stdout.Write(append(buf, '\n'))
+ return err
+
+ case "snapshot":
+ sn := &restic.Snapshot{}
+ err = repo.LoadJSONUnpacked(context.TODO(), restic.SnapshotFile, id, sn)
+ if err != nil {
+ return err
+ }
+
+ buf, err := json.MarshalIndent(&sn, "", " ")
+ if err != nil {
+ return err
+ }
+
+ fmt.Println(string(buf))
+
+ return nil
+ case "key":
+ h := restic.Handle{Type: restic.KeyFile, Name: id.String()}
+ buf, err := backend.LoadAll(context.TODO(), repo.Backend(), h)
+ if err != nil {
+ return err
+ }
+
+ key := &repository.Key{}
+ err = json.Unmarshal(buf, key)
+ if err != nil {
+ return err
+ }
+
+ buf, err = json.MarshalIndent(&key, "", " ")
+ if err != nil {
+ return err
+ }
+
+ fmt.Println(string(buf))
+ return nil
+ case "masterkey":
+ buf, err := json.MarshalIndent(repo.Key(), "", " ")
+ if err != nil {
+ return err
+ }
+
+ fmt.Println(string(buf))
+ return nil
+ case "lock":
+ lock, err := restic.LoadLock(context.TODO(), repo, id)
+ if err != nil {
+ return err
+ }
+
+ buf, err := json.MarshalIndent(&lock, "", " ")
+ if err != nil {
+ return err
+ }
+
+ fmt.Println(string(buf))
+
+ return nil
+ }
+
+ // load index, handle all the other types
+ err = repo.LoadIndex(context.TODO())
+ if err != nil {
+ return err
+ }
+
+ switch tpe {
+ case "pack":
+ h := restic.Handle{Type: restic.DataFile, Name: id.String()}
+ buf, err := backend.LoadAll(context.TODO(), repo.Backend(), h)
+ if err != nil {
+ return err
+ }
+
+ hash := restic.Hash(buf)
+ if !hash.Equal(id) {
+ fmt.Fprintf(stderr, "Warning: hash of data does not match ID, want\n %v\ngot:\n %v\n", id.String(), hash.String())
+ }
+
+ _, err = os.Stdout.Write(buf)
+ return err
+
+ case "blob":
+ for _, t := range []restic.BlobType{restic.DataBlob, restic.TreeBlob} {
+ list, err := repo.Index().Lookup(id, t)
+ if err != nil {
+ continue
+ }
+ blob := list[0]
+
+ buf := make([]byte, blob.Length)
+ n, err := repo.LoadBlob(context.TODO(), t, id, buf)
+ if err != nil {
+ return err
+ }
+ buf = buf[:n]
+
+ _, err = os.Stdout.Write(buf)
+ return err
+ }
+
+ return errors.Fatal("blob not found")
+
+ default:
+ return errors.Fatal("invalid type")
+ }
+}
diff --git a/cmd/restic/cmd_check.go b/cmd/restic/cmd_check.go
new file mode 100644
index 000000000..b64429a0e
--- /dev/null
+++ b/cmd/restic/cmd_check.go
@@ -0,0 +1,170 @@
+package main
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "time"
+
+ "github.com/spf13/cobra"
+
+ "github.com/restic/restic/internal/checker"
+ "github.com/restic/restic/internal/errors"
+ "github.com/restic/restic/internal/restic"
+)
+
+var cmdCheck = &cobra.Command{
+ Use: "check [flags]",
+ Short: "Check the repository for errors",
+ Long: `
+The "check" command tests the repository for errors and reports any errors it
+finds. It can also be used to read all data and therefore simulate a restore.
+`,
+ DisableAutoGenTag: true,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ return runCheck(checkOptions, globalOptions, args)
+ },
+}
+
+// CheckOptions bundles all options for the 'check' command.
+type CheckOptions struct {
+ ReadData bool
+ CheckUnused bool
+}
+
+var checkOptions CheckOptions
+
+func init() {
+ cmdRoot.AddCommand(cmdCheck)
+
+ f := cmdCheck.Flags()
+ f.BoolVar(&checkOptions.ReadData, "read-data", false, "read all data blobs")
+ f.BoolVar(&checkOptions.CheckUnused, "check-unused", false, "find unused blobs")
+}
+
+func newReadProgress(gopts GlobalOptions, todo restic.Stat) *restic.Progress {
+ if gopts.Quiet {
+ return nil
+ }
+
+ readProgress := restic.NewProgress()
+
+ readProgress.OnUpdate = func(s restic.Stat, d time.Duration, ticker bool) {
+ status := fmt.Sprintf("[%s] %s %d / %d items",
+ formatDuration(d),
+ formatPercent(s.Blobs, todo.Blobs),
+ s.Blobs, todo.Blobs)
+
+ if w := stdoutTerminalWidth(); w > 0 {
+ if len(status) > w {
+ max := w - len(status) - 4
+ status = status[:max] + "... "
+ }
+ }
+
+ PrintProgress("%s", status)
+ }
+
+ readProgress.OnDone = func(s restic.Stat, d time.Duration, ticker bool) {
+ fmt.Printf("\nduration: %s\n", formatDuration(d))
+ }
+
+ return readProgress
+}
+
+func runCheck(opts CheckOptions, gopts GlobalOptions, args []string) error {
+ if len(args) != 0 {
+ return errors.Fatal("check has no arguments")
+ }
+
+ repo, err := OpenRepository(gopts)
+ if err != nil {
+ return err
+ }
+
+ if !gopts.NoLock {
+ Verbosef("Create exclusive lock for repository\n")
+ lock, err := lockRepoExclusive(repo)
+ defer unlockRepo(lock)
+ if err != nil {
+ return err
+ }
+ }
+
+ chkr := checker.New(repo)
+
+ Verbosef("Load indexes\n")
+ hints, errs := chkr.LoadIndex(context.TODO())
+
+ dupFound := false
+ for _, hint := range hints {
+ Printf("%v\n", hint)
+ if _, ok := hint.(checker.ErrDuplicatePacks); ok {
+ dupFound = true
+ }
+ }
+
+ if dupFound {
+ Printf("\nrun `restic rebuild-index' to correct this\n")
+ }
+
+ if len(errs) > 0 {
+ for _, err := range errs {
+ Warnf("error: %v\n", err)
+ }
+ return errors.Fatal("LoadIndex returned errors")
+ }
+
+ errorsFound := false
+ errChan := make(chan error)
+
+ Verbosef("Check all packs\n")
+ go chkr.Packs(context.TODO(), errChan)
+
+ for err := range errChan {
+ errorsFound = true
+ fmt.Fprintf(os.Stderr, "%v\n", err)
+ }
+
+ Verbosef("Check snapshots, trees and blobs\n")
+ errChan = make(chan error)
+ go chkr.Structure(context.TODO(), errChan)
+
+ for err := range errChan {
+ errorsFound = true
+ if e, ok := err.(checker.TreeError); ok {
+ fmt.Fprintf(os.Stderr, "error for tree %v:\n", e.ID.Str())
+ for _, treeErr := range e.Errors {
+ fmt.Fprintf(os.Stderr, " %v\n", treeErr)
+ }
+ } else {
+ fmt.Fprintf(os.Stderr, "error: %v\n", err)
+ }
+ }
+
+ if opts.CheckUnused {
+ for _, id := range chkr.UnusedBlobs() {
+ Verbosef("unused blob %v\n", id.Str())
+ errorsFound = true
+ }
+ }
+
+ if opts.ReadData {
+ Verbosef("Read all data\n")
+
+ p := newReadProgress(gopts, restic.Stat{Blobs: chkr.CountPacks()})
+ errChan := make(chan error)
+
+ go chkr.ReadData(context.TODO(), p, errChan)
+
+ for err := range errChan {
+ errorsFound = true
+ fmt.Fprintf(os.Stderr, "%v\n", err)
+ }
+ }
+
+ if errorsFound {
+ return errors.Fatal("repository contains errors")
+ }
+ return nil
+}
diff --git a/cmd/restic/cmd_dump.go b/cmd/restic/cmd_dump.go
new file mode 100644
index 000000000..215de8dad
--- /dev/null
+++ b/cmd/restic/cmd_dump.go
@@ -0,0 +1,211 @@
+// xbuild debug
+
+package main
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "io"
+ "os"
+
+ "github.com/spf13/cobra"
+
+ "github.com/restic/restic/internal/errors"
+ "github.com/restic/restic/internal/pack"
+ "github.com/restic/restic/internal/repository"
+ "github.com/restic/restic/internal/restic"
+
+ "github.com/restic/restic/internal/worker"
+)
+
+var cmdDump = &cobra.Command{
+ Use: "dump [indexes|snapshots|trees|all|packs]",
+ Short: "Dump data structures",
+ Long: `
+The "dump" command dumps data structures from the repository as JSON objects. It
+is used for debugging purposes only.`,
+ DisableAutoGenTag: true,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ return runDump(globalOptions, args)
+ },
+}
+
+func init() {
+ cmdRoot.AddCommand(cmdDump)
+}
+
+func prettyPrintJSON(wr io.Writer, item interface{}) error {
+ buf, err := json.MarshalIndent(item, "", " ")
+ if err != nil {
+ return err
+ }
+
+ _, err = wr.Write(append(buf, '\n'))
+ return err
+}
+
+func debugPrintSnapshots(repo *repository.Repository, wr io.Writer) error {
+ for id := range repo.List(context.TODO(), restic.SnapshotFile) {
+ snapshot, err := restic.LoadSnapshot(context.TODO(), repo, id)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "LoadSnapshot(%v): %v", id.Str(), err)
+ continue
+ }
+
+ fmt.Fprintf(wr, "snapshot_id: %v\n", id)
+
+ err = prettyPrintJSON(wr, snapshot)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+const dumpPackWorkers = 10
+
+// Pack is the struct used in printPacks.
+type Pack struct {
+ Name string `json:"name"`
+
+ Blobs []Blob `json:"blobs"`
+}
+
+// Blob is the struct used in printPacks.
+type Blob struct {
+ Type restic.BlobType `json:"type"`
+ Length uint `json:"length"`
+ ID restic.ID `json:"id"`
+ Offset uint `json:"offset"`
+}
+
+func printPacks(repo *repository.Repository, wr io.Writer) error {
+ f := func(ctx context.Context, job worker.Job) (interface{}, error) {
+ name := job.Data.(string)
+
+ h := restic.Handle{Type: restic.DataFile, Name: name}
+
+ blobInfo, err := repo.Backend().Stat(ctx, h)
+ if err != nil {
+ return nil, err
+ }
+
+ blobs, err := pack.List(repo.Key(), restic.ReaderAt(repo.Backend(), h), blobInfo.Size)
+ if err != nil {
+ return nil, err
+ }
+
+ return blobs, nil
+ }
+
+ jobCh := make(chan worker.Job)
+ resCh := make(chan worker.Job)
+ wp := worker.New(context.TODO(), dumpPackWorkers, f, jobCh, resCh)
+
+ go func() {
+ for name := range repo.Backend().List(context.TODO(), restic.DataFile) {
+ jobCh <- worker.Job{Data: name}
+ }
+ close(jobCh)
+ }()
+
+ for job := range resCh {
+ name := job.Data.(string)
+
+ if job.Error != nil {
+ fmt.Fprintf(os.Stderr, "error for pack %v: %v\n", name, job.Error)
+ continue
+ }
+
+ entries := job.Result.([]restic.Blob)
+ p := Pack{
+ Name: name,
+ Blobs: make([]Blob, len(entries)),
+ }
+ for i, blob := range entries {
+ p.Blobs[i] = Blob{
+ Type: blob.Type,
+ Length: blob.Length,
+ ID: blob.ID,
+ Offset: blob.Offset,
+ }
+ }
+
+ prettyPrintJSON(os.Stdout, p)
+ }
+
+ wp.Wait()
+
+ return nil
+}
+
+func dumpIndexes(repo restic.Repository) error {
+ for id := range repo.List(context.TODO(), restic.IndexFile) {
+ fmt.Printf("index_id: %v\n", id)
+
+ idx, err := repository.LoadIndex(context.TODO(), repo, id)
+ if err != nil {
+ return err
+ }
+
+ err = idx.Dump(os.Stdout)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func runDump(gopts GlobalOptions, args []string) error {
+ if len(args) != 1 {
+ return errors.Fatal("type not specified")
+ }
+
+ repo, err := OpenRepository(gopts)
+ if err != nil {
+ return err
+ }
+
+ if !gopts.NoLock {
+ lock, err := lockRepo(repo)
+ defer unlockRepo(lock)
+ if err != nil {
+ return err
+ }
+ }
+
+ err = repo.LoadIndex(context.TODO())
+ if err != nil {
+ return err
+ }
+
+ tpe := args[0]
+
+ switch tpe {
+ case "indexes":
+ return dumpIndexes(repo)
+ case "snapshots":
+ return debugPrintSnapshots(repo, os.Stdout)
+ case "packs":
+ return printPacks(repo, os.Stdout)
+ case "all":
+ fmt.Printf("snapshots:\n")
+ err := debugPrintSnapshots(repo, os.Stdout)
+ if err != nil {
+ return err
+ }
+
+ fmt.Printf("\nindexes:\n")
+ err = dumpIndexes(repo)
+ if err != nil {
+ return err
+ }
+
+ return nil
+ default:
+ return errors.Fatalf("no such type %q", tpe)
+ }
+}
diff --git a/cmd/restic/cmd_find.go b/cmd/restic/cmd_find.go
new file mode 100644
index 000000000..16e631ef9
--- /dev/null
+++ b/cmd/restic/cmd_find.go
@@ -0,0 +1,308 @@
+package main
+
+import (
+ "context"
+ "encoding/json"
+ "path/filepath"
+ "strings"
+ "time"
+
+ "github.com/spf13/cobra"
+
+ "github.com/restic/restic/internal/debug"
+ "github.com/restic/restic/internal/errors"
+ "github.com/restic/restic/internal/restic"
+)
+
+var cmdFind = &cobra.Command{
+ Use: "find [flags] PATTERN",
+ Short: "Find a file or directory",
+ Long: `
+The "find" command searches for files or directories in snapshots stored in the
+repo. `,
+ DisableAutoGenTag: true,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ return runFind(findOptions, globalOptions, args)
+ },
+}
+
+// FindOptions bundles all options for the find command.
+type FindOptions struct {
+ Oldest string
+ Newest string
+ Snapshots []string
+ CaseInsensitive bool
+ ListLong bool
+ Host string
+ Paths []string
+ Tags restic.TagLists
+}
+
+var findOptions FindOptions
+
+func init() {
+ cmdRoot.AddCommand(cmdFind)
+
+ f := cmdFind.Flags()
+ f.StringVarP(&findOptions.Oldest, "oldest", "O", "", "oldest modification date/time")
+ f.StringVarP(&findOptions.Newest, "newest", "N", "", "newest modification date/time")
+ f.StringArrayVarP(&findOptions.Snapshots, "snapshot", "s", nil, "snapshot `id` to search in (can be given multiple times)")
+ f.BoolVarP(&findOptions.CaseInsensitive, "ignore-case", "i", false, "ignore case for pattern")
+ f.BoolVarP(&findOptions.ListLong, "long", "l", false, "use a long listing format showing size and mode")
+
+ f.StringVarP(&findOptions.Host, "host", "H", "", "only consider snapshots for this `host`, when no snapshot ID is given")
+ f.Var(&findOptions.Tags, "tag", "only consider snapshots which include this `taglist`, when no snapshot-ID is given")
+ f.StringArrayVar(&findOptions.Paths, "path", nil, "only consider snapshots which include this (absolute) `path`, when no snapshot-ID is given")
+}
+
+type findPattern struct {
+ oldest, newest time.Time
+ pattern string
+ ignoreCase bool
+}
+
+var timeFormats = []string{
+ "2006-01-02",
+ "2006-01-02 15:04",
+ "2006-01-02 15:04:05",
+ "2006-01-02 15:04:05 -0700",
+ "2006-01-02 15:04:05 MST",
+ "02.01.2006",
+ "02.01.2006 15:04",
+ "02.01.2006 15:04:05",
+ "02.01.2006 15:04:05 -0700",
+ "02.01.2006 15:04:05 MST",
+ "Mon Jan 2 15:04:05 -0700 MST 2006",
+}
+
+// parseTime parses str against the layouts in timeFormats, interpreting
+// times without zone information in the local time zone. The first layout
+// that matches wins; a fatal error is returned if none does.
+func parseTime(str string) (time.Time, error) {
+	// Name the loop variable "layout" so it does not shadow the fmt package.
+	for _, layout := range timeFormats {
+		if t, err := time.ParseInLocation(layout, str, time.Local); err == nil {
+			return t, nil
+		}
+	}
+
+	return time.Time{}, errors.Fatalf("unable to parse time: %q", str)
+}
+
+type statefulOutput struct {
+ ListLong bool
+ JSON bool
+ inuse bool
+ newsn *restic.Snapshot
+ oldsn *restic.Snapshot
+ hits int
+}
+
+// PrintJSON emits one match as a JSON object, maintaining the streaming
+// output state: it opens the surrounding array on first use, and starts a
+// new per-snapshot "matches" object whenever the current snapshot changes.
+func (s *statefulOutput) PrintJSON(prefix string, node *restic.Node) {
+	type findNode restic.Node
+	b, err := json.Marshal(struct {
+		// Add these attributes
+		Path        string `json:"path,omitempty"`
+		Permissions string `json:"permissions,omitempty"`
+
+		*findNode
+
+		// Make the following attributes disappear
+		Name               byte `json:"name,omitempty"`
+		Inode              byte `json:"inode,omitempty"`
+		ExtendedAttributes byte `json:"extended_attributes,omitempty"`
+		Device             byte `json:"device,omitempty"`
+		Content            byte `json:"content,omitempty"`
+		Subtree            byte `json:"subtree,omitempty"`
+	}{
+		Path:        filepath.Join(prefix, node.Name),
+		Permissions: node.Mode.String(),
+		findNode:    (*findNode)(node),
+	})
+	if err != nil {
+		Warnf("Marshall failed: %v\n", err)
+		return
+	}
+	if !s.inuse {
+		Printf("[")
+		s.inuse = true
+	}
+	if s.newsn != s.oldsn {
+		if s.oldsn != nil {
+			Printf("],\"hits\":%d,\"snapshot\":%q},", s.hits, s.oldsn.ID())
+		}
+		Printf(`{"matches":[`)
+		s.oldsn = s.newsn
+		s.hits = 0
+	}
+	if s.hits > 0 {
+		Printf(",")
+	}
+	// BUG FIX: don't use the marshaled JSON as a format string — a literal
+	// '%' in a file name would be misinterpreted as a formatting verb.
+	Printf("%s", b)
+	s.hits++
+}
+
+// PrintNormal emits one match in plain-text format, printing a snapshot
+// header line whenever the current snapshot changes.
+func (s *statefulOutput) PrintNormal(prefix string, node *restic.Node) {
+	if s.newsn != s.oldsn {
+		if s.oldsn != nil {
+			Verbosef("\n")
+		}
+		s.oldsn = s.newsn
+		Verbosef("Found matching entries in snapshot %s\n", s.oldsn.ID())
+	}
+	// BUG FIX: pass the formatted node through a %s verb instead of using
+	// it as the format string — file names may contain '%'.
+	Printf("%s\n", formatNode(prefix, node, s.ListLong))
+}
+
+// Print dispatches a single match to the configured output format.
+func (s *statefulOutput) Print(prefix string, node *restic.Node) {
+	if !s.JSON {
+		s.PrintNormal(prefix, node)
+		return
+	}
+	s.PrintJSON(prefix, node)
+}
+
+// Finish terminates the output. In JSON mode it closes the last
+// snapshot's match list and the enclosing array, or prints "[]" when
+// nothing was emitted at all; in plain-text mode it does nothing.
+func (s *statefulOutput) Finish() {
+	if s.JSON {
+		// do some finishing up
+		if s.oldsn != nil {
+			Printf("],\"hits\":%d,\"snapshot\":%q}", s.hits, s.oldsn.ID())
+		}
+		if s.inuse {
+			Printf("]\n")
+		} else {
+			// No snapshot produced any output; emit an empty array so the
+			// result is still valid JSON.
+			Printf("[]\n")
+		}
+		return
+	}
+}
+
+// Finder bundles information needed to find a file or directory.
+type Finder struct {
+ repo restic.Repository
+ pat findPattern
+ out statefulOutput
+ notfound restic.IDSet
+}
+
+// findInTree recursively searches the tree with the given ID for nodes
+// whose (possibly lowercased) name matches the finder's pattern and is
+// within the configured modification-time window, printing each match.
+// prefix is the path of this tree inside the snapshot. Trees recorded in
+// f.notfound are skipped entirely.
+//
+// NOTE(review): `found` is only set for direct children of this tree, yet
+// the tree is added to notfound when no direct child matched — a tree
+// whose match lies deeper in a subtree is cached as "not found" and
+// skipped when shared by a later snapshot. Confirm this is acceptable.
+func (f *Finder) findInTree(treeID restic.ID, prefix string) error {
+	if f.notfound.Has(treeID) {
+		debug.Log("%v skipping tree %v, has already been checked", prefix, treeID.Str())
+		return nil
+	}
+
+	debug.Log("%v checking tree %v\n", prefix, treeID.Str())
+
+	tree, err := f.repo.LoadTree(context.TODO(), treeID)
+	if err != nil {
+		return err
+	}
+
+	var found bool
+	for _, node := range tree.Nodes {
+		debug.Log(" testing entry %q\n", node.Name)
+
+		name := node.Name
+		if f.pat.ignoreCase {
+			name = strings.ToLower(name)
+		}
+
+		m, err := filepath.Match(f.pat.pattern, name)
+		if err != nil {
+			return err
+		}
+
+		if m {
+			if !f.pat.oldest.IsZero() && node.ModTime.Before(f.pat.oldest) {
+				debug.Log(" ModTime is older than %s\n", f.pat.oldest)
+				continue
+			}
+
+			if !f.pat.newest.IsZero() && node.ModTime.After(f.pat.newest) {
+				debug.Log(" ModTime is newer than %s\n", f.pat.newest)
+				continue
+			}
+
+			debug.Log(" found match\n")
+			found = true
+			f.out.Print(prefix, node)
+		}
+
+		if node.Type == "dir" {
+			// NOTE(review): node.Subtree is dereferenced without a nil
+			// check; printTree in cmd_ls guards against nil — confirm it
+			// cannot be nil here.
+			if err := f.findInTree(*node.Subtree, filepath.Join(prefix, node.Name)); err != nil {
+				return err
+			}
+		}
+	}
+
+	if !found {
+		f.notfound.Insert(treeID)
+	}
+
+	return nil
+}
+
+// findInSnapshot searches one snapshot for matching entries, starting at
+// the snapshot's root tree.
+func (f *Finder) findInSnapshot(sn *restic.Snapshot) error {
+	debug.Log("searching in snapshot %s\n for entries within [%s %s]", sn.ID(), f.pat.oldest, f.pat.newest)
+
+	f.out.newsn = sn
+	return f.findInTree(*sn.Tree, string(filepath.Separator))
+}
+
+// runFind implements the "find" command: it parses the pattern and the
+// optional time bounds, opens and locks the repository, and searches all
+// selected snapshots for matching entries.
+func runFind(opts FindOptions, gopts GlobalOptions, args []string) error {
+	if len(args) != 1 {
+		return errors.Fatal("wrong number of arguments")
+	}
+
+	var err error
+	pat := findPattern{pattern: args[0]}
+	if opts.CaseInsensitive {
+		// Names are lowercased before matching, so lowercase the pattern
+		// once up front.
+		pat.pattern = strings.ToLower(pat.pattern)
+		pat.ignoreCase = true
+	}
+
+	if opts.Oldest != "" {
+		if pat.oldest, err = parseTime(opts.Oldest); err != nil {
+			return err
+		}
+	}
+
+	if opts.Newest != "" {
+		if pat.newest, err = parseTime(opts.Newest); err != nil {
+			return err
+		}
+	}
+
+	repo, err := OpenRepository(gopts)
+	if err != nil {
+		return err
+	}
+
+	if !gopts.NoLock {
+		lock, err := lockRepo(repo)
+		defer unlockRepo(lock)
+		if err != nil {
+			return err
+		}
+	}
+
+	if err = repo.LoadIndex(context.TODO()); err != nil {
+		return err
+	}
+
+	ctx, cancel := context.WithCancel(gopts.ctx)
+	defer cancel()
+
+	f := &Finder{
+		repo:     repo,
+		pat:      pat,
+		out:      statefulOutput{ListLong: opts.ListLong, JSON: globalOptions.JSON},
+		notfound: restic.NewIDSet(),
+	}
+	for sn := range FindFilteredSnapshots(ctx, repo, opts.Host, opts.Tags, opts.Paths, opts.Snapshots) {
+		if err = f.findInSnapshot(sn); err != nil {
+			return err
+		}
+	}
+	// Close the JSON output stream (no-op in plain-text mode).
+	f.out.Finish()
+
+	return nil
+}
diff --git a/cmd/restic/cmd_forget.go b/cmd/restic/cmd_forget.go
new file mode 100644
index 000000000..e26e33696
--- /dev/null
+++ b/cmd/restic/cmd_forget.go
@@ -0,0 +1,238 @@
+package main
+
+import (
+ "context"
+ "encoding/json"
+ "sort"
+ "strings"
+
+ "github.com/restic/restic/internal/errors"
+ "github.com/restic/restic/internal/restic"
+ "github.com/spf13/cobra"
+)
+
+var cmdForget = &cobra.Command{
+ Use: "forget [flags] [snapshot ID] [...]",
+ Short: "Remove snapshots from the repository",
+ Long: `
+The "forget" command removes snapshots according to a policy. Please note that
+this command really only deletes the snapshot object in the repository, which
+is a reference to data stored there. In order to remove this (now unreferenced)
+data after 'forget' was run successfully, see the 'prune' command. `,
+ DisableAutoGenTag: true,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ return runForget(forgetOptions, globalOptions, args)
+ },
+}
+
+// ForgetOptions collects all options for the forget command.
+type ForgetOptions struct {
+ Last int
+ Hourly int
+ Daily int
+ Weekly int
+ Monthly int
+ Yearly int
+ KeepTags restic.TagLists
+
+ Host string
+ Tags restic.TagLists
+ Paths []string
+
+ // Grouping
+ GroupBy string
+ DryRun bool
+ Prune bool
+}
+
+var forgetOptions ForgetOptions
+
+func init() {
+ cmdRoot.AddCommand(cmdForget)
+
+ f := cmdForget.Flags()
+ f.IntVarP(&forgetOptions.Last, "keep-last", "l", 0, "keep the last `n` snapshots")
+ f.IntVarP(&forgetOptions.Hourly, "keep-hourly", "H", 0, "keep the last `n` hourly snapshots")
+ f.IntVarP(&forgetOptions.Daily, "keep-daily", "d", 0, "keep the last `n` daily snapshots")
+ f.IntVarP(&forgetOptions.Weekly, "keep-weekly", "w", 0, "keep the last `n` weekly snapshots")
+ f.IntVarP(&forgetOptions.Monthly, "keep-monthly", "m", 0, "keep the last `n` monthly snapshots")
+ f.IntVarP(&forgetOptions.Yearly, "keep-yearly", "y", 0, "keep the last `n` yearly snapshots")
+
+ f.Var(&forgetOptions.KeepTags, "keep-tag", "keep snapshots with this `taglist` (can be specified multiple times)")
+ // Sadly the commonly used shortcut `H` is already used.
+ f.StringVar(&forgetOptions.Host, "host", "", "only consider snapshots with the given `host`")
+ // Deprecated since 2017-03-07.
+ f.StringVar(&forgetOptions.Host, "hostname", "", "only consider snapshots with the given `hostname` (deprecated)")
+ f.Var(&forgetOptions.Tags, "tag", "only consider snapshots which include this `taglist` in the format `tag[,tag,...]` (can be specified multiple times)")
+ f.StringArrayVar(&forgetOptions.Paths, "path", nil, "only consider snapshots which include this (absolute) `path` (can be specified multiple times)")
+
+ f.StringVarP(&forgetOptions.GroupBy, "group-by", "g", "host,paths", "string for grouping snapshots by host,paths,tags")
+ f.BoolVarP(&forgetOptions.DryRun, "dry-run", "n", false, "do not delete anything, just print what would be done")
+ f.BoolVar(&forgetOptions.Prune, "prune", false, "automatically run the 'prune' command if snapshots have been removed")
+
+ f.SortFlags = false
+}
+
+// runForget implements the "forget" command. When explicit snapshot IDs
+// are given they are removed directly; otherwise snapshots are grouped
+// according to --group-by, the expire policy is applied per group, and
+// rejected snapshots are removed. If --prune is set and snapshots were
+// removed, prune is run afterwards.
+func runForget(opts ForgetOptions, gopts GlobalOptions, args []string) error {
+	repo, err := OpenRepository(gopts)
+	if err != nil {
+		return err
+	}
+
+	// Removing snapshots modifies the repository, so take an exclusive lock.
+	lock, err := lockRepoExclusive(repo)
+	defer unlockRepo(lock)
+	if err != nil {
+		return err
+	}
+
+	// group by hostname and dirs
+	type key struct {
+		Hostname string
+		Paths    []string
+		Tags     []string
+	}
+	snapshotGroups := make(map[string]restic.Snapshots)
+
+	var GroupByTag bool
+	var GroupByHost bool
+	var GroupByPath bool
+	var GroupOptionList []string
+
+	GroupOptionList = strings.Split(opts.GroupBy, ",")
+
+	for _, option := range GroupOptionList {
+		switch option {
+		case "host":
+			GroupByHost = true
+		case "paths":
+			GroupByPath = true
+		case "tags":
+			GroupByTag = true
+		case "":
+		default:
+			return errors.Fatal("unknown grouping option: '" + option + "'")
+		}
+	}
+
+	ctx, cancel := context.WithCancel(gopts.ctx)
+	defer cancel()
+	for sn := range FindFilteredSnapshots(ctx, repo, opts.Host, opts.Tags, opts.Paths, args) {
+		if len(args) > 0 {
+			// When explicit snapshots args are given, remove them immediately.
+			if !opts.DryRun {
+				h := restic.Handle{Type: restic.SnapshotFile, Name: sn.ID().String()}
+				if err = repo.Backend().Remove(context.TODO(), h); err != nil {
+					return err
+				}
+				Verbosef("removed snapshot %v\n", sn.ID().Str())
+			} else {
+				Verbosef("would have removed snapshot %v\n", sn.ID().Str())
+			}
+		} else {
+			// Determine the grouping key for this snapshot.
+			var tags []string
+			var hostname string
+			var paths []string
+
+			if GroupByTag {
+				tags = sn.Tags
+				sort.StringSlice(tags).Sort()
+			}
+			if GroupByHost {
+				hostname = sn.Hostname
+			}
+			if GroupByPath {
+				paths = sn.Paths
+			}
+
+			sort.StringSlice(sn.Paths).Sort()
+
+			// Use the JSON encoding of the key struct as the map key so
+			// that snapshots with equal grouping values end up together.
+			k, err := json.Marshal(key{Tags: tags, Hostname: hostname, Paths: paths})
+			if err != nil {
+				return err
+			}
+			snapshotGroups[string(k)] = append(snapshotGroups[string(k)], sn)
+		}
+	}
+	if len(args) > 0 {
+		// Explicit snapshot IDs were handled above; no policy to apply.
+		return nil
+	}
+
+	policy := restic.ExpirePolicy{
+		Last:    opts.Last,
+		Hourly:  opts.Hourly,
+		Daily:   opts.Daily,
+		Weekly:  opts.Weekly,
+		Monthly: opts.Monthly,
+		Yearly:  opts.Yearly,
+		Tags:    opts.KeepTags,
+	}
+
+	if policy.Empty() {
+		Verbosef("no policy was specified, no snapshots will be removed\n")
+		return nil
+	}
+
+	removeSnapshots := 0
+	for k, snapshotGroup := range snapshotGroups {
+		var key key
+		// BUG FIX: return the actual unmarshal error; the original
+		// returned the stale (possibly nil) err from the outer scope.
+		if err := json.Unmarshal([]byte(k), &key); err != nil {
+			return err
+		}
+
+		// Info
+		Verbosef("snapshots")
+		var infoStrings []string
+		if GroupByTag {
+			infoStrings = append(infoStrings, "tags ["+strings.Join(key.Tags, ", ")+"]")
+		}
+		if GroupByHost {
+			infoStrings = append(infoStrings, "host ["+key.Hostname+"]")
+		}
+		if GroupByPath {
+			infoStrings = append(infoStrings, "paths ["+strings.Join(key.Paths, ", ")+"]")
+		}
+		if infoStrings != nil {
+			Verbosef(" for (" + strings.Join(infoStrings, ", ") + ")")
+		}
+		Verbosef(":\n\n")
+
+		keep, remove := restic.ApplyPolicy(snapshotGroup, policy)
+
+		if len(keep) != 0 && !gopts.Quiet {
+			Printf("keep %d snapshots:\n", len(keep))
+			PrintSnapshots(globalOptions.stdout, keep, false)
+			Printf("\n")
+		}
+
+		if len(remove) != 0 && !gopts.Quiet {
+			Printf("remove %d snapshots:\n", len(remove))
+			PrintSnapshots(globalOptions.stdout, remove, false)
+			Printf("\n")
+		}
+
+		removeSnapshots += len(remove)
+
+		if !opts.DryRun {
+			for _, sn := range remove {
+				h := restic.Handle{Type: restic.SnapshotFile, Name: sn.ID().String()}
+				err = repo.Backend().Remove(context.TODO(), h)
+				if err != nil {
+					return err
+				}
+			}
+		}
+	}
+
+	if removeSnapshots > 0 && opts.Prune {
+		Verbosef("%d snapshots have been removed, running prune\n", removeSnapshots)
+		if !opts.DryRun {
+			return pruneRepository(gopts, repo)
+		}
+	}
+
+	return nil
+}
diff --git a/cmd/restic/cmd_init.go b/cmd/restic/cmd_init.go
new file mode 100644
index 000000000..d4bcd9a0e
--- /dev/null
+++ b/cmd/restic/cmd_init.go
@@ -0,0 +1,59 @@
+package main
+
+import (
+ "context"
+
+ "github.com/restic/restic/internal/errors"
+ "github.com/restic/restic/internal/repository"
+
+ "github.com/spf13/cobra"
+)
+
+var cmdInit = &cobra.Command{
+ Use: "init",
+ Short: "Initialize a new repository",
+ Long: `
+The "init" command initializes a new repository.
+`,
+ DisableAutoGenTag: true,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ return runInit(globalOptions, args)
+ },
+}
+
+func init() {
+ cmdRoot.AddCommand(cmdInit)
+}
+
+// runInit implements the "init" command: it creates the backend at the
+// configured location, prompts twice for a new password, and initializes
+// a fresh repository there.
+func runInit(gopts GlobalOptions, args []string) error {
+	if gopts.Repo == "" {
+		return errors.Fatal("Please specify repository location (-r)")
+	}
+
+	be, err := create(gopts.Repo, gopts.extended)
+	if err != nil {
+		return errors.Fatalf("create backend at %s failed: %v\n", gopts.Repo, err)
+	}
+
+	gopts.password, err = ReadPasswordTwice(gopts,
+		"enter password for new backend: ",
+		"enter password again: ")
+	if err != nil {
+		return err
+	}
+
+	s := repository.New(be)
+
+	err = s.Init(context.TODO(), gopts.password)
+	if err != nil {
+		return errors.Fatalf("create key in backend at %s failed: %v\n", gopts.Repo, err)
+	}
+
+	Verbosef("created restic backend %v at %s\n", s.Config().ID[:10], gopts.Repo)
+	Verbosef("\n")
+	Verbosef("Please note that knowledge of your password is required to access\n")
+	Verbosef("the repository. Losing your password means that your data is\n")
+	Verbosef("irrecoverably lost.\n")
+
+	return nil
+}
diff --git a/cmd/restic/cmd_key.go b/cmd/restic/cmd_key.go
new file mode 100644
index 000000000..1fbbd0f3f
--- /dev/null
+++ b/cmd/restic/cmd_key.go
@@ -0,0 +1,180 @@
+package main
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/restic/restic/internal/errors"
+ "github.com/restic/restic/internal/repository"
+ "github.com/restic/restic/internal/restic"
+
+ "github.com/spf13/cobra"
+)
+
+var cmdKey = &cobra.Command{
+ Use: "key [list|add|remove|passwd] [ID]",
+ Short: "Manage keys (passwords)",
+ Long: `
+The "key" command manages keys (passwords) for accessing the repository.
+`,
+ DisableAutoGenTag: true,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ return runKey(globalOptions, args)
+ },
+}
+
+func init() {
+ cmdRoot.AddCommand(cmdKey)
+}
+
+// listKeys prints a table of all keys in the repository; the key that is
+// currently in use is marked with a '*'. Keys that fail to load are
+// reported as warnings and skipped.
+func listKeys(ctx context.Context, s *repository.Repository) error {
+	tab := NewTable()
+	tab.Header = fmt.Sprintf(" %-10s %-10s %-10s %s", "ID", "User", "Host", "Created")
+	tab.RowFormat = "%s%-10s %-10s %-10s %s"
+
+	for id := range s.List(ctx, restic.KeyFile) {
+		k, err := repository.LoadKey(ctx, s, id.String())
+		if err != nil {
+			Warnf("LoadKey() failed: %v\n", err)
+			continue
+		}
+
+		var current string
+		if id.String() == s.KeyName() {
+			current = "*"
+		} else {
+			current = " "
+		}
+		tab.Rows = append(tab.Rows, []interface{}{current, id.Str(),
+			k.Username, k.Hostname, k.Created.Format(TimeFormat)})
+	}
+
+	return tab.Write(globalOptions.stdout)
+}
+
+// testKeyNewPassword is used to set a new password during integration testing.
+var testKeyNewPassword string
+
+// getNewPassword prompts the user twice for a new key password. During
+// integration tests testKeyNewPassword short-circuits the prompt.
+func getNewPassword(gopts GlobalOptions) (string, error) {
+	if testKeyNewPassword != "" {
+		return testKeyNewPassword, nil
+	}
+
+	// Since we already have an open repository, temporary remove the password
+	// to prompt the user for the passwd.
+	newopts := gopts
+	newopts.password = ""
+
+	return ReadPasswordTwice(newopts,
+		"enter password for new key: ",
+		"enter password again: ")
+}
+
+// addKey prompts for a new password and stores an additional key (using
+// the currently loaded master key) in the repository.
+func addKey(gopts GlobalOptions, repo *repository.Repository) error {
+	pw, err := getNewPassword(gopts)
+	if err != nil {
+		return err
+	}
+
+	id, err := repository.AddKey(context.TODO(), repo, pw, repo.Key())
+	if err != nil {
+		return errors.Fatalf("creating new key failed: %v\n", err)
+	}
+
+	Verbosef("saved new key as %s\n", id)
+
+	return nil
+}
+
+// deleteKey removes the key file with the given name from the backend.
+// The key currently used to access the repository cannot be removed.
+func deleteKey(repo *repository.Repository, name string) error {
+	if repo.KeyName() == name {
+		return errors.Fatal("refusing to remove key currently used to access repository")
+	}
+
+	handle := restic.Handle{Type: restic.KeyFile, Name: name}
+	if err := repo.Backend().Remove(context.TODO(), handle); err != nil {
+		return err
+	}
+
+	Verbosef("removed key %v\n", name)
+	return nil
+}
+
+// changePassword replaces the current key: it prompts for a new password,
+// saves a new key, and then removes the key that was used to open the
+// repository. The new key is written before the old one is removed, so a
+// failure in between leaves both keys usable rather than locking the
+// user out.
+func changePassword(gopts GlobalOptions, repo *repository.Repository) error {
+	pw, err := getNewPassword(gopts)
+	if err != nil {
+		return err
+	}
+
+	id, err := repository.AddKey(context.TODO(), repo, pw, repo.Key())
+	if err != nil {
+		return errors.Fatalf("creating new key failed: %v\n", err)
+	}
+
+	h := restic.Handle{Type: restic.KeyFile, Name: repo.KeyName()}
+	err = repo.Backend().Remove(context.TODO(), h)
+	if err != nil {
+		return err
+	}
+
+	Verbosef("saved new key as %s\n", id)
+
+	return nil
+}
+
+// runKey implements the "key" command, dispatching to list/add/remove/
+// passwd. Read-only subcommands take a shared lock; remove and passwd
+// modify the key set and take an exclusive lock.
+func runKey(gopts GlobalOptions, args []string) error {
+	// "remove" takes exactly one key ID; the other subcommands take none.
+	if len(args) < 1 || (args[0] == "remove" && len(args) != 2) || (args[0] != "remove" && len(args) != 1) {
+		return errors.Fatal("wrong number of arguments")
+	}
+
+	ctx, cancel := context.WithCancel(gopts.ctx)
+	defer cancel()
+
+	repo, err := OpenRepository(gopts)
+	if err != nil {
+		return err
+	}
+
+	switch args[0] {
+	case "list":
+		lock, err := lockRepo(repo)
+		defer unlockRepo(lock)
+		if err != nil {
+			return err
+		}
+
+		return listKeys(ctx, repo)
+	case "add":
+		lock, err := lockRepo(repo)
+		defer unlockRepo(lock)
+		if err != nil {
+			return err
+		}
+
+		return addKey(gopts, repo)
+	case "remove":
+		lock, err := lockRepoExclusive(repo)
+		defer unlockRepo(lock)
+		if err != nil {
+			return err
+		}
+
+		// Resolve the (possibly abbreviated) key ID prefix to a full ID.
+		id, err := restic.Find(repo.Backend(), restic.KeyFile, args[1])
+		if err != nil {
+			return err
+		}
+
+		return deleteKey(repo, id)
+	case "passwd":
+		lock, err := lockRepoExclusive(repo)
+		defer unlockRepo(lock)
+		if err != nil {
+			return err
+		}
+
+		return changePassword(gopts, repo)
+	}
+
+	return nil
+}
diff --git a/cmd/restic/cmd_list.go b/cmd/restic/cmd_list.go
new file mode 100644
index 000000000..ea8fb3876
--- /dev/null
+++ b/cmd/restic/cmd_list.go
@@ -0,0 +1,82 @@
+package main
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/restic/restic/internal/errors"
+ "github.com/restic/restic/internal/index"
+ "github.com/restic/restic/internal/restic"
+
+ "github.com/spf13/cobra"
+)
+
+var cmdList = &cobra.Command{
+ Use: "list [blobs|packs|index|snapshots|keys|locks]",
+ Short: "List objects in the repository",
+ Long: `
+The "list" command allows listing objects in the repository based on type.
+`,
+ DisableAutoGenTag: true,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ return runList(globalOptions, args)
+ },
+}
+
+func init() {
+ cmdRoot.AddCommand(cmdList)
+}
+
+// runList implements the "list" command: it prints the IDs of all objects
+// of the requested file type, or — for "blobs" — loads the full index and
+// prints the type and ID of every blob in every pack.
+func runList(opts GlobalOptions, args []string) error {
+	if len(args) != 1 {
+		return errors.Fatal("type not specified")
+	}
+
+	repo, err := OpenRepository(opts)
+	if err != nil {
+		return err
+	}
+
+	if !opts.NoLock {
+		// Listing only reads, so a shared lock suffices.
+		lock, err := lockRepo(repo)
+		defer unlockRepo(lock)
+		if err != nil {
+			return err
+		}
+	}
+
+	var t restic.FileType
+	switch args[0] {
+	case "packs":
+		t = restic.DataFile
+	case "index":
+		t = restic.IndexFile
+	case "snapshots":
+		t = restic.SnapshotFile
+	case "keys":
+		t = restic.KeyFile
+	case "locks":
+		t = restic.LockFile
+	case "blobs":
+		// Blobs are not backend files; enumerate them via the index.
+		idx, err := index.Load(context.TODO(), repo, nil)
+		if err != nil {
+			return err
+		}
+
+		for _, pack := range idx.Packs {
+			for _, entry := range pack.Entries {
+				fmt.Printf("%v %v\n", entry.Type, entry.ID)
+			}
+		}
+
+		return nil
+	default:
+		return errors.Fatal("invalid type")
+	}
+
+	for id := range repo.List(context.TODO(), t) {
+		Printf("%s\n", id)
+	}
+
+	return nil
+}
diff --git a/cmd/restic/cmd_ls.go b/cmd/restic/cmd_ls.go
new file mode 100644
index 000000000..7ea0759a5
--- /dev/null
+++ b/cmd/restic/cmd_ls.go
@@ -0,0 +1,92 @@
+package main
+
+import (
+ "context"
+ "path/filepath"
+
+ "github.com/spf13/cobra"
+
+ "github.com/restic/restic/internal/errors"
+ "github.com/restic/restic/internal/repository"
+ "github.com/restic/restic/internal/restic"
+)
+
+var cmdLs = &cobra.Command{
+ Use: "ls [flags] [snapshot-ID ...]",
+ Short: "List files in a snapshot",
+ Long: `
+The "ls" command allows listing files and directories in a snapshot.
+
+The special snapshot-ID "latest" can be used to list files and directories of the latest snapshot in the repository.
+`,
+ DisableAutoGenTag: true,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ return runLs(lsOptions, globalOptions, args)
+ },
+}
+
+// LsOptions collects all options for the ls command.
+type LsOptions struct {
+ ListLong bool
+ Host string
+ Tags restic.TagLists
+ Paths []string
+}
+
+var lsOptions LsOptions
+
+func init() {
+ cmdRoot.AddCommand(cmdLs)
+
+ flags := cmdLs.Flags()
+ flags.BoolVarP(&lsOptions.ListLong, "long", "l", false, "use a long listing format showing size and mode")
+
+ flags.StringVarP(&lsOptions.Host, "host", "H", "", "only consider snapshots for this `host`, when no snapshot ID is given")
+ flags.Var(&lsOptions.Tags, "tag", "only consider snapshots which include this `taglist`, when no snapshot ID is given")
+ flags.StringArrayVar(&lsOptions.Paths, "path", nil, "only consider snapshots which include this (absolute) `path`, when no snapshot ID is given")
+}
+
+// printTree lists the tree with the given ID, prefixing each entry with
+// prefix, and recurses into subdirectories. Directory nodes with a nil
+// Subtree are printed but not descended into.
+func printTree(repo *repository.Repository, id *restic.ID, prefix string) error {
+	tree, err := repo.LoadTree(context.TODO(), *id)
+	if err != nil {
+		return err
+	}
+
+	for _, entry := range tree.Nodes {
+		Printf("%s\n", formatNode(prefix, entry, lsOptions.ListLong))
+
+		if entry.Type == "dir" && entry.Subtree != nil {
+			if err = printTree(repo, entry.Subtree, filepath.Join(prefix, entry.Name)); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+// runLs implements the "ls" command: it loads the index and prints the
+// file tree of every selected snapshot. Either explicit snapshot IDs or
+// at least one filter (host/tag/path) must be given.
+func runLs(opts LsOptions, gopts GlobalOptions, args []string) error {
+	if len(args) == 0 && opts.Host == "" && len(opts.Tags) == 0 && len(opts.Paths) == 0 {
+		return errors.Fatal("Invalid arguments, either give one or more snapshot IDs or set filters.")
+	}
+
+	repo, err := OpenRepository(gopts)
+	if err != nil {
+		return err
+	}
+
+	if err = repo.LoadIndex(context.TODO()); err != nil {
+		return err
+	}
+
+	ctx, cancel := context.WithCancel(gopts.ctx)
+	defer cancel()
+	for sn := range FindFilteredSnapshots(ctx, repo, opts.Host, opts.Tags, opts.Paths, args) {
+		// BUG FIX: the header message contained a stray ')' after the
+		// timestamp ("at %s):").
+		Verbosef("snapshot %s of %v at %s:\n", sn.ID().Str(), sn.Paths, sn.Time)
+
+		if err = printTree(repo, sn.Tree, string(filepath.Separator)); err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/cmd/restic/cmd_manpage.go b/cmd/restic/cmd_manpage.go
new file mode 100644
index 000000000..1d39f4242
--- /dev/null
+++ b/cmd/restic/cmd_manpage.go
@@ -0,0 +1,70 @@
+package main
+
+import (
+ "os"
+ "time"
+
+ "github.com/restic/restic/internal/errors"
+ "github.com/spf13/cobra"
+ "github.com/spf13/cobra/doc"
+)
+
+var cmdManpage = &cobra.Command{
+ Use: "manpage [command]",
+ Short: "Generate manual pages",
+ Long: `
+The "manpage" command generates a manual page for a single command. It can also
+be used to write all manual pages to a directory. If the output directory is
+set and no command is specified, all manpages are written to the directory.
+`,
+ DisableAutoGenTag: true,
+ RunE: runManpage,
+}
+
+var manpageOpts = struct {
+ OutputDir string
+}{}
+
+func init() {
+ cmdRoot.AddCommand(cmdManpage)
+ fs := cmdManpage.Flags()
+ fs.StringVar(&manpageOpts.OutputDir, "output-dir", "", "write man pages to this `directory`")
+}
+
+// runManpage generates man pages: with --output-dir set it writes pages
+// for all commands into that directory, otherwise it writes the page for
+// the single named command to stdout.
+func runManpage(cmd *cobra.Command, args []string) error {
+	// use a fixed date for the man pages so that generating them is deterministic
+	date, err := time.Parse("Jan 2006", "Jan 2017")
+	if err != nil {
+		return err
+	}
+
+	header := &doc.GenManHeader{
+		Title:   "restic backup",
+		Section: "1",
+		Source:  "generated by `restic manpage`",
+		Date:    &date,
+	}
+
+	dir := manpageOpts.OutputDir
+	if dir != "" {
+		Verbosef("writing man pages to directory %v\n", dir)
+		return doc.GenManTree(cmdRoot, header, dir)
+	}
+
+	switch {
+	case len(args) == 0:
+		return errors.Fatalf("no command given")
+	case len(args) > 1:
+		return errors.Fatalf("more than one command given: %v", args)
+	}
+
+	name := args[0]
+
+	for _, cmd := range cmdRoot.Commands() {
+		if cmd.Name() == name {
+			return doc.GenMan(cmd, header, os.Stdout)
+		}
+	}
+
+	// BUG FIX: report the unknown command name, not the whole args slice.
+	return errors.Fatalf("command %q is not known", name)
+}
diff --git a/cmd/restic/cmd_migrate.go b/cmd/restic/cmd_migrate.go
new file mode 100644
index 000000000..d5738595e
--- /dev/null
+++ b/cmd/restic/cmd_migrate.go
@@ -0,0 +1,108 @@
+package main
+
+import (
+ "github.com/restic/restic/internal/migrations"
+ "github.com/restic/restic/internal/restic"
+
+ "github.com/spf13/cobra"
+)
+
+var cmdMigrate = &cobra.Command{
+ Use: "migrate [name]",
+ Short: "Apply migrations",
+ Long: `
+The "migrate" command applies migrations to a repository. When no migration
+name is explicitly given, a list of migrations that can be applied is printed.
+`,
+ DisableAutoGenTag: true,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ return runMigrate(migrateOptions, globalOptions, args)
+ },
+}
+
+// MigrateOptions bundles all options for the 'check' command.
+type MigrateOptions struct {
+ Force bool
+}
+
+var migrateOptions MigrateOptions
+
+func init() {
+ cmdRoot.AddCommand(cmdMigrate)
+ f := cmdMigrate.Flags()
+ f.BoolVarP(&migrateOptions.Force, "force", "f", false, `apply a migration a second time`)
+}
+
+// checkMigrations prints the name and description of every registered
+// migration whose precondition check succeeds for this repository.
+func checkMigrations(opts MigrateOptions, gopts GlobalOptions, repo restic.Repository) error {
+	ctx := gopts.ctx
+	Printf("available migrations:\n")
+	for _, m := range migrations.All {
+		ok, err := m.Check(ctx, repo)
+		if err != nil {
+			return err
+		}
+
+		if ok {
+			Printf(" %v: %v\n", m.Name(), m.Desc())
+		}
+	}
+
+	return nil
+}
+
+// applyMigrations applies each named migration to the repository. A
+// failed precondition check skips the migration unless --force is set.
+// Failures of individual migrations are reported but do not abort the
+// remaining ones; the first such error is returned at the end.
+func applyMigrations(opts MigrateOptions, gopts GlobalOptions, repo restic.Repository, args []string) error {
+	ctx := gopts.ctx
+
+	var firsterr error
+	for _, name := range args {
+		for _, m := range migrations.All {
+			if m.Name() == name {
+				ok, err := m.Check(ctx, repo)
+				if err != nil {
+					return err
+				}
+
+				if !ok {
+					if !opts.Force {
+						Warnf("migration %v cannot be applied: check failed\nIf you want to apply this migration anyway, re-run with option --force\n", m.Name())
+						continue
+					}
+
+					Warnf("check for migration %v failed, continuing anyway\n", m.Name())
+				}
+
+				Printf("applying migration %v...\n", m.Name())
+				if err = m.Apply(ctx, repo); err != nil {
+					Warnf("migration %v failed: %v\n", m.Name(), err)
+					if firsterr == nil {
+						firsterr = err
+					}
+					continue
+				}
+
+				Printf("migration %v: success\n", m.Name())
+			}
+		}
+	}
+
+	return firsterr
+}
+
+// runMigrate implements the "migrate" command: with no arguments it lists
+// the applicable migrations, otherwise it applies the named ones. The
+// repository is locked exclusively since migrations may modify it.
+func runMigrate(opts MigrateOptions, gopts GlobalOptions, args []string) error {
+	repo, err := OpenRepository(gopts)
+	if err != nil {
+		return err
+	}
+
+	lock, err := lockRepoExclusive(repo)
+	defer unlockRepo(lock)
+	if err != nil {
+		return err
+	}
+
+	if len(args) == 0 {
+		return checkMigrations(opts, gopts, repo)
+	}
+
+	return applyMigrations(opts, gopts, repo, args)
+}
diff --git a/cmd/restic/cmd_mount.go b/cmd/restic/cmd_mount.go
new file mode 100644
index 000000000..8f24fdcc9
--- /dev/null
+++ b/cmd/restic/cmd_mount.go
@@ -0,0 +1,150 @@
+// +build !openbsd
+// +build !windows
+
+package main
+
+import (
+ "context"
+ "os"
+
+ "github.com/spf13/cobra"
+
+ "github.com/restic/restic/internal/debug"
+ "github.com/restic/restic/internal/errors"
+ "github.com/restic/restic/internal/restic"
+
+ resticfs "github.com/restic/restic/internal/fs"
+ "github.com/restic/restic/internal/fuse"
+
+ systemFuse "bazil.org/fuse"
+ "bazil.org/fuse/fs"
+)
+
+var cmdMount = &cobra.Command{
+ Use: "mount [flags] mountpoint",
+ Short: "Mount the repository",
+ Long: `
+The "mount" command mounts the repository via fuse to a directory. This is a
+read-only mount.
+`,
+ DisableAutoGenTag: true,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ return runMount(mountOptions, globalOptions, args)
+ },
+}
+
+// MountOptions collects all options for the mount command.
+type MountOptions struct {
+ OwnerRoot bool
+ AllowRoot bool
+ AllowOther bool
+ Host string
+ Tags restic.TagLists
+ Paths []string
+}
+
+var mountOptions MountOptions
+
+func init() {
+ cmdRoot.AddCommand(cmdMount)
+
+ mountFlags := cmdMount.Flags()
+ mountFlags.BoolVar(&mountOptions.OwnerRoot, "owner-root", false, "use 'root' as the owner of files and dirs")
+ mountFlags.BoolVar(&mountOptions.AllowRoot, "allow-root", false, "allow root user to access the data in the mounted directory")
+ mountFlags.BoolVar(&mountOptions.AllowOther, "allow-other", false, "allow other users to access the data in the mounted directory")
+
+ mountFlags.StringVarP(&mountOptions.Host, "host", "H", "", `only consider snapshots for this host`)
+ mountFlags.Var(&mountOptions.Tags, "tag", "only consider snapshots which include this `taglist`")
+ mountFlags.StringArrayVar(&mountOptions.Paths, "path", nil, "only consider snapshots which include this (absolute) `path`")
+}
+
+// mount opens the repository, creates the mountpoint if necessary, and
+// serves the repository read-only via FUSE at mountpoint. It blocks until
+// the filesystem is unmounted, then returns the mount error, if any.
+func mount(opts MountOptions, gopts GlobalOptions, mountpoint string) error {
+	debug.Log("start mount")
+	defer debug.Log("finish mount")
+
+	repo, err := OpenRepository(gopts)
+	if err != nil {
+		return err
+	}
+
+	err = repo.LoadIndex(context.TODO())
+	if err != nil {
+		return err
+	}
+
+	if _, err := resticfs.Stat(mountpoint); os.IsNotExist(errors.Cause(err)) {
+		Verbosef("Mountpoint %s doesn't exist, creating it\n", mountpoint)
+		err = resticfs.Mkdir(mountpoint, os.ModeDir|0700)
+		if err != nil {
+			return err
+		}
+	}
+
+	mountOptions := []systemFuse.MountOption{
+		systemFuse.ReadOnly(),
+		systemFuse.FSName("restic"),
+	}
+
+	if opts.AllowRoot {
+		mountOptions = append(mountOptions, systemFuse.AllowRoot())
+	}
+
+	if opts.AllowOther {
+		mountOptions = append(mountOptions, systemFuse.AllowOther())
+	}
+
+	c, err := systemFuse.Mount(mountpoint, mountOptions...)
+	if err != nil {
+		return err
+	}
+
+	// Route FUSE debug messages into restic's debug log.
+	systemFuse.Debug = func(msg interface{}) {
+		debug.Log("fuse: %v", msg)
+	}
+
+	cfg := fuse.Config{
+		OwnerIsRoot: opts.OwnerRoot,
+		Host:        opts.Host,
+		Tags:        opts.Tags,
+		Paths:       opts.Paths,
+	}
+	root, err := fuse.NewRoot(context.TODO(), repo, cfg)
+	if err != nil {
+		return err
+	}
+
+	Printf("Now serving the repository at %s\n", mountpoint)
+	Printf("Don't forget to umount after quitting!\n")
+
+	debug.Log("serving mount at %v", mountpoint)
+	// Serve blocks until the filesystem is unmounted.
+	err = fs.Serve(c, root)
+	if err != nil {
+		return err
+	}
+
+	// Wait for the mount to be fully set up, then report any mount error.
+	<-c.Ready
+	return c.MountError
+}
+
+// umount unmounts the FUSE filesystem mounted at mountpoint.
+func umount(mountpoint string) error {
+	return systemFuse.Unmount(mountpoint)
+}
+
+// runMount implements the "mount" command: it registers a cleanup handler
+// that unmounts the mountpoint on shutdown, then mounts and serves the
+// repository there.
+func runMount(opts MountOptions, gopts GlobalOptions, args []string) error {
+	if len(args) == 0 {
+		return errors.Fatal("wrong number of parameters")
+	}
+
+	mountpoint := args[0]
+
+	AddCleanupHandler(func() error {
+		debug.Log("running umount cleanup handler for mount at %v", mountpoint)
+		err := umount(mountpoint)
+		if err != nil {
+			// Best effort: the filesystem may already be unmounted.
+			Warnf("unable to umount (maybe already umounted?): %v\n", err)
+		}
+		return nil
+	})
+
+	return mount(opts, gopts, mountpoint)
+}
diff --git a/cmd/restic/cmd_options.go b/cmd/restic/cmd_options.go
new file mode 100644
index 000000000..6edcbebec
--- /dev/null
+++ b/cmd/restic/cmd_options.go
@@ -0,0 +1,29 @@
+package main
+
+import (
+ "fmt"
+
+ "github.com/restic/restic/internal/options"
+
+ "github.com/spf13/cobra"
+)
+
// optionsCmd prints the table of extended options registered with the
// options package. The command is hidden since it is mainly useful for
// documentation and debugging.
var optionsCmd = &cobra.Command{
	Use:   "options",
	Short: "Print list of extended options",
	Long: `
The "options" command prints a list of extended options.
`,
	Hidden:            true,
	DisableAutoGenTag: true,
	Run: func(cmd *cobra.Command, args []string) {
		fmt.Printf("All Extended Options:\n")
		// print each registered option as "namespace.name" plus help text
		for _, opt := range options.List() {
			fmt.Printf(" %-15s %s\n", opt.Namespace+"."+opt.Name, opt.Text)
		}
	},
}

func init() {
	cmdRoot.AddCommand(optionsCmd)
}
diff --git a/cmd/restic/cmd_prune.go b/cmd/restic/cmd_prune.go
new file mode 100644
index 000000000..8e3c5b53d
--- /dev/null
+++ b/cmd/restic/cmd_prune.go
@@ -0,0 +1,279 @@
+package main
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/restic/restic/internal/debug"
+ "github.com/restic/restic/internal/errors"
+ "github.com/restic/restic/internal/index"
+ "github.com/restic/restic/internal/repository"
+ "github.com/restic/restic/internal/restic"
+
+ "github.com/spf13/cobra"
+)
+
// cmdPrune implements the "prune" command, which removes data from the
// repository that is no longer referenced by any snapshot.
var cmdPrune = &cobra.Command{
	Use:   "prune [flags]",
	Short: "Remove unneeded data from the repository",
	Long: `
The "prune" command checks the repository and removes data that is not
referenced and therefore not needed any more.
`,
	DisableAutoGenTag: true,
	RunE: func(cmd *cobra.Command, args []string) error {
		return runPrune(globalOptions)
	},
}

func init() {
	cmdRoot.AddCommand(cmdPrune)
}
+
// shortenStatus returns s truncated to at most maxLength bytes. When the
// string must be cut and there is room for it, an ellipsis ("...") is
// appended to indicate the truncation.
func shortenStatus(maxLength int, s string) string {
	switch {
	case len(s) <= maxLength:
		return s
	case maxLength < 3:
		// not enough room for the ellipsis, cut hard
		return s[:maxLength]
	default:
		return s[:maxLength-3] + "..."
	}
}
+
// newProgressMax returns a progress that counts blobs up to the given
// maximum and prints a single, terminal-width-truncated status line while
// running. It returns nil when show is false, which disables progress
// output entirely (callers in this file invoke methods on the possibly-nil
// result, so a nil *restic.Progress is presumably a safe no-op — the
// restic.Progress implementation handles it).
func newProgressMax(show bool, max uint64, description string) *restic.Progress {
	if !show {
		return nil
	}

	p := restic.NewProgress()

	// called periodically while the progress is running
	p.OnUpdate = func(s restic.Stat, d time.Duration, ticker bool) {
		status := fmt.Sprintf("[%s] %s %d / %d %s",
			formatDuration(d),
			formatPercent(s.Blobs, max),
			s.Blobs, max, description)

		// truncate the status line so it fits into the terminal
		if w := stdoutTerminalWidth(); w > 0 {
			status = shortenStatus(w, status)
		}

		PrintProgress("%s", status)
	}

	// terminate the in-place status line with a newline when finished
	p.OnDone = func(s restic.Stat, d time.Duration, ticker bool) {
		fmt.Printf("\n")
	}

	return p
}
+
+func runPrune(gopts GlobalOptions) error {
+ repo, err := OpenRepository(gopts)
+ if err != nil {
+ return err
+ }
+
+ lock, err := lockRepoExclusive(repo)
+ defer unlockRepo(lock)
+ if err != nil {
+ return err
+ }
+
+ return pruneRepository(gopts, repo)
+}
+
// pruneRepository rebuilds the index from the pack files, determines which
// blobs are still referenced by any snapshot, repacks partly-used packs and
// finally deletes the packs that are no longer needed.
func pruneRepository(gopts GlobalOptions, repo restic.Repository) error {
	ctx := gopts.ctx

	err := repo.LoadIndex(ctx)
	if err != nil {
		return err
	}

	// statistics gathered while scanning the repository
	var stats struct {
		blobs     int
		packs     int
		snapshots int
		bytes     int64
	}

	Verbosef("counting files in repo\n")
	for range repo.List(ctx, restic.DataFile) {
		stats.packs++
	}

	Verbosef("building new index for repo\n")

	// scan all pack files (no packs ignored) and build a fresh index
	bar := newProgressMax(!gopts.Quiet, uint64(stats.packs), "packs")
	idx, invalidFiles, err := index.New(ctx, repo, restic.NewIDSet(), bar)
	if err != nil {
		return err
	}

	for _, id := range invalidFiles {
		Warnf("incomplete pack file (will be removed): %v\n", id)
	}

	blobs := 0
	for _, pack := range idx.Packs {
		stats.bytes += pack.Size
		blobs += len(pack.Entries)
	}
	Verbosef("repository contains %v packs (%v blobs) with %v bytes\n",
		len(idx.Packs), blobs, formatBytes(uint64(stats.bytes)))

	blobCount := make(map[restic.BlobHandle]int)
	duplicateBlobs := 0
	duplicateBytes := 0

	// find duplicate blobs: every occurrence of a blob after the first
	// counts as a duplicate
	for _, p := range idx.Packs {
		for _, entry := range p.Entries {
			stats.blobs++
			h := restic.BlobHandle{ID: entry.ID, Type: entry.Type}
			blobCount[h]++

			if blobCount[h] > 1 {
				duplicateBlobs++
				duplicateBytes += int(entry.Length)
			}
		}
	}

	Verbosef("processed %d blobs: %d duplicate blobs, %v duplicate\n",
		stats.blobs, duplicateBlobs, formatBytes(uint64(duplicateBytes)))
	Verbosef("load all snapshots\n")

	// find referenced blobs
	snapshots, err := restic.LoadAllSnapshots(ctx, repo)
	if err != nil {
		return err
	}

	stats.snapshots = len(snapshots)

	Verbosef("find data that is still in use for %d snapshots\n", stats.snapshots)

	// usedBlobs accumulates everything reachable from any snapshot tree;
	// seenBlobs avoids re-walking shared subtrees
	usedBlobs := restic.NewBlobSet()
	seenBlobs := restic.NewBlobSet()

	bar = newProgressMax(!gopts.Quiet, uint64(len(snapshots)), "snapshots")
	bar.Start()
	for _, sn := range snapshots {
		debug.Log("process snapshot %v", sn.ID().Str())

		err = restic.FindUsedBlobs(ctx, repo, *sn.Tree, usedBlobs, seenBlobs)
		if err != nil {
			// a missing tree means the repository is damaged; report it
			// as a fatal error rather than a generic failure
			if repo.Backend().IsNotExist(err) {
				return errors.Fatal("unable to load a tree from the repo: " + err.Error())
			}

			return err
		}

		debug.Log("processed snapshot %v", sn.ID().Str())
		bar.Report(restic.Stat{Blobs: 1})
	}
	bar.Done()

	// sanity check: more used blobs than available blobs indicates an
	// internal inconsistency
	if len(usedBlobs) > stats.blobs {
		return errors.Fatalf("number of used blobs is larger than number of available blobs!\n" +
			"Please report this error (along with the output of the 'prune' run) at\n" +
			"https://github.com/restic/restic/issues/new")
	}

	Verbosef("found %d of %d data blobs still in use, removing %d blobs\n",
		len(usedBlobs), stats.blobs, stats.blobs-len(usedBlobs))

	// find packs that need a rewrite: packs containing at least one unused
	// or duplicated blob
	rewritePacks := restic.NewIDSet()
	for _, pack := range idx.Packs {
		for _, blob := range pack.Entries {
			h := restic.BlobHandle{ID: blob.ID, Type: blob.Type}
			if !usedBlobs.Has(h) {
				rewritePacks.Insert(pack.ID)
				continue
			}

			if blobCount[h] > 1 {
				rewritePacks.Insert(pack.ID)
			}
		}
	}

	removeBytes := duplicateBytes

	// find packs that are unneeded
	removePacks := restic.NewIDSet()

	Verbosef("will remove %d invalid files\n", len(invalidFiles))
	for _, id := range invalidFiles {
		removePacks.Insert(id)
	}

	for packID, p := range idx.Packs {

		hasActiveBlob := false
		for _, blob := range p.Entries {
			h := restic.BlobHandle{ID: blob.ID, Type: blob.Type}
			if usedBlobs.Has(h) {
				hasActiveBlob = true
				continue
			}

			// unused blob: its size will be freed
			removeBytes += int(blob.Length)
		}

		if hasActiveBlob {
			continue
		}

		// no blob in this pack is used: delete the pack outright instead
		// of rewriting it
		removePacks.Insert(packID)

		if !rewritePacks.Has(packID) {
			return errors.Fatalf("pack %v is unneeded, but not contained in rewritePacks", packID.Str())
		}

		rewritePacks.Delete(packID)
	}

	Verbosef("will delete %d packs and rewrite %d packs, this frees %s\n",
		len(removePacks), len(rewritePacks), formatBytes(uint64(removeBytes)))

	var obsoletePacks restic.IDSet
	if len(rewritePacks) != 0 {
		bar = newProgressMax(!gopts.Quiet, uint64(len(rewritePacks)), "packs rewritten")
		bar.Start()
		obsoletePacks, err = repository.Repack(ctx, repo, rewritePacks, usedBlobs, bar)
		if err != nil {
			return err
		}
		bar.Done()
	}

	// the repacked packs are now obsolete and can be deleted as well
	removePacks.Merge(obsoletePacks)

	// write a new index that no longer references the removed packs
	if err = rebuildIndex(ctx, repo, removePacks); err != nil {
		return err
	}

	if len(removePacks) != 0 {
		bar = newProgressMax(!gopts.Quiet, uint64(len(removePacks)), "packs deleted")
		bar.Start()
		for packID := range removePacks {
			h := restic.Handle{Type: restic.DataFile, Name: packID.String()}
			err = repo.Backend().Remove(ctx, h)
			if err != nil {
				// a leftover pack is only warned about; the new index no
				// longer references it
				Warnf("unable to remove file %v from the repository\n", packID.Str())
			}
			bar.Report(restic.Stat{Blobs: 1})
		}
		bar.Done()
	}

	Verbosef("done\n")
	return nil
}
diff --git a/cmd/restic/cmd_rebuild_index.go b/cmd/restic/cmd_rebuild_index.go
new file mode 100644
index 000000000..92cbf3360
--- /dev/null
+++ b/cmd/restic/cmd_rebuild_index.go
@@ -0,0 +1,86 @@
+package main
+
+import (
+ "context"
+
+ "github.com/restic/restic/internal/index"
+ "github.com/restic/restic/internal/restic"
+
+ "github.com/spf13/cobra"
+)
+
// cmdRebuildIndex implements the "rebuild-index" command, which creates a
// fresh index from the pack files present in the repository.
var cmdRebuildIndex = &cobra.Command{
	Use:   "rebuild-index [flags]",
	Short: "Build a new index file",
	Long: `
The "rebuild-index" command creates a new index based on the pack files in the
repository.
`,
	DisableAutoGenTag: true,
	RunE: func(cmd *cobra.Command, args []string) error {
		return runRebuildIndex(globalOptions)
	},
}

func init() {
	cmdRoot.AddCommand(cmdRebuildIndex)
}
+
+func runRebuildIndex(gopts GlobalOptions) error {
+ repo, err := OpenRepository(gopts)
+ if err != nil {
+ return err
+ }
+
+ lock, err := lockRepoExclusive(repo)
+ defer unlockRepo(lock)
+ if err != nil {
+ return err
+ }
+
+ ctx, cancel := context.WithCancel(gopts.ctx)
+ defer cancel()
+ return rebuildIndex(ctx, repo, restic.NewIDSet())
+}
+
// rebuildIndex scans all pack files except those in ignorePacks, builds a
// new index from their contents, saves it and then removes the index files
// it supersedes. It is also called by prune after packs have been removed.
func rebuildIndex(ctx context.Context, repo restic.Repository, ignorePacks restic.IDSet) error {
	Verbosef("counting files in repo\n")

	var packs uint64
	for range repo.List(ctx, restic.DataFile) {
		packs++
	}

	// the progress maximum only counts packs that will actually be read
	bar := newProgressMax(!globalOptions.Quiet, packs-uint64(len(ignorePacks)), "packs")
	idx, _, err := index.New(ctx, repo, ignorePacks, bar)
	if err != nil {
		return err
	}

	Verbosef("finding old index files\n")

	// every existing index file is superseded by the new one
	var supersedes restic.IDs
	for id := range repo.List(ctx, restic.IndexFile) {
		supersedes = append(supersedes, id)
	}

	id, err := idx.Save(ctx, repo, supersedes)
	if err != nil {
		return err
	}

	Verbosef("saved new index as %v\n", id.Str())

	Verbosef("remove %d old index files\n", len(supersedes))

	// removal failures are only warnings: the new index has already been
	// saved, so a leftover old index file is presumably harmless
	for _, id := range supersedes {
		if err := repo.Backend().Remove(ctx, restic.Handle{
			Type: restic.IndexFile,
			Name: id.String(),
		}); err != nil {
			Warnf("error removing old index %v: %v\n", id.Str(), err)
		}
	}

	return nil
}
diff --git a/cmd/restic/cmd_restore.go b/cmd/restic/cmd_restore.go
new file mode 100644
index 000000000..11eb13a23
--- /dev/null
+++ b/cmd/restic/cmd_restore.go
@@ -0,0 +1,157 @@
+package main
+
+import (
+ "github.com/restic/restic/internal/debug"
+ "github.com/restic/restic/internal/errors"
+ "github.com/restic/restic/internal/filter"
+ "github.com/restic/restic/internal/restic"
+
+ "github.com/spf13/cobra"
+)
+
// cmdRestore implements the "restore" command, which extracts the contents
// of a snapshot into a target directory.
var cmdRestore = &cobra.Command{
	Use:   "restore [flags] snapshotID",
	Short: "Extract the data from a snapshot",
	Long: `
The "restore" command extracts the data from a snapshot from the repository to
a directory.

The special snapshot "latest" can be used to restore the latest snapshot in the
repository.
`,
	DisableAutoGenTag: true,
	RunE: func(cmd *cobra.Command, args []string) error {
		return runRestore(restoreOptions, globalOptions, args)
	},
}

// RestoreOptions collects all options for the restore command.
type RestoreOptions struct {
	Exclude []string        // patterns for files to skip during restore
	Include []string        // patterns for files to restore; everything else is skipped
	Target  string          // directory to extract the data to
	Host    string          // host filter for resolving the "latest" snapshot
	Paths   []string        // path filter for resolving the "latest" snapshot
	Tags    restic.TagLists // tag filter for resolving the "latest" snapshot
}

var restoreOptions RestoreOptions

func init() {
	cmdRoot.AddCommand(cmdRestore)

	flags := cmdRestore.Flags()
	flags.StringArrayVarP(&restoreOptions.Exclude, "exclude", "e", nil, "exclude a `pattern` (can be specified multiple times)")
	flags.StringArrayVarP(&restoreOptions.Include, "include", "i", nil, "include a `pattern`, exclude everything else (can be specified multiple times)")
	flags.StringVarP(&restoreOptions.Target, "target", "t", "", "directory to extract data to")

	flags.StringVarP(&restoreOptions.Host, "host", "H", "", `only consider snapshots for this host when the snapshot ID is "latest"`)
	flags.Var(&restoreOptions.Tags, "tag", "only consider snapshots which include this `taglist` for snapshot ID \"latest\"")
	flags.StringArrayVar(&restoreOptions.Paths, "path", nil, "only consider snapshots which include this (absolute) `path` for snapshot ID \"latest\"")
}
+
// runRestore restores a single snapshot (or "latest") into opts.Target,
// optionally limiting the restored files with include or exclude patterns.
func runRestore(opts RestoreOptions, gopts GlobalOptions, args []string) error {
	ctx := gopts.ctx

	if len(args) != 1 {
		return errors.Fatal("no snapshot ID specified")
	}

	if opts.Target == "" {
		return errors.Fatal("please specify a directory to restore to (--target)")
	}

	// include and exclude filters are mutually exclusive (see the two
	// select filters below)
	if len(opts.Exclude) > 0 && len(opts.Include) > 0 {
		return errors.Fatal("exclude and include patterns are mutually exclusive")
	}

	snapshotIDString := args[0]

	debug.Log("restore %v to %v", snapshotIDString, opts.Target)

	repo, err := OpenRepository(gopts)
	if err != nil {
		return err
	}

	if !gopts.NoLock {
		// the lock is held until runRestore returns (defer runs at
		// function exit, not at the end of this block)
		lock, err := lockRepo(repo)
		defer unlockRepo(lock)
		if err != nil {
			return err
		}
	}

	err = repo.LoadIndex(ctx)
	if err != nil {
		return err
	}

	// resolve the snapshot: either the latest one matching the filters, or
	// an explicit (possibly abbreviated) ID
	var id restic.ID

	if snapshotIDString == "latest" {
		id, err = restic.FindLatestSnapshot(ctx, repo, opts.Paths, opts.Tags, opts.Host)
		if err != nil {
			Exitf(1, "latest snapshot for criteria not found: %v Paths:%v Host:%v", err, opts.Paths, opts.Host)
		}
	} else {
		id, err = restic.FindSnapshot(repo, snapshotIDString)
		if err != nil {
			Exitf(1, "invalid id %q: %v", snapshotIDString, err)
		}
	}

	res, err := restic.NewRestorer(repo, id)
	if err != nil {
		Exitf(2, "creating restorer failed: %v\n", err)
	}

	// per-file errors are reported as warnings and counted, but do not
	// abort the restore
	totalErrors := 0
	res.Error = func(dir string, node *restic.Node, err error) error {
		Warnf("ignoring error for %s: %s\n", dir, err)
		totalErrors++
		return nil
	}

	selectExcludeFilter := func(item string, dstpath string, node *restic.Node) (selectedForRestore bool, childMayBeSelected bool) {
		matched, _, err := filter.List(opts.Exclude, item)
		if err != nil {
			Warnf("error for exclude pattern: %v", err)
		}

		// An exclude filter is basically a 'wildcard but foo',
		// so even if a childMayMatch, other children of a dir may not,
		// therefore childMayMatch does not matter, but we should not go down
		// unless the dir is selected for restore
		selectedForRestore = !matched
		childMayBeSelected = selectedForRestore && node.Type == "dir"

		return selectedForRestore, childMayBeSelected
	}

	selectIncludeFilter := func(item string, dstpath string, node *restic.Node) (selectedForRestore bool, childMayBeSelected bool) {
		matched, childMayMatch, err := filter.List(opts.Include, item)
		if err != nil {
			Warnf("error for include pattern: %v", err)
		}

		selectedForRestore = matched
		childMayBeSelected = childMayMatch && node.Type == "dir"

		return selectedForRestore, childMayBeSelected
	}

	if len(opts.Exclude) > 0 {
		res.SelectFilter = selectExcludeFilter
	} else if len(opts.Include) > 0 {
		res.SelectFilter = selectIncludeFilter
	}

	Verbosef("restoring %s to %s\n", res.Snapshot(), opts.Target)

	err = res.RestoreTo(ctx, opts.Target)
	if totalErrors > 0 {
		Printf("There were %d errors\n", totalErrors)
	}
	return err
}
diff --git a/cmd/restic/cmd_snapshots.go b/cmd/restic/cmd_snapshots.go
new file mode 100644
index 000000000..7e8530b31
--- /dev/null
+++ b/cmd/restic/cmd_snapshots.go
@@ -0,0 +1,186 @@
+package main
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "io"
+ "sort"
+
+ "github.com/restic/restic/internal/restic"
+ "github.com/spf13/cobra"
+)
+
// cmdSnapshots implements the "snapshots" command, which lists the
// snapshots stored in the repository.
var cmdSnapshots = &cobra.Command{
	Use:   "snapshots [snapshotID ...]",
	Short: "List all snapshots",
	Long: `
The "snapshots" command lists all snapshots stored in the repository.
`,
	DisableAutoGenTag: true,
	RunE: func(cmd *cobra.Command, args []string) error {
		return runSnapshots(snapshotOptions, globalOptions, args)
	},
}

// SnapshotOptions bundles all options for the snapshots command.
type SnapshotOptions struct {
	Host    string          // only consider snapshots for this host
	Tags    restic.TagLists // only consider snapshots with these tags
	Paths   []string        // only consider snapshots containing these paths
	Compact bool            // print one line per snapshot
}

var snapshotOptions SnapshotOptions

func init() {
	cmdRoot.AddCommand(cmdSnapshots)

	f := cmdSnapshots.Flags()
	f.StringVarP(&snapshotOptions.Host, "host", "H", "", "only consider snapshots for this `host`")
	f.Var(&snapshotOptions.Tags, "tag", "only consider snapshots which include this `taglist` (can be specified multiple times)")
	f.StringArrayVar(&snapshotOptions.Paths, "path", nil, "only consider snapshots for this `path` (can be specified multiple times)")
	f.BoolVarP(&snapshotOptions.Compact, "compact", "c", false, "use compact format")
}
+
// runSnapshots collects all snapshots matching the given filters, sorts
// them newest first and prints them either as a table or as JSON.
func runSnapshots(opts SnapshotOptions, gopts GlobalOptions, args []string) error {
	repo, err := OpenRepository(gopts)
	if err != nil {
		return err
	}

	if !gopts.NoLock {
		// the lock is held until runSnapshots returns (defer runs at
		// function exit, not at the end of this block)
		lock, err := lockRepo(repo)
		defer unlockRepo(lock)
		if err != nil {
			return err
		}
	}

	ctx, cancel := context.WithCancel(gopts.ctx)
	defer cancel()

	// collect matching snapshots and sort newest first
	var list restic.Snapshots
	for sn := range FindFilteredSnapshots(ctx, repo, opts.Host, opts.Tags, opts.Paths, args) {
		list = append(list, sn)
	}
	sort.Sort(sort.Reverse(list))

	if gopts.JSON {
		// JSON output errors are only warnings; the command still succeeds
		err := printSnapshotsJSON(gopts.stdout, list)
		if err != nil {
			Warnf("error printing snapshot: %v\n", err)
		}
		return nil
	}
	PrintSnapshots(gopts.stdout, list, opts.Compact)

	return nil
}
+
+// PrintSnapshots prints a text table of the snapshots in list to stdout.
+func PrintSnapshots(stdout io.Writer, list restic.Snapshots, compact bool) {
+
+ // Determine the max widths for host and tag.
+ maxHost, maxTag := 10, 6
+ for _, sn := range list {
+ if len(sn.Hostname) > maxHost {
+ maxHost = len(sn.Hostname)
+ }
+ for _, tag := range sn.Tags {
+ if len(tag) > maxTag {
+ maxTag = len(tag)
+ }
+ }
+ }
+
+ tab := NewTable()
+ if !compact {
+ tab.Header = fmt.Sprintf("%-8s %-19s %-*s %-*s %-3s %s", "ID", "Date", -maxHost, "Host", -maxTag, "Tags", "", "Directory")
+ tab.RowFormat = fmt.Sprintf("%%-8s %%-19s %%%ds %%%ds %%-3s %%s", -maxHost, -maxTag)
+ } else {
+ tab.Header = fmt.Sprintf("%-8s %-19s %-*s %-*s", "ID", "Date", -maxHost, "Host", -maxTag, "Tags")
+ tab.RowFormat = fmt.Sprintf("%%-8s %%-19s %%%ds %%s", -maxHost)
+ }
+
+ for _, sn := range list {
+ if len(sn.Paths) == 0 {
+ continue
+ }
+
+ firstTag := ""
+ if len(sn.Tags) > 0 {
+ firstTag = sn.Tags[0]
+ }
+
+ rows := len(sn.Paths)
+ if rows < len(sn.Tags) {
+ rows = len(sn.Tags)
+ }
+
+ treeElement := " "
+ if rows != 1 {
+ treeElement = "┌──"
+ }
+
+ if !compact {
+ tab.Rows = append(tab.Rows, []interface{}{sn.ID().Str(), sn.Time.Format(TimeFormat), sn.Hostname, firstTag, treeElement, sn.Paths[0]})
+ } else {
+ allTags := ""
+ for _, tag := range sn.Tags {
+ allTags += tag + " "
+ }
+ tab.Rows = append(tab.Rows, []interface{}{sn.ID().Str(), sn.Time.Format(TimeFormat), sn.Hostname, allTags})
+ continue
+ }
+
+ if len(sn.Tags) > rows {
+ rows = len(sn.Tags)
+ }
+
+ for i := 1; i < rows; i++ {
+ path := ""
+ if len(sn.Paths) > i {
+ path = sn.Paths[i]
+ }
+
+ tag := ""
+ if len(sn.Tags) > i {
+ tag = sn.Tags[i]
+ }
+
+ treeElement := "│"
+ if i == (rows - 1) {
+ treeElement = "└──"
+ }
+
+ tab.Rows = append(tab.Rows, []interface{}{"", "", "", tag, treeElement, path})
+ }
+ }
+
+ tab.Write(stdout)
+}
+
// Snapshot helps to print Snapshots as JSON with their ID included.
type Snapshot struct {
	*restic.Snapshot

	// ID is the snapshot's storage ID, which is not part of the stored
	// snapshot structure itself.
	ID *restic.ID `json:"id"`
}

// printSnapshotsJSON writes the JSON representation of list to stdout.
func printSnapshotsJSON(stdout io.Writer, list restic.Snapshots) error {

	var snapshots []Snapshot

	// wrap each snapshot so that its ID appears in the JSON output
	for _, sn := range list {

		k := Snapshot{
			Snapshot: sn,
			ID:       sn.ID(),
		}
		snapshots = append(snapshots, k)
	}

	return json.NewEncoder(stdout).Encode(snapshots)
}
diff --git a/cmd/restic/cmd_tag.go b/cmd/restic/cmd_tag.go
new file mode 100644
index 000000000..2c70399e5
--- /dev/null
+++ b/cmd/restic/cmd_tag.go
@@ -0,0 +1,143 @@
+package main
+
+import (
+ "context"
+
+ "github.com/spf13/cobra"
+
+ "github.com/restic/restic/internal/debug"
+ "github.com/restic/restic/internal/errors"
+ "github.com/restic/restic/internal/repository"
+ "github.com/restic/restic/internal/restic"
+)
+
+var cmdTag = &cobra.Command{
+ Use: "tag [flags] [snapshot-ID ...]",
+ Short: "Modify tags on snapshots",
+ Long: `
+The "tag" command allows you to modify tags on exiting snapshots.
+
+You can either set/replace the entire set of tags on a snapshot, or
+add tags to/remove tags from the existing set.
+
+When no snapshot-ID is given, all snapshots matching the host, tag and path filter criteria are modified.
+`,
+ DisableAutoGenTag: true,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ return runTag(tagOptions, globalOptions, args)
+ },
+}
+
// TagOptions bundles all options for the 'tag' command.
type TagOptions struct {
	Host       string          // host filter, used when no snapshot ID is given
	Paths      []string        // path filter, used when no snapshot ID is given
	Tags       restic.TagLists // tag filter, used when no snapshot ID is given
	SetTags    []string        // tags that replace the existing set
	AddTags    []string        // tags to add to the existing set
	RemoveTags []string        // tags to remove from the existing set
}

var tagOptions TagOptions

func init() {
	cmdRoot.AddCommand(cmdTag)

	tagFlags := cmdTag.Flags()
	tagFlags.StringSliceVar(&tagOptions.SetTags, "set", nil, "`tag` which will replace the existing tags (can be given multiple times)")
	tagFlags.StringSliceVar(&tagOptions.AddTags, "add", nil, "`tag` which will be added to the existing tags (can be given multiple times)")
	tagFlags.StringSliceVar(&tagOptions.RemoveTags, "remove", nil, "`tag` which will be removed from the existing tags (can be given multiple times)")

	tagFlags.StringVarP(&tagOptions.Host, "host", "H", "", "only consider snapshots for this `host`, when no snapshot ID is given")
	tagFlags.Var(&tagOptions.Tags, "tag", "only consider snapshots which include this `taglist`, when no snapshot-ID is given")
	tagFlags.StringArrayVar(&tagOptions.Paths, "path", nil, "only consider snapshots which include this (absolute) `path`, when no snapshot-ID is given")
}
+
// changeTags applies the requested tag changes (set, add, remove) to the
// snapshot sn. When the tag set actually changed, the modified snapshot is
// saved under a new ID (recording the original ID in sn.Original) and the
// old snapshot file is removed. It reports whether a change was made.
func changeTags(repo *repository.Repository, sn *restic.Snapshot, setTags, addTags, removeTags []string) (bool, error) {
	var changed bool

	if len(setTags) != 0 {
		// Setting the tag to an empty string really means no tags.
		if len(setTags) == 1 && setTags[0] == "" {
			setTags = nil
		}
		sn.Tags = setTags
		changed = true
	} else {
		changed = sn.AddTags(addTags)
		if sn.RemoveTags(removeTags) {
			changed = true
		}
	}

	if changed {
		// Retain the original snapshot id over all tag changes.
		if sn.Original == nil {
			sn.Original = sn.ID()
		}

		// Save the new snapshot.
		id, err := repo.SaveJSONUnpacked(context.TODO(), restic.SnapshotFile, sn)
		if err != nil {
			return false, err
		}

		debug.Log("new snapshot saved as %v", id.Str())

		if err = repo.Flush(); err != nil {
			return false, err
		}

		// Remove the old snapshot only after the new one has been flushed,
		// so a failure in between never loses the snapshot.
		h := restic.Handle{Type: restic.SnapshotFile, Name: sn.ID().String()}
		if err = repo.Backend().Remove(context.TODO(), h); err != nil {
			return false, err
		}

		debug.Log("old snapshot %v removed", sn.ID())
	}
	return changed, nil
}
+
// runTag validates the flag combination and applies the requested tag
// changes to every snapshot selected by ID or by the host/tag/path filters.
func runTag(opts TagOptions, gopts GlobalOptions, args []string) error {
	if len(opts.SetTags) == 0 && len(opts.AddTags) == 0 && len(opts.RemoveTags) == 0 {
		return errors.Fatal("nothing to do!")
	}
	// --set replaces the whole tag set and therefore conflicts with
	// --add/--remove
	if len(opts.SetTags) != 0 && (len(opts.AddTags) != 0 || len(opts.RemoveTags) != 0) {
		return errors.Fatal("--set and --add/--remove cannot be given at the same time")
	}

	repo, err := OpenRepository(gopts)
	if err != nil {
		return err
	}

	if !gopts.NoLock {
		// an exclusive lock is required because snapshot files are
		// rewritten; it is held (via defer) until runTag returns
		Verbosef("Create exclusive lock for repository\n")
		lock, err := lockRepoExclusive(repo)
		defer unlockRepo(lock)
		if err != nil {
			return err
		}
	}

	changeCnt := 0
	ctx, cancel := context.WithCancel(gopts.ctx)
	defer cancel()
	for sn := range FindFilteredSnapshots(ctx, repo, opts.Host, opts.Tags, opts.Paths, args) {
		changed, err := changeTags(repo, sn, opts.SetTags, opts.AddTags, opts.RemoveTags)
		if err != nil {
			// a failure on one snapshot does not abort the others
			Warnf("unable to modify the tags for snapshot ID %q, ignoring: %v\n", sn.ID(), err)
			continue
		}
		if changed {
			changeCnt++
		}
	}
	if changeCnt == 0 {
		Verbosef("No snapshots were modified\n")
	} else {
		Verbosef("Modified tags on %v snapshots\n", changeCnt)
	}
	return nil
}
diff --git a/cmd/restic/cmd_unlock.go b/cmd/restic/cmd_unlock.go
new file mode 100644
index 000000000..5297b0e4e
--- /dev/null
+++ b/cmd/restic/cmd_unlock.go
@@ -0,0 +1,53 @@
+package main
+
+import (
+ "context"
+
+ "github.com/restic/restic/internal/restic"
+ "github.com/spf13/cobra"
+)
+
// unlockCmd implements the "unlock" command, which removes lock files left
// behind by other restic processes.
var unlockCmd = &cobra.Command{
	Use:   "unlock",
	Short: "Remove locks other processes created",
	Long: `
The "unlock" command removes stale locks that have been created by other restic processes.
`,
	DisableAutoGenTag: true,
	RunE: func(cmd *cobra.Command, args []string) error {
		return runUnlock(unlockOptions, globalOptions)
	},
}

// UnlockOptions collects all options for the unlock command.
type UnlockOptions struct {
	RemoveAll bool // also remove locks that are not stale
}

var unlockOptions UnlockOptions

func init() {
	cmdRoot.AddCommand(unlockCmd)

	unlockCmd.Flags().BoolVar(&unlockOptions.RemoveAll, "remove-all", false, "remove all locks, even non-stale ones")
}
+
+func runUnlock(opts UnlockOptions, gopts GlobalOptions) error {
+ repo, err := OpenRepository(gopts)
+ if err != nil {
+ return err
+ }
+
+ fn := restic.RemoveStaleLocks
+ if opts.RemoveAll {
+ fn = restic.RemoveAllLocks
+ }
+
+ err = fn(context.TODO(), repo)
+ if err != nil {
+ return err
+ }
+
+ Verbosef("successfully removed locks\n")
+ return nil
+}
diff --git a/cmd/restic/cmd_version.go b/cmd/restic/cmd_version.go
new file mode 100644
index 000000000..669c356be
--- /dev/null
+++ b/cmd/restic/cmd_version.go
@@ -0,0 +1,26 @@
+package main
+
+import (
+ "fmt"
+ "runtime"
+
+ "github.com/spf13/cobra"
+)
+
// versionCmd implements the "version" command, which prints the restic
// version together with the Go runtime version, OS and architecture it was
// compiled with.
var versionCmd = &cobra.Command{
	Use:   "version",
	Short: "Print version information",
	Long: `
The "version" command prints detailed information about the build environment
and the version of this software.
`,
	DisableAutoGenTag: true,
	Run: func(cmd *cobra.Command, args []string) {
		fmt.Printf("restic %s\ncompiled with %v on %v/%v\n",
			version, runtime.Version(), runtime.GOOS, runtime.GOARCH)
	},
}

func init() {
	cmdRoot.AddCommand(versionCmd)
}
diff --git a/cmd/restic/doc.go b/cmd/restic/doc.go
new file mode 100644
index 000000000..19b609b8d
--- /dev/null
+++ b/cmd/restic/doc.go
@@ -0,0 +1,2 @@
+// This package contains the code for the restic executable.
+package main
diff --git a/cmd/restic/exclude.go b/cmd/restic/exclude.go
new file mode 100644
index 000000000..369c4df9a
--- /dev/null
+++ b/cmd/restic/exclude.go
@@ -0,0 +1,179 @@
+package main
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/restic/restic/internal/debug"
+ "github.com/restic/restic/internal/errors"
+ "github.com/restic/restic/internal/filter"
+ "github.com/restic/restic/internal/fs"
+)
+
// RejectFunc is a function that takes a filename and os.FileInfo of a
// file that would be included in the backup. The function returns true if it
// should be excluded (rejected) from the backup. Implementations must
// tolerate a nil os.FileInfo (see rejectByDevice, which checks for it).
type RejectFunc func(path string, fi os.FileInfo) bool
+
+// rejectByPattern returns a RejectFunc which rejects files that match
+// one of the patterns.
+func rejectByPattern(patterns []string) RejectFunc {
+ return func(item string, fi os.FileInfo) bool {
+ matched, _, err := filter.List(patterns, item)
+ if err != nil {
+ Warnf("error for exclude pattern: %v", err)
+ }
+
+ if matched {
+ debug.Log("path %q excluded by an exclude pattern", item)
+ return true
+ }
+
+ return false
+ }
+}
+
+// rejectIfPresent returns a RejectFunc which itself returns whether a path
+// should be excluded. The RejectFunc considers a file to be excluded when
+// it resides in a directory with an exclusion file, that is specified by
+// excludeFileSpec in the form "filename[:content]". The returned error is
+// non-nil if the filename component of excludeFileSpec is empty.
+func rejectIfPresent(excludeFileSpec string) (RejectFunc, error) {
+ if excludeFileSpec == "" {
+ return nil, errors.New("name for exclusion tagfile is empty")
+ }
+ colon := strings.Index(excludeFileSpec, ":")
+ if colon == 0 {
+ return nil, fmt.Errorf("no name for exclusion tagfile provided")
+ }
+ tf, tc := "", ""
+ if colon > 0 {
+ tf = excludeFileSpec[:colon]
+ tc = excludeFileSpec[colon+1:]
+ } else {
+ tf = excludeFileSpec
+ }
+ debug.Log("using %q as exclusion tagfile", tf)
+ fn := func(filename string, _ os.FileInfo) bool {
+ return isExcludedByFile(filename, tf, tc)
+ }
+ return fn, nil
+}
+
+// isExcludedByFile interprets filename as a path and returns true if that file
+// is in a excluded directory. A directory is identified as excluded if it contains a
+// tagfile which bears the name specified in tagFilename and starts with header.
+func isExcludedByFile(filename, tagFilename, header string) bool {
+ if tagFilename == "" {
+ return false
+ }
+ dir, base := filepath.Split(filename)
+ if base == tagFilename {
+ return false // do not exclude the tagfile itself
+ }
+ tf := filepath.Join(dir, tagFilename)
+ _, err := fs.Lstat(tf)
+ if os.IsNotExist(err) {
+ return false
+ }
+ if err != nil {
+ Warnf("could not access exclusion tagfile: %v", err)
+ return false
+ }
+ // when no signature is given, the mere presence of tf is enough reason
+ // to exclude filename
+ if len(header) == 0 {
+ return true
+ }
+ // From this stage, errors mean tagFilename exists but it is malformed.
+ // Warnings will be generated so that the user is informed that the
+ // indented ignore-action is not performed.
+ f, err := os.Open(tf)
+ if err != nil {
+ Warnf("could not open exclusion tagfile: %v", err)
+ return false
+ }
+ defer f.Close()
+ buf := make([]byte, len(header))
+ _, err = io.ReadFull(f, buf)
+ // EOF is handled with a dedicated message, otherwise the warning were too cryptic
+ if err == io.EOF {
+ Warnf("invalid (too short) signature in exclusion tagfile %q\n", tf)
+ return false
+ }
+ if err != nil {
+ Warnf("could not read signature from exclusion tagfile %q: %v\n", tf, err)
+ return false
+ }
+ if bytes.Compare(buf, []byte(header)) != 0 {
+ Warnf("invalid signature in exclusion tagfile %q\n", tf)
+ return false
+ }
+ return true
+}
+
+// gatherDevices returns the set of unique device ids of the files and/or
+// directory paths listed in "items".
+func gatherDevices(items []string) (deviceMap map[string]uint64, err error) {
+ deviceMap = make(map[string]uint64)
+ for _, item := range items {
+ fi, err := fs.Lstat(item)
+ if err != nil {
+ return nil, err
+ }
+ id, err := fs.DeviceID(fi)
+ if err != nil {
+ return nil, err
+ }
+ deviceMap[item] = id
+ }
+ if len(deviceMap) == 0 {
+ return nil, errors.New("zero allowed devices")
+ }
+ return deviceMap, nil
+}
+
+// rejectByDevice returns a RejectFunc that rejects files which are on a
+// different file systems than the files/dirs in samples.
+func rejectByDevice(samples []string) (RejectFunc, error) {
+ allowed, err := gatherDevices(samples)
+ if err != nil {
+ return nil, err
+ }
+ debug.Log("allowed devices: %v\n", allowed)
+
+ return func(item string, fi os.FileInfo) bool {
+ if fi == nil {
+ return false
+ }
+
+ id, err := fs.DeviceID(fi)
+ if err != nil {
+ // This should never happen because gatherDevices() would have
+ // errored out earlier. If it still does that's a reason to panic.
+ panic(err)
+ }
+
+ for dir := item; dir != ""; dir = filepath.Dir(dir) {
+ debug.Log("item %v, test dir %v", item, dir)
+
+ allowedID, ok := allowed[dir]
+ if !ok {
+ continue
+ }
+
+ if allowedID != id {
+ debug.Log("path %q on disallowed device %d", item, id)
+ return true
+ }
+
+ return false
+ }
+
+ panic(fmt.Sprintf("item %v, device id %v not found, allowedDevs: %v", item, id, allowed))
+ }, nil
+}
diff --git a/cmd/restic/exclude_test.go b/cmd/restic/exclude_test.go
new file mode 100644
index 000000000..ec9b0bade
--- /dev/null
+++ b/cmd/restic/exclude_test.go
@@ -0,0 +1,84 @@
+package main
+
+import (
+ "io/ioutil"
+ "path/filepath"
+ "testing"
+
+ "github.com/restic/restic/internal/test"
+)
+
+func TestRejectByPattern(t *testing.T) {
+ var tests = []struct {
+ filename string
+ reject bool
+ }{
+ {filename: "/home/user/foo.go", reject: true},
+ {filename: "/home/user/foo.c", reject: false},
+ {filename: "/home/user/foobar", reject: false},
+ {filename: "/home/user/foobar/x", reject: true},
+ {filename: "/home/user/README", reject: false},
+ {filename: "/home/user/README.md", reject: true},
+ }
+
+ patterns := []string{"*.go", "README.md", "/home/user/foobar/*"}
+
+ for _, tc := range tests {
+ t.Run("", func(t *testing.T) {
+ reject := rejectByPattern(patterns)
+ res := reject(tc.filename, nil)
+ if res != tc.reject {
+ t.Fatalf("wrong result for filename %v: want %v, got %v",
+ tc.filename, tc.reject, res)
+ }
+ })
+ }
+}
+
+// TestIsExcludedByFile exercises isExcludedByFile with different tagfile
+// names, contents and expected signature headers.
+func TestIsExcludedByFile(t *testing.T) {
+	const (
+		tagFilename = "CACHEDIR.TAG"
+		header      = "Signature: 8a477f597d28d172789f06886806bc55"
+	)
+	tests := []struct {
+		name    string // subtest name
+		tagFile string // name of the tagfile to create ("" = none)
+		content string // content written into the tagfile
+		want    bool   // expected isExcludedByFile result
+	}{
+		{"NoTagfile", "", "", false},
+		{"EmptyTagfile", tagFilename, "", true},
+		{"UnnamedTagFile", "", header, false},
+		{"WrongTagFile", "notatagfile", header, false},
+		{"IncorrectSig", tagFilename, header[1:], false},
+		{"ValidSig", tagFilename, header, true},
+		{"ValidPlusStuff", tagFilename, header + "foo", true},
+		{"ValidPlusNewlineAndStuff", tagFilename, header + "\nbar", true},
+	}
+	for _, tc := range tests {
+		t.Run(tc.name, func(t *testing.T) {
+			tempDir, cleanup := test.TempDir(t)
+			defer cleanup()
+
+			// A regular file next to the (potential) tagfile; its path is
+			// what gets passed to isExcludedByFile.
+			foo := filepath.Join(tempDir, "foo")
+			err := ioutil.WriteFile(foo, []byte("foo"), 0666)
+			if err != nil {
+				t.Fatalf("could not write file: %v", err)
+			}
+			if tc.tagFile != "" {
+				tagFile := filepath.Join(tempDir, tc.tagFile)
+				err = ioutil.WriteFile(tagFile, []byte(tc.content), 0666)
+				if err != nil {
+					t.Fatalf("could not write tagfile: %v", err)
+				}
+			}
+			// An empty tagfile is only matched by an empty expected header
+			// (see the "EmptyTagfile" case).
+			h := header
+			if tc.content == "" {
+				h = ""
+			}
+			if got := isExcludedByFile(foo, tagFilename, h); tc.want != got {
+				t.Fatalf("expected %v, got %v", tc.want, got)
+			}
+		})
+	}
+}
diff --git a/cmd/restic/find.go b/cmd/restic/find.go
new file mode 100644
index 000000000..8b227fa55
--- /dev/null
+++ b/cmd/restic/find.go
@@ -0,0 +1,70 @@
+package main
+
+import (
+ "context"
+
+ "github.com/restic/restic/internal/repository"
+ "github.com/restic/restic/internal/restic"
+)
+
+// FindFilteredSnapshots yields Snapshots, either given explicitly by `snapshotIDs` or filtered from the list of all snapshots.
+// The returned channel is closed once all snapshots have been sent or ctx is
+// cancelled.
+func FindFilteredSnapshots(ctx context.Context, repo *repository.Repository, host string, tags []restic.TagList, paths []string, snapshotIDs []string) <-chan *restic.Snapshot {
+	out := make(chan *restic.Snapshot)
+	go func() {
+		defer close(out)
+		if len(snapshotIDs) != 0 {
+			var (
+				id         restic.ID
+				usedFilter bool
+				err        error
+			)
+			ids := make(restic.IDs, 0, len(snapshotIDs))
+			// Process all snapshot IDs given as arguments.
+			for _, s := range snapshotIDs {
+				if s == "latest" {
+					// "latest" consumes the host/tag/path filters, so mark
+					// them as used regardless of success. (Setting this only
+					// on failure caused a bogus "Ignoring filters" warning
+					// whenever "latest" matched.)
+					usedFilter = true
+					id, err = restic.FindLatestSnapshot(ctx, repo, paths, tags, host)
+					if err != nil {
+						Warnf("Ignoring %q, no snapshot matched given filter (Paths:%v Tags:%v Host:%v)\n", s, paths, tags, host)
+						continue
+					}
+				} else {
+					id, err = restic.FindSnapshot(repo, s)
+					if err != nil {
+						Warnf("Ignoring %q, it is not a snapshot id\n", s)
+						continue
+					}
+				}
+				ids = append(ids, id)
+			}
+
+			// Give the user some indication their filters are not used.
+			if !usedFilter && (host != "" || len(tags) != 0 || len(paths) != 0) {
+				Warnf("Ignoring filters as there are explicit snapshot ids given\n")
+			}
+
+			for _, id := range ids.Uniq() {
+				sn, err := restic.LoadSnapshot(ctx, repo, id)
+				if err != nil {
+					Warnf("Ignoring %q, could not load snapshot: %v\n", id, err)
+					continue
+				}
+				select {
+				case <-ctx.Done():
+					return
+				case out <- sn:
+				}
+			}
+			return
+		}
+
+		// No explicit IDs given: stream every snapshot matching the filters.
+		for _, sn := range restic.FindFilteredSnapshots(ctx, repo, host, tags, paths) {
+			select {
+			case <-ctx.Done():
+				return
+			case out <- sn:
+			}
+		}
+	}()
+	return out
+}
diff --git a/cmd/restic/flags_test.go b/cmd/restic/flags_test.go
new file mode 100644
index 000000000..b7f88e906
--- /dev/null
+++ b/cmd/restic/flags_test.go
@@ -0,0 +1,24 @@
+package main
+
+import (
+ "io/ioutil"
+ "testing"
+)
+
+// TestFlags checks for double defined flags, the commands will panic on
+// ParseFlags() when a shorthand flag is defined twice.
+func TestFlags(t *testing.T) {
+	for _, cmd := range cmdRoot.Commands() {
+		t.Run(cmd.Name(), func(t *testing.T) {
+			cmd.Flags().SetOutput(ioutil.Discard)
+			err := cmd.ParseFlags([]string{"--help"})
+			// "help requested" is the expected outcome of parsing --help,
+			// not a failure. Check err for nil first: calling err.Error()
+			// on a nil error would panic.
+			if err != nil && err.Error() == "pflag: help requested" {
+				err = nil
+			}
+
+			if err != nil {
+				t.Fatal(err)
+			}
+		})
+	}
+}
diff --git a/cmd/restic/format.go b/cmd/restic/format.go
new file mode 100644
index 000000000..5a1f2ba8c
--- /dev/null
+++ b/cmd/restic/format.go
@@ -0,0 +1,84 @@
+package main
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "time"
+
+ "github.com/restic/restic/internal/restic"
+)
+
+// formatBytes renders a byte count using binary units (KiB..TiB) with three
+// decimals; values below 1 KiB are printed as plain bytes.
+func formatBytes(c uint64) string {
+	b := float64(c)
+
+	// Use >= so that exact powers of two land in their own unit
+	// (1<<10 prints "1.000 KiB", not "1024B").
+	switch {
+	case c >= 1<<40:
+		return fmt.Sprintf("%.3f TiB", b/(1<<40))
+	case c >= 1<<30:
+		return fmt.Sprintf("%.3f GiB", b/(1<<30))
+	case c >= 1<<20:
+		return fmt.Sprintf("%.3f MiB", b/(1<<20))
+	case c >= 1<<10:
+		return fmt.Sprintf("%.3f KiB", b/(1<<10))
+	default:
+		return fmt.Sprintf("%dB", c)
+	}
+}
+
+// formatSeconds renders a second count as "H:MM:SS", or "M:SS" when there
+// is no full hour.
+func formatSeconds(sec uint64) string {
+	hours := sec / 3600
+	min := (sec % 3600) / 60
+	sec = sec % 60
+	if hours > 0 {
+		return fmt.Sprintf("%d:%02d:%02d", hours, min, sec)
+	}
+	return fmt.Sprintf("%d:%02d", min, sec)
+}
+
+// formatPercent renders numerator/denominator as a percentage capped at
+// 100%. It returns "" when the denominator is zero.
+func formatPercent(numerator uint64, denominator uint64) string {
+	if denominator == 0 {
+		return ""
+	}
+
+	pct := 100.0 * float64(numerator) / float64(denominator)
+	if pct > 100 {
+		pct = 100
+	}
+	return fmt.Sprintf("%3.2f%%", pct)
+}
+
+// formatRate renders an average transfer rate in MiB/s. A zero duration
+// would divide by zero and print "+InfMiB/s"; report 0.00 instead.
+func formatRate(bytes uint64, duration time.Duration) string {
+	sec := float64(duration) / float64(time.Second)
+	if sec == 0 {
+		return "0.00MiB/s"
+	}
+	rate := float64(bytes) / sec / (1 << 20)
+	return fmt.Sprintf("%.2fMiB/s", rate)
+}
+
+// formatDuration renders d with second granularity via formatSeconds.
+func formatDuration(d time.Duration) string {
+	return formatSeconds(uint64(d / time.Second))
+}
+
+// formatNode returns an ls-style line for node n below prefix. With
+// long=false only the joined path is returned.
+func formatNode(prefix string, n *restic.Node, long bool) string {
+	nodepath := filepath.Join(prefix, n.Name)
+	if !long {
+		return nodepath
+	}
+
+	mtime := n.ModTime.Format(TimeFormat)
+	switch n.Type {
+	case "file":
+		return fmt.Sprintf("%s %5d %5d %6d %s %s",
+			n.Mode, n.UID, n.GID, n.Size, mtime, nodepath)
+	case "dir":
+		return fmt.Sprintf("%s %5d %5d %6d %s %s",
+			n.Mode|os.ModeDir, n.UID, n.GID, n.Size, mtime, nodepath)
+	case "symlink":
+		return fmt.Sprintf("%s %5d %5d %6d %s %s -> %s",
+			n.Mode|os.ModeSymlink, n.UID, n.GID, n.Size, mtime, nodepath, n.LinkTarget)
+	default:
+		return fmt.Sprintf("<Node(%s) %s>", n.Type, n.Name)
+	}
+}
diff --git a/cmd/restic/global.go b/cmd/restic/global.go
new file mode 100644
index 000000000..ccfe1b9c0
--- /dev/null
+++ b/cmd/restic/global.go
@@ -0,0 +1,542 @@
+package main
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "runtime"
+ "strings"
+ "syscall"
+
+ "github.com/restic/restic/internal/backend/azure"
+ "github.com/restic/restic/internal/backend/b2"
+ "github.com/restic/restic/internal/backend/gs"
+ "github.com/restic/restic/internal/backend/local"
+ "github.com/restic/restic/internal/backend/location"
+ "github.com/restic/restic/internal/backend/rest"
+ "github.com/restic/restic/internal/backend/s3"
+ "github.com/restic/restic/internal/backend/sftp"
+ "github.com/restic/restic/internal/backend/swift"
+ "github.com/restic/restic/internal/debug"
+ "github.com/restic/restic/internal/options"
+ "github.com/restic/restic/internal/repository"
+ "github.com/restic/restic/internal/restic"
+
+ "github.com/restic/restic/internal/errors"
+
+ "golang.org/x/crypto/ssh/terminal"
+)
+
+var version = "compiled manually"
+
+// GlobalOptions hold all global options for restic.
+type GlobalOptions struct {
+	Repo         string // repository location (-r / $RESTIC_REPOSITORY)
+	PasswordFile string // file to read the repository password from
+	Quiet        bool   // suppress verbose/progress output
+	NoLock       bool   // do not lock the repository
+	JSON         bool   // JSON output for commands that support it
+
+	ctx      context.Context // cancelled by the cleanup handler on exit
+	password string          // repository password once resolved
+	stdout   io.Writer       // destination used by Printf/Verbosef
+	stderr   io.Writer       // destination used by Warnf
+
+	Options []string // raw -o key=value strings from the command line
+
+	extended options.Options // parsed Options, passed to backend configs
+}
+
+// globalOptions is the single instance all global flags are bound to; it
+// writes to the process stdout/stderr by default.
+var globalOptions = GlobalOptions{
+	stdout: os.Stdout,
+	stderr: os.Stderr,
+}
+
+func init() {
+	// Tie the global context to the cleanup handlers so a clean shutdown
+	// cancels everything running on globalOptions.ctx.
+	var cancel context.CancelFunc
+	globalOptions.ctx, cancel = context.WithCancel(context.Background())
+	AddCleanupHandler(func() error {
+		cancel()
+		return nil
+	})
+
+	// Register the global flags; environment variables supply defaults.
+	f := cmdRoot.PersistentFlags()
+	f.StringVarP(&globalOptions.Repo, "repo", "r", os.Getenv("RESTIC_REPOSITORY"), "repository to backup to or restore from (default: $RESTIC_REPOSITORY)")
+	f.StringVarP(&globalOptions.PasswordFile, "password-file", "p", os.Getenv("RESTIC_PASSWORD_FILE"), "read the repository password from a file (default: $RESTIC_PASSWORD_FILE)")
+	f.BoolVarP(&globalOptions.Quiet, "quiet", "q", false, "do not output comprehensive progress report")
+	f.BoolVar(&globalOptions.NoLock, "no-lock", false, "do not lock the repo, this allows some operations on read-only repos")
+	f.BoolVarP(&globalOptions.JSON, "json", "", false, "set output mode to JSON for commands that support it")
+
+	f.StringSliceVarP(&globalOptions.Options, "option", "o", []string{}, "set extended option (`key=value`, can be specified multiple times)")
+
+	// Arrange for the terminal state to be restored on exit.
+	restoreTerminal()
+}
+
+// checkErrno returns nil when err is set to syscall.Errno(0), since this is no
+// error condition.
+func checkErrno(err error) error {
+	if e, ok := err.(syscall.Errno); ok && e == 0 {
+		return nil
+	}
+	return err
+}
+
+// stdinIsTerminal reports whether stdin is connected to a terminal.
+func stdinIsTerminal() bool {
+	fd := int(os.Stdin.Fd())
+	return terminal.IsTerminal(fd)
+}
+
+// stdoutIsTerminal reports whether stdout is connected to a terminal.
+func stdoutIsTerminal() bool {
+	fd := int(os.Stdout.Fd())
+	return terminal.IsTerminal(fd)
+}
+
+// stdoutTerminalWidth returns the column width of the terminal on stdout,
+// or 0 when it cannot be determined.
+func stdoutTerminalWidth() int {
+	if w, _, err := terminal.GetSize(int(os.Stdout.Fd())); err == nil {
+		return w
+	}
+	return 0
+}
+
+// restoreTerminal installs a cleanup handler that restores the previous
+// terminal state on exit. It is a no-op when stdout is not a terminal.
+func restoreTerminal() {
+	if !stdoutIsTerminal() {
+		return
+	}
+
+	fd := int(os.Stdout.Fd())
+	state, err := terminal.GetState(fd)
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "unable to get terminal state: %v\n", err)
+		return
+	}
+
+	AddCleanupHandler(func() error {
+		// checkErrno masks the harmless errno 0 some platforms report.
+		err := checkErrno(terminal.Restore(fd, state))
+		if err != nil {
+			// Fixed garbled message ("unable to get restore terminal state").
+			fmt.Fprintf(os.Stderr, "unable to restore terminal state: %#+v\n", err)
+		}
+		return err
+	})
+}
+
+// ClearLine creates a platform dependent string to clear the current
+// line, so it can be overwritten. ANSI sequences are not supported on
+// current windows cmd shell.
+func ClearLine() string {
+	if runtime.GOOS != "windows" {
+		return "\x1b[2K"
+	}
+	if w := stdoutTerminalWidth(); w > 0 {
+		// Overwrite the line with spaces, then return to its start.
+		return strings.Repeat(" ", w-1) + "\r"
+	}
+	return ""
+}
+
+// Printf writes the message to the configured stdout stream.
+func Printf(format string, args ...interface{}) {
+	if _, err := fmt.Fprintf(globalOptions.stdout, format, args...); err != nil {
+		fmt.Fprintf(os.Stderr, "unable to write to stdout: %v\n", err)
+		Exit(100)
+	}
+}
+
+// Verbosef calls Printf to write the message when the verbose flag is set.
+func Verbosef(format string, args ...interface{}) {
+	if !globalOptions.Quiet {
+		Printf(format, args...)
+	}
+}
+
+// PrintProgress wraps fmt.Printf to handle the difference in writing progress
+// information to terminals and non-terminal stdout
+func PrintProgress(format string, args ...interface{}) {
+ var (
+ message string
+ carriageControl string
+ )
+ message = fmt.Sprintf(format, args...)
+
+ if !(strings.HasSuffix(message, "\r") || strings.HasSuffix(message, "\n")) {
+ if stdoutIsTerminal() {
+ carriageControl = "\r"
+ } else {
+ carriageControl = "\n"
+ }
+ message = fmt.Sprintf("%s%s", message, carriageControl)
+ }
+
+ if stdoutIsTerminal() {
+ message = fmt.Sprintf("%s%s", ClearLine(), message)
+ }
+
+ fmt.Print(message)
+}
+
+// Warnf writes the message to the configured stderr stream.
+func Warnf(format string, args ...interface{}) {
+	if _, err := fmt.Fprintf(globalOptions.stderr, format, args...); err != nil {
+		fmt.Fprintf(os.Stderr, "unable to write to stderr: %v\n", err)
+		Exit(100)
+	}
+}
+
+// Exitf uses Warnf to write the message and then terminates the process with
+// the given exit code. A trailing newline is appended when missing.
+func Exitf(exitcode int, format string, args ...interface{}) {
+	// Check the length first: indexing format[len(format)-1] on an empty
+	// format string would panic.
+	if len(format) == 0 || format[len(format)-1] != '\n' {
+		format += "\n"
+	}
+
+	Warnf(format, args...)
+	Exit(exitcode)
+}
+
+// resolvePassword determines the password to be used for opening the repository.
+// Precedence: the --password-file contents, then the environment variable
+// named by env. Returns "" when neither source is set.
+func resolvePassword(opts GlobalOptions, env string) (string, error) {
+	if opts.PasswordFile != "" {
+		s, err := ioutil.ReadFile(opts.PasswordFile)
+		if os.IsNotExist(err) {
+			return "", errors.Fatalf("%s does not exist", opts.PasswordFile)
+		}
+		// Surrounding whitespace (e.g. a trailing newline from an editor)
+		// is not considered part of the password.
+		return strings.TrimSpace(string(s)), errors.Wrap(err, "Readfile")
+	}
+
+	if pwd := os.Getenv(env); pwd != "" {
+		return pwd, nil
+	}
+
+	return "", nil
+}
+
+// readPassword reads the password from the given reader directly.
+// At most 1000 bytes are read; a short read (io.ErrUnexpectedEOF) is the
+// normal case and not reported as an error. Trailing CR/LF is stripped.
+func readPassword(in io.Reader) (password string, err error) {
+	buf := make([]byte, 1000)
+	n, err := io.ReadFull(in, buf)
+	buf = buf[:n]
+
+	if err != nil && errors.Cause(err) != io.ErrUnexpectedEOF {
+		return "", errors.Wrap(err, "ReadFull")
+	}
+
+	return strings.TrimRight(string(buf), "\r\n"), nil
+}
+
+// readPasswordTerminal reads the password from the given reader which must be a
+// tty. Prompt is printed on the writer out before attempting to read the
+// password.
+func readPasswordTerminal(in *os.File, out io.Writer, prompt string) (password string, err error) {
+	fmt.Fprint(out, prompt)
+	line, err := terminal.ReadPassword(int(in.Fd()))
+	fmt.Fprintln(out)
+	if err != nil {
+		return "", errors.Wrap(err, "ReadPassword")
+	}
+	return string(line), nil
+}
+
+// ReadPassword reads the password from a password file, the environment
+// variable RESTIC_PASSWORD or prompts the user. An empty password is
+// rejected.
+func ReadPassword(opts GlobalOptions, prompt string) (string, error) {
+	// A password already resolved (file, environment, earlier prompt) wins.
+	if opts.password != "" {
+		return opts.password, nil
+	}
+
+	var (
+		password string
+		err      error
+	)
+
+	if stdinIsTerminal() {
+		// Interactive: prompt on stderr and read without echo.
+		password, err = readPasswordTerminal(os.Stdin, os.Stderr, prompt)
+	} else {
+		// Non-interactive: read the password from piped stdin.
+		password, err = readPassword(os.Stdin)
+	}
+
+	if err != nil {
+		return "", errors.Wrap(err, "unable to read password")
+	}
+
+	if len(password) == 0 {
+		return "", errors.Fatal("an empty password is not a password")
+	}
+
+	return password, nil
+}
+
+// ReadPasswordTwice calls ReadPassword two times and returns an error when the
+// passwords don't match.
+func ReadPasswordTwice(gopts GlobalOptions, prompt1, prompt2 string) (string, error) {
+	first, err := ReadPassword(gopts, prompt1)
+	if err != nil {
+		return "", err
+	}
+
+	second, err := ReadPassword(gopts, prompt2)
+	if err != nil {
+		return "", err
+	}
+
+	if first != second {
+		return "", errors.Fatal("passwords do not match")
+	}
+
+	return first, nil
+}
+
+const maxKeys = 20
+
+// OpenRepository reads the password and opens the repository.
+func OpenRepository(opts GlobalOptions) (*repository.Repository, error) {
+	if opts.Repo == "" {
+		return nil, errors.Fatal("Please specify repository location (-r)")
+	}
+
+	be, err := open(opts.Repo, opts.extended)
+	if err != nil {
+		return nil, err
+	}
+
+	s := repository.New(be)
+
+	opts.password, err = ReadPassword(opts, "enter password for repository: ")
+	if err != nil {
+		return nil, err
+	}
+
+	// Try at most maxKeys key entries against the given password.
+	err = s.SearchKey(context.TODO(), opts.password, maxKeys)
+	if err != nil {
+		return nil, err
+	}
+
+	return s, nil
+}
+
+// parseConfig turns a parsed backend location into the backend-specific
+// config struct, filling in credentials from the environment where the
+// location itself does not provide them and applying extended (-o) options.
+func parseConfig(loc location.Location, opts options.Options) (interface{}, error) {
+	// only apply options for a particular backend here
+	opts = opts.Extract(loc.Scheme)
+
+	switch loc.Scheme {
+	case "local":
+		cfg := loc.Config.(local.Config)
+		if err := opts.Apply(loc.Scheme, &cfg); err != nil {
+			return nil, err
+		}
+
+		debug.Log("opening local repository at %#v", cfg)
+		return cfg, nil
+
+	case "sftp":
+		cfg := loc.Config.(sftp.Config)
+		if err := opts.Apply(loc.Scheme, &cfg); err != nil {
+			return nil, err
+		}
+
+		debug.Log("opening sftp repository at %#v", cfg)
+		return cfg, nil
+
+	case "s3":
+		cfg := loc.Config.(s3.Config)
+		if cfg.KeyID == "" {
+			cfg.KeyID = os.Getenv("AWS_ACCESS_KEY_ID")
+		}
+
+		if cfg.Secret == "" {
+			cfg.Secret = os.Getenv("AWS_SECRET_ACCESS_KEY")
+		}
+
+		if err := opts.Apply(loc.Scheme, &cfg); err != nil {
+			return nil, err
+		}
+
+		debug.Log("opening s3 repository at %#v", cfg)
+		return cfg, nil
+
+	case "gs":
+		cfg := loc.Config.(gs.Config)
+		if cfg.ProjectID == "" {
+			cfg.ProjectID = os.Getenv("GOOGLE_PROJECT_ID")
+		}
+
+		if cfg.JSONKeyPath == "" {
+			if path := os.Getenv("GOOGLE_APPLICATION_CREDENTIALS"); path != "" {
+				// Check read access
+				if _, err := ioutil.ReadFile(path); err != nil {
+					return nil, errors.Fatalf("Failed to read google credential from file %v: %v", path, err)
+				}
+				cfg.JSONKeyPath = path
+			} else {
+				return nil, errors.Fatal("No credential file path is set")
+			}
+		}
+
+		if err := opts.Apply(loc.Scheme, &cfg); err != nil {
+			return nil, err
+		}
+
+		debug.Log("opening gs repository at %#v", cfg)
+		return cfg, nil
+
+	case "azure":
+		cfg := loc.Config.(azure.Config)
+		if cfg.AccountName == "" {
+			cfg.AccountName = os.Getenv("AZURE_ACCOUNT_NAME")
+		}
+
+		if cfg.AccountKey == "" {
+			cfg.AccountKey = os.Getenv("AZURE_ACCOUNT_KEY")
+		}
+
+		if err := opts.Apply(loc.Scheme, &cfg); err != nil {
+			return nil, err
+		}
+
+		// Fixed copy-paste in the log message (said "gs" before).
+		debug.Log("opening azure repository at %#v", cfg)
+		return cfg, nil
+
+	case "swift":
+		cfg := loc.Config.(swift.Config)
+
+		if err := swift.ApplyEnvironment("", &cfg); err != nil {
+			return nil, err
+		}
+
+		if err := opts.Apply(loc.Scheme, &cfg); err != nil {
+			return nil, err
+		}
+
+		debug.Log("opening swift repository at %#v", cfg)
+		return cfg, nil
+
+	case "b2":
+		cfg := loc.Config.(b2.Config)
+
+		if cfg.AccountID == "" {
+			cfg.AccountID = os.Getenv("B2_ACCOUNT_ID")
+		}
+
+		if cfg.Key == "" {
+			cfg.Key = os.Getenv("B2_ACCOUNT_KEY")
+		}
+
+		if err := opts.Apply(loc.Scheme, &cfg); err != nil {
+			return nil, err
+		}
+
+		debug.Log("opening b2 repository at %#v", cfg)
+		return cfg, nil
+	case "rest":
+		cfg := loc.Config.(rest.Config)
+		if err := opts.Apply(loc.Scheme, &cfg); err != nil {
+			return nil, err
+		}
+
+		debug.Log("opening rest repository at %#v", cfg)
+		return cfg, nil
+	}
+
+	return nil, errors.Fatalf("invalid backend: %q", loc.Scheme)
+}
+
+// Open the backend specified by a location config.
+// After opening, the presence and non-zero size of the repository config
+// file is verified so a helpful error can be produced for wrong locations.
+func open(s string, opts options.Options) (restic.Backend, error) {
+	debug.Log("parsing location %v", s)
+	loc, err := location.Parse(s)
+	if err != nil {
+		return nil, errors.Fatalf("parsing repository location failed: %v", err)
+	}
+
+	var be restic.Backend
+
+	cfg, err := parseConfig(loc, opts)
+	if err != nil {
+		return nil, err
+	}
+
+	// Dispatch on the scheme to the matching backend implementation.
+	switch loc.Scheme {
+	case "local":
+		be, err = local.Open(cfg.(local.Config))
+	case "sftp":
+		be, err = sftp.Open(cfg.(sftp.Config))
+	case "s3":
+		be, err = s3.Open(cfg.(s3.Config))
+	case "gs":
+		be, err = gs.Open(cfg.(gs.Config))
+	case "azure":
+		be, err = azure.Open(cfg.(azure.Config))
+	case "swift":
+		be, err = swift.Open(cfg.(swift.Config))
+	case "b2":
+		be, err = b2.Open(cfg.(b2.Config))
+	case "rest":
+		be, err = rest.Open(cfg.(rest.Config))
+
+	default:
+		return nil, errors.Fatalf("invalid backend: %q", loc.Scheme)
+	}
+
+	if err != nil {
+		return nil, errors.Fatalf("unable to open repo at %v: %v", s, err)
+	}
+
+	// check if config is there
+	fi, err := be.Stat(context.TODO(), restic.Handle{Type: restic.ConfigFile})
+	if err != nil {
+		return nil, errors.Fatalf("unable to open config file: %v\nIs there a repository at the following location?\n%v", err, s)
+	}
+
+	if fi.Size == 0 {
+		return nil, errors.New("config file has zero size, invalid repository?")
+	}
+
+	return be, nil
+}
+
+// Create the backend specified by URI.
+func create(s string, opts options.Options) (restic.Backend, error) {
+	debug.Log("parsing location %v", s)
+	loc, err := location.Parse(s)
+	if err != nil {
+		return nil, err
+	}
+
+	cfg, err := parseConfig(loc, opts)
+	if err != nil {
+		return nil, err
+	}
+
+	switch loc.Scheme {
+	case "local":
+		return local.Create(cfg.(local.Config))
+	case "sftp":
+		return sftp.Create(cfg.(sftp.Config))
+	case "s3":
+		return s3.Create(cfg.(s3.Config))
+	case "gs":
+		return gs.Create(cfg.(gs.Config))
+	case "azure":
+		return azure.Create(cfg.(azure.Config))
+	case "swift":
+		// NOTE(review): swift calls Open rather than Create here — confirm
+		// this is intentional (e.g. containers are provisioned externally).
+		return swift.Open(cfg.(swift.Config))
+	case "b2":
+		return b2.Create(cfg.(b2.Config))
+	case "rest":
+		return rest.Create(cfg.(rest.Config))
+	}
+
+	debug.Log("invalid repository scheme: %v", s)
+	return nil, errors.Fatalf("invalid scheme %q", loc.Scheme)
+}
diff --git a/cmd/restic/global_debug.go b/cmd/restic/global_debug.go
new file mode 100644
index 000000000..7cad172f6
--- /dev/null
+++ b/cmd/restic/global_debug.go
@@ -0,0 +1,75 @@
+// +build debug
+
+package main
+
+import (
+ "fmt"
+ "net/http"
+ _ "net/http/pprof"
+ "os"
+
+ "github.com/restic/restic/internal/errors"
+ "github.com/restic/restic/internal/repository"
+
+ "github.com/pkg/profile"
+)
+
+// Flag values for debug builds only (registered in init below).
+var (
+	listenMemoryProfile string // address for the pprof HTTP server
+	memProfilePath      string // directory to write the memory profile to
+	cpuProfilePath      string // directory to write the CPU profile to
+	insecure            bool   // use insecure (fast, test-only) KDF settings
+)
+
+func init() {
+	// Register the debug-only flags on the root command.
+	f := cmdRoot.PersistentFlags()
+	f.StringVar(&listenMemoryProfile, "listen-profile", "", "listen on this `address:port` for memory profiling")
+	f.StringVar(&memProfilePath, "mem-profile", "", "write memory profile to `dir`")
+	f.StringVar(&cpuProfilePath, "cpu-profile", "", "write cpu profile to `dir`")
+	f.BoolVar(&insecure, "insecure-kdf", false, "use insecure KDF settings")
+}
+
+// fakeTestingTB provides the minimal testing-like interface that
+// repository.TestUseLowSecurityKDFParameters expects, for use outside tests.
+type fakeTestingTB struct{}
+
+// Logf writes the formatted message to stderr.
+func (fakeTestingTB) Logf(msg string, args ...interface{}) {
+	fmt.Fprintf(os.Stderr, msg, args...)
+}
+
+// runDebug starts the profiling facilities selected via the debug-only
+// flags: an optional pprof HTTP server, and either a memory or a CPU
+// profile (never both). It also enables insecure KDF parameters on request.
+func runDebug() error {
+	if listenMemoryProfile != "" {
+		fmt.Fprintf(os.Stderr, "running memory profile HTTP server on %v\n", listenMemoryProfile)
+		go func() {
+			// net/http/pprof registered its handlers on the default mux.
+			err := http.ListenAndServe(listenMemoryProfile, nil)
+			if err != nil {
+				fmt.Fprintf(os.Stderr, "memory profile listen failed: %v\n", err)
+			}
+		}()
+	}
+
+	if memProfilePath != "" && cpuProfilePath != "" {
+		return errors.Fatal("only one profile (memory or CPU) may be activated at the same time")
+	}
+
+	var prof interface {
+		Stop()
+	}
+
+	if memProfilePath != "" {
+		prof = profile.Start(profile.Quiet, profile.NoShutdownHook, profile.MemProfile, profile.ProfilePath(memProfilePath))
+	} else if cpuProfilePath != "" {
+		prof = profile.Start(profile.Quiet, profile.NoShutdownHook, profile.CPUProfile, profile.ProfilePath(cpuProfilePath))
+	}
+
+	if prof != nil {
+		// Flush the profile on exit.
+		AddCleanupHandler(func() error {
+			prof.Stop()
+			return nil
+		})
+	}
+
+	if insecure {
+		repository.TestUseLowSecurityKDFParameters(fakeTestingTB{})
+	}
+
+	return nil
+}
diff --git a/cmd/restic/global_release.go b/cmd/restic/global_release.go
new file mode 100644
index 000000000..04c7cba31
--- /dev/null
+++ b/cmd/restic/global_release.go
@@ -0,0 +1,6 @@
+// +build !debug
+
+package main
+
+// runDebug is a noop without the debug tag; the variant in global_debug.go
+// (build tag "debug") sets up profiling instead.
+func runDebug() error { return nil }
diff --git a/cmd/restic/integration_fuse_test.go b/cmd/restic/integration_fuse_test.go
new file mode 100644
index 000000000..4d70212a5
--- /dev/null
+++ b/cmd/restic/integration_fuse_test.go
@@ -0,0 +1,206 @@
+// +build !openbsd
+// +build !windows
+
+package main
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "path/filepath"
+ "testing"
+ "time"
+
+ "github.com/restic/restic/internal/repository"
+ "github.com/restic/restic/internal/restic"
+ . "github.com/restic/restic/internal/test"
+)
+
+const (
+	mountWait       = 20                     // number of polls before giving up
+	mountSleep      = 100 * time.Millisecond // delay between polls
+	mountTestSubdir = "snapshots"            // subdir whose presence signals a ready mount
+)
+
+// snapshotsDirExists reports whether the "snapshots" subdir is visible
+// below dir, i.e. whether the fuse mount has come up.
+func snapshotsDirExists(t testing.TB, dir string) bool {
+	f, err := os.Open(filepath.Join(dir, mountTestSubdir))
+	if err != nil {
+		if os.IsNotExist(err) {
+			return false
+		}
+		t.Error(err)
+	}
+
+	if cerr := f.Close(); cerr != nil {
+		t.Error(cerr)
+	}
+
+	return true
+}
+
+// waitForMount blocks (max mountWait * mountSleep) until the subdir
+// "snapshots" appears in the dir.
+func waitForMount(t testing.TB, dir string) {
+	for attempt := 0; attempt < mountWait; attempt++ {
+		if snapshotsDirExists(t, dir) {
+			t.Log("mounted directory is ready")
+			return
+		}
+		time.Sleep(mountSleep)
+	}
+	t.Errorf("subdir %q of dir %s never appeared", mountTestSubdir, dir)
+}
+
+// testRunMount mounts the repository from gopts at dir, failing t on error.
+func testRunMount(t testing.TB, gopts GlobalOptions, dir string) {
+	OK(t, runMount(MountOptions{}, gopts, []string{dir}))
+}
+
+// testRunUmount unmounts dir, retrying up to mountWait times before
+// reporting the last error on t.
+func testRunUmount(t testing.TB, gopts GlobalOptions, dir string) {
+	var lastErr error
+	for attempt := 0; attempt < mountWait; attempt++ {
+		if lastErr = umount(dir); lastErr == nil {
+			t.Logf("directory %v umounted", dir)
+			return
+		}
+		time.Sleep(mountSleep)
+	}
+	t.Errorf("unable to umount dir %v, last error was: %v", dir, lastErr)
+}
+
+// listSnapshots returns the entry names of the "snapshots" directory
+// below dir.
+func listSnapshots(t testing.TB, dir string) []string {
+	d, err := os.Open(filepath.Join(dir, "snapshots"))
+	OK(t, err)
+	names, err := d.Readdirnames(-1)
+	OK(t, err)
+	OK(t, d.Close())
+	return names
+}
+
+// checkSnapshots mounts the repository at mountpoint and verifies that the
+// fuse "snapshots" directory contains exactly one entry per ID in
+// snapshotIDs, named after each snapshot's timestamp (with a "-N" suffix
+// for duplicate timestamps).
+func checkSnapshots(t testing.TB, global GlobalOptions, repo *repository.Repository, mountpoint, repodir string, snapshotIDs restic.IDs) {
+	t.Logf("checking for %d snapshots: %v", len(snapshotIDs), snapshotIDs)
+
+	go testRunMount(t, global, mountpoint)
+	waitForMount(t, mountpoint)
+	defer testRunUmount(t, global, mountpoint)
+
+	if !snapshotsDirExists(t, mountpoint) {
+		t.Fatal(`virtual directory "snapshots" doesn't exist`)
+	}
+
+	ids := listSnapshots(t, repodir)
+	t.Logf("found %v snapshots in repo: %v", len(ids), ids)
+
+	namesInSnapshots := listSnapshots(t, mountpoint)
+	t.Logf("found %v snapshots in fuse mount: %v", len(namesInSnapshots), namesInSnapshots)
+	Assert(t,
+		len(namesInSnapshots) == len(snapshotIDs),
+		"Invalid number of snapshots: expected %d, got %d", len(snapshotIDs), len(namesInSnapshots))
+
+	// namesMap tracks which fuse entries have been claimed by a snapshot.
+	namesMap := make(map[string]bool)
+	for _, name := range namesInSnapshots {
+		namesMap[name] = false
+	}
+
+	for _, id := range snapshotIDs {
+		snapshot, err := restic.LoadSnapshot(context.TODO(), repo, id)
+		OK(t, err)
+
+		ts := snapshot.Time.Format(time.RFC3339)
+		present, ok := namesMap[ts]
+		if !ok {
+			t.Errorf("Snapshot %v (%q) isn't present in fuse dir", id.Str(), ts)
+		}
+
+		// If the timestamp entry is already claimed, probe "-1", "-2", …
+		// until an unclaimed entry for the same timestamp is found.
+		for i := 1; present; i++ {
+			ts = fmt.Sprintf("%s-%d", snapshot.Time.Format(time.RFC3339), i)
+			present, ok = namesMap[ts]
+			if !ok {
+				t.Errorf("Snapshot %v (%q) isn't present in fuse dir", id.Str(), ts)
+			}
+
+			if !present {
+				break
+			}
+		}
+
+		namesMap[ts] = true
+	}
+
+	// Any entry left unclaimed has no corresponding snapshot.
+	for name, present := range namesMap {
+		Assert(t, present, "Directory %s is present in fuse dir but is not a snapshot", name)
+	}
+}
+
+// TestMount creates a repository, performs three backups and verifies after
+// each one that the fuse mount exposes the expected set of snapshots.
+func TestMount(t *testing.T) {
+	if !RunFuseTest {
+		t.Skip("Skipping fuse tests")
+	}
+
+	env, cleanup := withTestEnvironment(t)
+	defer cleanup()
+
+	testRunInit(t, env.gopts)
+
+	repo, err := OpenRepository(env.gopts)
+	OK(t, err)
+
+	// We remove the mountpoint now to check that cmdMount creates it
+	RemoveAll(t, env.mountpoint)
+
+	// Empty repository: the snapshots dir must exist but be empty.
+	checkSnapshots(t, env.gopts, repo, env.mountpoint, env.repo, []restic.ID{})
+
+	SetupTarTestFixture(t, env.testdata, filepath.Join("testdata", "backup-data.tar.gz"))
+
+	// first backup
+	testRunBackup(t, []string{env.testdata}, BackupOptions{}, env.gopts)
+	snapshotIDs := testRunList(t, "snapshots", env.gopts)
+	Assert(t, len(snapshotIDs) == 1,
+		"expected one snapshot, got %v", snapshotIDs)
+
+	checkSnapshots(t, env.gopts, repo, env.mountpoint, env.repo, snapshotIDs)
+
+	// second backup, implicit incremental
+	testRunBackup(t, []string{env.testdata}, BackupOptions{}, env.gopts)
+	snapshotIDs = testRunList(t, "snapshots", env.gopts)
+	Assert(t, len(snapshotIDs) == 2,
+		"expected two snapshots, got %v", snapshotIDs)
+
+	checkSnapshots(t, env.gopts, repo, env.mountpoint, env.repo, snapshotIDs)
+
+	// third backup, explicit incremental
+	bopts := BackupOptions{Parent: snapshotIDs[0].String()}
+	testRunBackup(t, []string{env.testdata}, bopts, env.gopts)
+	snapshotIDs = testRunList(t, "snapshots", env.gopts)
+	Assert(t, len(snapshotIDs) == 3,
+		"expected three snapshots, got %v", snapshotIDs)
+
+	checkSnapshots(t, env.gopts, repo, env.mountpoint, env.repo, snapshotIDs)
+}
+
+// TestMountSameTimestamps uses a fixture repository whose snapshots share a
+// timestamp, checking that the fuse dir disambiguates them (see the "-N"
+// suffix handling in checkSnapshots).
+func TestMountSameTimestamps(t *testing.T) {
+	if !RunFuseTest {
+		t.Skip("Skipping fuse tests")
+	}
+
+	env, cleanup := withTestEnvironment(t)
+	defer cleanup()
+
+	SetupTarTestFixture(t, env.base, filepath.Join("testdata", "repo-same-timestamps.tar.gz"))
+
+	repo, err := OpenRepository(env.gopts)
+	OK(t, err)
+
+	// Snapshot IDs contained in the fixture repository.
+	ids := []restic.ID{
+		restic.TestParseID("280303689e5027328889a06d718b729e96a1ce6ae9ef8290bff550459ae611ee"),
+		restic.TestParseID("75ad6cdc0868e082f2596d5ab8705e9f7d87316f5bf5690385eeff8dbe49d9f5"),
+		restic.TestParseID("5fd0d8b2ef0fa5d23e58f1e460188abb0f525c0f0c4af8365a1280c807a80a1b"),
+	}
+
+	checkSnapshots(t, env.gopts, repo, env.mountpoint, env.repo, ids)
+}
diff --git a/cmd/restic/integration_helpers_test.go b/cmd/restic/integration_helpers_test.go
new file mode 100644
index 000000000..3d0ad51ba
--- /dev/null
+++ b/cmd/restic/integration_helpers_test.go
@@ -0,0 +1,221 @@
+package main
+
+import (
+ "context"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "runtime"
+ "testing"
+
+ "github.com/restic/restic/internal/options"
+ "github.com/restic/restic/internal/repository"
+ . "github.com/restic/restic/internal/test"
+)
+
+// dirEntry describes one file system object found by walkDir.
+type dirEntry struct {
+	path string      // path relative to the walk root
+	fi   os.FileInfo // stat information
+	link uint64      // number of hard links
+}
+
+// walkDir walks dir and streams one dirEntry per object found, with paths
+// relative to dir. The root entry itself is discarded. Walk errors are
+// printed to stderr and the affected entries skipped.
+func walkDir(dir string) <-chan *dirEntry {
+	ch := make(chan *dirEntry, 100)
+
+	go func() {
+		err := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
+			if err != nil {
+				fmt.Fprintf(os.Stderr, "error: %v\n", err)
+				return nil
+			}
+
+			name, err := filepath.Rel(dir, path)
+			if err != nil {
+				fmt.Fprintf(os.Stderr, "error: %v\n", err)
+				return nil
+			}
+
+			ch <- &dirEntry{
+				path: name,
+				fi:   info,
+				link: nlink(info),
+			}
+
+			return nil
+		})
+
+		if err != nil {
+			fmt.Fprintf(os.Stderr, "Walk() error: %v\n", err)
+		}
+
+		close(ch)
+	}()
+
+	// first element is root, discard it (plain receive instead of the
+	// redundant `_ = <-ch` form)
+	<-ch
+
+	return ch
+}
+
+// isSymlink reports whether fi describes a symbolic link.
+func isSymlink(fi os.FileInfo) bool {
+	return fi.Mode()&(os.ModeType|os.ModeCharDevice) == os.ModeSymlink
+}
+
+// sameModTime compares the modification times of fi1 and fi2. On platforms
+// where symlink mtimes cannot be restored (darwin, freebsd, openbsd) two
+// symlinks always compare as equal.
+func sameModTime(fi1, fi2 os.FileInfo) bool {
+	if isSymlink(fi1) && isSymlink(fi2) {
+		switch runtime.GOOS {
+		case "darwin", "freebsd", "openbsd":
+			return true
+		}
+	}
+	return fi1.ModTime().Equal(fi2.ModTime())
+}
+
+// directoriesEqualContents checks if both directories contain exactly the same
+// contents. The two walkDir streams are merged like a sorted-list diff:
+// entries only in dir2 print "+", entries only in dir1 print "-", and
+// entries present in both but differing print "%".
+func directoriesEqualContents(dir1, dir2 string) bool {
+	ch1 := walkDir(dir1)
+	ch2 := walkDir(dir2)
+
+	changes := false
+
+	var a, b *dirEntry
+	for {
+		var ok bool
+
+		// Refill a/b from their channel when consumed; a drained channel
+		// is set to nil so it is not read again.
+		if ch1 != nil && a == nil {
+			a, ok = <-ch1
+			if !ok {
+				ch1 = nil
+			}
+		}
+
+		if ch2 != nil && b == nil {
+			b, ok = <-ch2
+			if !ok {
+				ch2 = nil
+			}
+		}
+
+		if ch1 == nil && ch2 == nil {
+			break
+		}
+
+		if ch1 == nil {
+			// dir1 exhausted: everything left in dir2 is extra.
+			fmt.Printf("+%v\n", b.path)
+			changes = true
+		} else if ch2 == nil {
+			// dir2 exhausted: everything left in dir1 is missing.
+			fmt.Printf("-%v\n", a.path)
+			changes = true
+		} else if !a.equals(b) {
+			// Paths differ: advance only the lexicographically smaller
+			// side (keep the other entry for the next round).
+			if a.path < b.path {
+				fmt.Printf("-%v\n", a.path)
+				changes = true
+				a = nil
+				continue
+			} else if a.path > b.path {
+				fmt.Printf("+%v\n", b.path)
+				changes = true
+				b = nil
+				continue
+			} else {
+				// Same path, differing metadata.
+				fmt.Printf("%%%v\n", a.path)
+				changes = true
+			}
+		}
+
+		a, b = nil, nil
+	}
+
+	if changes {
+		return false
+	}
+
+	return true
+}
+
+// dirStat accumulates object counts and the total regular-file size for a
+// directory tree (see dirStats).
+type dirStat struct {
+	files, dirs, other uint
+	size               uint64
+}
+
+// isFile reports whether fi describes a regular file.
+func isFile(fi os.FileInfo) bool {
+	mode := fi.Mode() & (os.ModeType | os.ModeCharDevice)
+	return mode == 0
+}
+
+// dirStats walks dir and collects stats.
+func dirStats(dir string) (stat dirStat) {
+	for entry := range walkDir(dir) {
+		switch {
+		case isFile(entry.fi):
+			stat.files++
+			stat.size += uint64(entry.fi.Size())
+		case entry.fi.IsDir():
+			stat.dirs++
+		default:
+			stat.other++
+		}
+	}
+	return stat
+}
+
+// testEnvironment bundles the temporary directories and the global options
+// an integration test runs with.
+type testEnvironment struct {
+	base, cache, repo, mountpoint, testdata string
+	gopts                                   GlobalOptions
+}
+
+// withTestEnvironment creates a test environment and returns a cleanup
+// function which removes it. Tests are skipped entirely when integration
+// tests are disabled.
+func withTestEnvironment(t testing.TB) (env *testEnvironment, cleanup func()) {
+	if !RunIntegrationTest {
+		t.Skip("integration tests disabled")
+	}
+
+	// Speed up key derivation for tests.
+	repository.TestUseLowSecurityKDFParameters(t)
+
+	tempdir, err := ioutil.TempDir(TestTempDir, "restic-test-")
+	OK(t, err)
+
+	env = &testEnvironment{
+		base:       tempdir,
+		cache:      filepath.Join(tempdir, "cache"),
+		repo:       filepath.Join(tempdir, "repo"),
+		testdata:   filepath.Join(tempdir, "testdata"),
+		mountpoint: filepath.Join(tempdir, "mount"),
+	}
+
+	OK(t, os.MkdirAll(env.mountpoint, 0700))
+	OK(t, os.MkdirAll(env.testdata, 0700))
+	OK(t, os.MkdirAll(env.cache, 0700))
+	OK(t, os.MkdirAll(env.repo, 0700))
+
+	env.gopts = GlobalOptions{
+		Repo:     env.repo,
+		Quiet:    true,
+		ctx:      context.Background(),
+		password: TestPassword,
+		stdout:   os.Stdout,
+		stderr:   os.Stderr,
+		extended: make(options.Options),
+	}
+
+	// always overwrite global options
+	globalOptions = env.gopts
+
+	cleanup = func() {
+		if !TestCleanupTempDirs {
+			t.Logf("leaving temporary directory %v used for test", tempdir)
+			return
+		}
+		RemoveAll(t, tempdir)
+	}
+
+	return env, cleanup
+}
diff --git a/cmd/restic/integration_helpers_unix_test.go b/cmd/restic/integration_helpers_unix_test.go
new file mode 100644
index 000000000..2a06db63d
--- /dev/null
+++ b/cmd/restic/integration_helpers_unix_test.go
@@ -0,0 +1,70 @@
+//+build !windows
+
+package main
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "syscall"
+)
+
+func (e *dirEntry) equals(other *dirEntry) bool {
+ if e.path != other.path {
+ fmt.Fprintf(os.Stderr, "%v: path does not match (%v != %v)\n", e.path, e.path, other.path)
+ return false
+ }
+
+ if e.fi.Mode() != other.fi.Mode() {
+ fmt.Fprintf(os.Stderr, "%v: mode does not match (%v != %v)\n", e.path, e.fi.Mode(), other.fi.Mode())
+ return false
+ }
+
+ if !sameModTime(e.fi, other.fi) {
+ fmt.Fprintf(os.Stderr, "%v: ModTime does not match (%v != %v)\n", e.path, e.fi.ModTime(), other.fi.ModTime())
+ return false
+ }
+
+ stat, _ := e.fi.Sys().(*syscall.Stat_t)
+ stat2, _ := other.fi.Sys().(*syscall.Stat_t)
+
+ if stat.Uid != stat2.Uid {
+ fmt.Fprintf(os.Stderr, "%v: UID does not match (%v != %v)\n", e.path, stat.Uid, stat2.Uid)
+ return false
+ }
+
+ if stat.Gid != stat2.Gid {
+ fmt.Fprintf(os.Stderr, "%v: GID does not match (%v != %v)\n", e.path, stat.Gid, stat2.Gid)
+ return false
+ }
+
+ if stat.Nlink != stat2.Nlink {
+ fmt.Fprintf(os.Stderr, "%v: Number of links do not match (%v != %v)\n", e.path, stat.Nlink, stat2.Nlink)
+ return false
+ }
+
+ return true
+}
+
// nlink returns the number of hardlinks of the file described by info.
// NOTE(review): the type assertion's ok result is ignored; info is assumed
// to come from os.Stat/Lstat so Sys() yields *syscall.Stat_t — any other
// FileInfo source would cause a nil dereference here. Confirm callers.
func nlink(info os.FileInfo) uint64 {
	stat, _ := info.Sys().(*syscall.Stat_t)
	return uint64(stat.Nlink)
}
+
+func createFileSetPerHardlink(dir string) map[uint64][]string {
+ var stat syscall.Stat_t
+ linkTests := make(map[uint64][]string)
+ files, err := ioutil.ReadDir(dir)
+ if err != nil {
+ return nil
+ }
+ for _, f := range files {
+
+ if err := syscall.Stat(filepath.Join(dir, f.Name()), &stat); err != nil {
+ return nil
+ }
+ linkTests[uint64(stat.Ino)] = append(linkTests[uint64(stat.Ino)], f.Name())
+ }
+ return linkTests
+}
diff --git a/cmd/restic/integration_helpers_windows_test.go b/cmd/restic/integration_helpers_windows_test.go
new file mode 100644
index 000000000..9e3fbac9b
--- /dev/null
+++ b/cmd/restic/integration_helpers_windows_test.go
@@ -0,0 +1,49 @@
+//+build windows
+
+package main
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+)
+
+func (e *dirEntry) equals(other *dirEntry) bool {
+ if e.path != other.path {
+ fmt.Fprintf(os.Stderr, "%v: path does not match (%v != %v)\n", e.path, e.path, other.path)
+ return false
+ }
+
+ if e.fi.Mode() != other.fi.Mode() {
+ fmt.Fprintf(os.Stderr, "%v: mode does not match (%v != %v)\n", e.path, e.fi.Mode(), other.fi.Mode())
+ return false
+ }
+
+ if !sameModTime(e.fi, other.fi) {
+ fmt.Fprintf(os.Stderr, "%v: ModTime does not match (%v != %v)\n", e.path, e.fi.ModTime(), other.fi.ModTime())
+ return false
+ }
+
+ return true
+}
+
// nlink returns the hardlink count for info. Hardlink counting is not
// implemented on Windows, so every file reports exactly one link.
func nlink(info os.FileInfo) uint64 {
	const singleLink = 1
	return singleLink
}
+
// inode returns the inode number for info. There is no inode concept on
// Windows, so zero is reported for every file.
func inode(info os.FileInfo) uint64 {
	var none uint64
	return none
}
+
+func createFileSetPerHardlink(dir string) map[uint64][]string {
+ linkTests := make(map[uint64][]string)
+ files, err := ioutil.ReadDir(dir)
+ if err != nil {
+ return nil
+ }
+ for i, f := range files {
+ linkTests[uint64(i)] = append(linkTests[uint64(i)], f.Name())
+ i++
+ }
+ return linkTests
+}
diff --git a/cmd/restic/integration_test.go b/cmd/restic/integration_test.go
new file mode 100644
index 000000000..449b1c93e
--- /dev/null
+++ b/cmd/restic/integration_test.go
@@ -0,0 +1,1310 @@
+package main
+
+import (
+ "bufio"
+ "bytes"
+ "crypto/rand"
+ "encoding/json"
+ "fmt"
+ "io"
+ "io/ioutil"
+ mrand "math/rand"
+ "os"
+ "path/filepath"
+ "regexp"
+ "strings"
+ "syscall"
+ "testing"
+ "time"
+
+ "github.com/restic/restic/internal/errors"
+ "github.com/restic/restic/internal/restic"
+
+ "github.com/restic/restic/internal/debug"
+ "github.com/restic/restic/internal/filter"
+ "github.com/restic/restic/internal/repository"
+ . "github.com/restic/restic/internal/test"
+)
+
// parseIDsFromReader reads rd line by line and parses each line as a
// restic ID. Unparsable lines are logged and skipped, so the result may
// contain fewer IDs than there were input lines.
func parseIDsFromReader(t testing.TB, rd io.Reader) restic.IDs {
	IDs := restic.IDs{}
	sc := bufio.NewScanner(rd)

	for sc.Scan() {
		id, err := restic.ParseID(sc.Text())
		if err != nil {
			t.Logf("parse id %v: %v", sc.Text(), err)
			continue
		}

		IDs = append(IDs, id)
	}

	return IDs
}
+
// testRunInit initializes a new repository at opts.Repo, using cheap KDF
// parameters and a zero lock timeout to keep tests fast.
func testRunInit(t testing.TB, opts GlobalOptions) {
	repository.TestUseLowSecurityKDFParameters(t)
	restic.TestSetLockTimeout(t, 0)

	OK(t, runInit(opts, nil))
	t.Logf("repository initialized at %v", opts.Repo)
}
+
// testRunBackup backs up the given target paths and fails the test on error.
func testRunBackup(t testing.TB, target []string, opts BackupOptions, gopts GlobalOptions) {
	t.Logf("backing up %v", target)
	OK(t, runBackup(opts, gopts, target))
}
+
// testRunList runs the "list" command for object type tpe and returns the
// parsed IDs. The global stdout is temporarily redirected into a buffer to
// capture the command's output.
func testRunList(t testing.TB, tpe string, opts GlobalOptions) restic.IDs {
	buf := bytes.NewBuffer(nil)
	globalOptions.stdout = buf
	defer func() {
		globalOptions.stdout = os.Stdout
	}()

	OK(t, runList(opts, []string{tpe}))
	return parseIDsFromReader(t, buf)
}
+
// testRunRestore restores snapshotID into dir without any exclude filters.
func testRunRestore(t testing.TB, opts GlobalOptions, dir string, snapshotID restic.ID) {
	testRunRestoreExcludes(t, opts, dir, snapshotID, nil)
}
+
// testRunRestoreLatest restores the most recent snapshot matching paths
// and host into dir.
func testRunRestoreLatest(t testing.TB, gopts GlobalOptions, dir string, paths []string, host string) {
	opts := RestoreOptions{
		Target: dir,
		Host: host,
		Paths: paths,
	}

	OK(t, runRestore(opts, gopts, []string{"latest"}))
}
+
// testRunRestoreExcludes restores snapshotID into dir, skipping entries
// matching the given exclude patterns.
func testRunRestoreExcludes(t testing.TB, gopts GlobalOptions, dir string, snapshotID restic.ID, excludes []string) {
	opts := RestoreOptions{
		Target: dir,
		Exclude: excludes,
	}

	OK(t, runRestore(opts, gopts, []string{snapshotID.String()}))
}
+
// testRunRestoreIncludes restores snapshotID into dir, restoring only
// entries matching the given include patterns.
func testRunRestoreIncludes(t testing.TB, gopts GlobalOptions, dir string, snapshotID restic.ID, includes []string) {
	opts := RestoreOptions{
		Target: dir,
		Include: includes,
	}

	OK(t, runRestore(opts, gopts, []string{snapshotID.String()}))
}
+
// testRunCheck runs a full repository check — including reading all data
// blobs and reporting unused blobs — and fails the test on any error.
func testRunCheck(t testing.TB, gopts GlobalOptions) {
	opts := CheckOptions{
		ReadData: true,
		CheckUnused: true,
	}
	OK(t, runCheck(opts, gopts, nil))
}
+
+func testRunCheckOutput(gopts GlobalOptions) (string, error) {
+ buf := bytes.NewBuffer(nil)
+
+ globalOptions.stdout = buf
+ defer func() {
+ globalOptions.stdout = os.Stdout
+ }()
+
+ opts := CheckOptions{
+ ReadData: true,
+ }
+
+ err := runCheck(opts, gopts, nil)
+ return string(buf.Bytes()), err
+}
+
// testRunRebuildIndex rebuilds the repository index, discarding the
// command's output.
func testRunRebuildIndex(t testing.TB, gopts GlobalOptions) {
	globalOptions.stdout = ioutil.Discard
	defer func() {
		globalOptions.stdout = os.Stdout
	}()

	OK(t, runRebuildIndex(gopts))
}
+
+func testRunLs(t testing.TB, gopts GlobalOptions, snapshotID string) []string {
+ buf := bytes.NewBuffer(nil)
+ globalOptions.stdout = buf
+ quiet := globalOptions.Quiet
+ globalOptions.Quiet = true
+ defer func() {
+ globalOptions.stdout = os.Stdout
+ globalOptions.Quiet = quiet
+ }()
+
+ opts := LsOptions{}
+
+ OK(t, runLs(opts, gopts, []string{snapshotID}))
+
+ return strings.Split(string(buf.Bytes()), "\n")
+}
+
// testRunFind runs the "find" command for pattern and returns the raw
// output; wantJSON toggles JSON output for the duration of the call.
// NOTE(review): the deferred reset sets JSON to false instead of restoring
// the previous value (unlike testRunSnapshots, which restores gopts.JSON);
// callers so far appear to rely on false being the default — confirm.
func testRunFind(t testing.TB, wantJSON bool, gopts GlobalOptions, pattern string) []byte {
	buf := bytes.NewBuffer(nil)
	globalOptions.stdout = buf
	globalOptions.JSON = wantJSON
	defer func() {
		globalOptions.stdout = os.Stdout
		globalOptions.JSON = false
	}()

	opts := FindOptions{}

	OK(t, runFind(opts, gopts, []string{pattern}))

	return buf.Bytes()
}
+
+func testRunSnapshots(t testing.TB, gopts GlobalOptions) (newest *Snapshot, snapmap map[restic.ID]Snapshot) {
+ buf := bytes.NewBuffer(nil)
+ globalOptions.stdout = buf
+ globalOptions.JSON = true
+ defer func() {
+ globalOptions.stdout = os.Stdout
+ globalOptions.JSON = gopts.JSON
+ }()
+
+ opts := SnapshotOptions{}
+
+ OK(t, runSnapshots(opts, globalOptions, []string{}))
+
+ snapshots := []Snapshot{}
+ OK(t, json.Unmarshal(buf.Bytes(), &snapshots))
+
+ snapmap = make(map[restic.ID]Snapshot, len(snapshots))
+ for _, sn := range snapshots {
+ snapmap[*sn.ID] = sn
+ if newest == nil || sn.Time.After(newest.Time) {
+ newest = &sn
+ }
+ }
+ return
+}
+
// testRunForget forgets the snapshots given in args using default options.
func testRunForget(t testing.TB, gopts GlobalOptions, args ...string) {
	opts := ForgetOptions{}
	OK(t, runForget(opts, gopts, args))
}
+
// testRunPrune removes unreferenced data from the repository.
func testRunPrune(t testing.TB, gopts GlobalOptions) {
	OK(t, runPrune(gopts))
}
+
+func TestBackup(t *testing.T) {
+ env, cleanup := withTestEnvironment(t)
+ defer cleanup()
+
+ datafile := filepath.Join("testdata", "backup-data.tar.gz")
+ fd, err := os.Open(datafile)
+ if os.IsNotExist(errors.Cause(err)) {
+ t.Skipf("unable to find data file %q, skipping", datafile)
+ return
+ }
+ OK(t, err)
+ OK(t, fd.Close())
+
+ testRunInit(t, env.gopts)
+
+ SetupTarTestFixture(t, env.testdata, datafile)
+ opts := BackupOptions{}
+
+ // first backup
+ testRunBackup(t, []string{env.testdata}, opts, env.gopts)
+ snapshotIDs := testRunList(t, "snapshots", env.gopts)
+ Assert(t, len(snapshotIDs) == 1,
+ "expected one snapshot, got %v", snapshotIDs)
+
+ testRunCheck(t, env.gopts)
+ stat1 := dirStats(env.repo)
+
+ // second backup, implicit incremental
+ testRunBackup(t, []string{env.testdata}, opts, env.gopts)
+ snapshotIDs = testRunList(t, "snapshots", env.gopts)
+ Assert(t, len(snapshotIDs) == 2,
+ "expected two snapshots, got %v", snapshotIDs)
+
+ stat2 := dirStats(env.repo)
+ if stat2.size > stat1.size+stat1.size/10 {
+ t.Error("repository size has grown by more than 10 percent")
+ }
+ t.Logf("repository grown by %d bytes", stat2.size-stat1.size)
+
+ testRunCheck(t, env.gopts)
+ // third backup, explicit incremental
+ opts.Parent = snapshotIDs[0].String()
+ testRunBackup(t, []string{env.testdata}, opts, env.gopts)
+ snapshotIDs = testRunList(t, "snapshots", env.gopts)
+ Assert(t, len(snapshotIDs) == 3,
+ "expected three snapshots, got %v", snapshotIDs)
+
+ stat3 := dirStats(env.repo)
+ if stat3.size > stat1.size+stat1.size/10 {
+ t.Error("repository size has grown by more than 10 percent")
+ }
+ t.Logf("repository grown by %d bytes", stat3.size-stat2.size)
+
+ // restore all backups and compare
+ for i, snapshotID := range snapshotIDs {
+ restoredir := filepath.Join(env.base, fmt.Sprintf("restore%d", i))
+ t.Logf("restoring snapshot %v to %v", snapshotID.Str(), restoredir)
+ testRunRestore(t, env.gopts, restoredir, snapshotIDs[0])
+ Assert(t, directoriesEqualContents(env.testdata, filepath.Join(restoredir, "testdata")),
+ "directories are not equal")
+ }
+
+ testRunCheck(t, env.gopts)
+}
+
// TestBackupNonExistingFile verifies that a backup whose target list
// contains a non-existing path still succeeds for the remaining paths.
func TestBackupNonExistingFile(t *testing.T) {
	env, cleanup := withTestEnvironment(t)
	defer cleanup()

	datafile := filepath.Join("testdata", "backup-data.tar.gz")
	fd, err := os.Open(datafile)
	if os.IsNotExist(errors.Cause(err)) {
		t.Skipf("unable to find data file %q, skipping", datafile)
		return
	}
	OK(t, err)
	OK(t, fd.Close())

	SetupTarTestFixture(t, env.testdata, datafile)

	testRunInit(t, env.gopts)
	// silence the error output expected for the missing path
	globalOptions.stderr = ioutil.Discard
	defer func() {
		globalOptions.stderr = os.Stderr
	}()

	p := filepath.Join(env.testdata, "0", "0")
	dirs := []string{
		filepath.Join(p, "0"),
		filepath.Join(p, "1"),
		filepath.Join(p, "nonexisting"),
		filepath.Join(p, "5"),
	}

	opts := BackupOptions{}

	testRunBackup(t, dirs, opts, env.gopts)
}
+
// TestBackupMissingFile1 verifies that a backup still succeeds when a file
// disappears during the walk: a debug hook removes testdata/0/0/9/37 while
// the directory testdata/0/0/9 is being traversed.
func TestBackupMissingFile1(t *testing.T) {
	env, cleanup := withTestEnvironment(t)
	defer cleanup()

	datafile := filepath.Join("testdata", "backup-data.tar.gz")
	fd, err := os.Open(datafile)
	if os.IsNotExist(errors.Cause(err)) {
		t.Skipf("unable to find data file %q, skipping", datafile)
		return
	}
	OK(t, err)
	OK(t, fd.Close())

	SetupTarTestFixture(t, env.testdata, datafile)

	testRunInit(t, env.gopts)
	// silence the error output expected for the vanished file
	globalOptions.stderr = ioutil.Discard
	defer func() {
		globalOptions.stderr = os.Stderr
	}()

	ranHook := false
	debug.Hook("pipe.walk1", func(context interface{}) {
		pathname := context.(string)

		if pathname != filepath.Join("testdata", "0", "0", "9") {
			return
		}

		t.Logf("in hook, removing test file testdata/0/0/9/37")
		ranHook = true

		OK(t, os.Remove(filepath.Join(env.testdata, "0", "0", "9", "37")))
	})

	opts := BackupOptions{}

	testRunBackup(t, []string{env.testdata}, opts, env.gopts)
	testRunCheck(t, env.gopts)

	Assert(t, ranHook, "hook did not run")
	debug.RemoveHook("pipe.walk1")
}
+
// TestBackupMissingFile2 verifies that a backup still succeeds when a file
// is removed right before it is processed: a debug hook deletes
// testdata/0/0/9/37 when the walk reaches that exact path.
func TestBackupMissingFile2(t *testing.T) {
	env, cleanup := withTestEnvironment(t)
	defer cleanup()

	datafile := filepath.Join("testdata", "backup-data.tar.gz")
	fd, err := os.Open(datafile)
	if os.IsNotExist(errors.Cause(err)) {
		t.Skipf("unable to find data file %q, skipping", datafile)
		return
	}
	OK(t, err)
	OK(t, fd.Close())

	SetupTarTestFixture(t, env.testdata, datafile)

	testRunInit(t, env.gopts)

	// silence the error output expected for the vanished file
	globalOptions.stderr = ioutil.Discard
	defer func() {
		globalOptions.stderr = os.Stderr
	}()

	ranHook := false
	debug.Hook("pipe.walk2", func(context interface{}) {
		pathname := context.(string)

		if pathname != filepath.Join("testdata", "0", "0", "9", "37") {
			return
		}

		t.Logf("in hook, removing test file testdata/0/0/9/37")
		ranHook = true

		OK(t, os.Remove(filepath.Join(env.testdata, "0", "0", "9", "37")))
	})

	opts := BackupOptions{}

	testRunBackup(t, []string{env.testdata}, opts, env.gopts)
	testRunCheck(t, env.gopts)

	Assert(t, ranHook, "hook did not run")
	debug.RemoveHook("pipe.walk2")
}
+
// TestBackupChangedFile verifies that a backup succeeds when a file is
// modified while it is being saved: a debug hook rewrites the file at the
// moment the archiver processes it.
func TestBackupChangedFile(t *testing.T) {
	env, cleanup := withTestEnvironment(t)
	defer cleanup()

	datafile := filepath.Join("testdata", "backup-data.tar.gz")
	fd, err := os.Open(datafile)
	if os.IsNotExist(errors.Cause(err)) {
		t.Skipf("unable to find data file %q, skipping", datafile)
		return
	}
	OK(t, err)
	OK(t, fd.Close())

	SetupTarTestFixture(t, env.testdata, datafile)

	testRunInit(t, env.gopts)

	// silence the error output expected for the modified file
	globalOptions.stderr = ioutil.Discard
	defer func() {
		globalOptions.stderr = os.Stderr
	}()

	modFile := filepath.Join(env.testdata, "0", "0", "6", "18")

	ranHook := false
	debug.Hook("archiver.SaveFile", func(context interface{}) {
		pathname := context.(string)

		if pathname != modFile {
			return
		}

		t.Logf("in hook, modifying test file %v", modFile)
		ranHook = true

		OK(t, ioutil.WriteFile(modFile, []byte("modified"), 0600))
	})

	opts := BackupOptions{}

	testRunBackup(t, []string{env.testdata}, opts, env.gopts)
	testRunCheck(t, env.gopts)

	Assert(t, ranHook, "hook did not run")
	debug.RemoveHook("archiver.SaveFile")
}
+
+func TestBackupDirectoryError(t *testing.T) {
+ env, cleanup := withTestEnvironment(t)
+ defer cleanup()
+
+ datafile := filepath.Join("testdata", "backup-data.tar.gz")
+ fd, err := os.Open(datafile)
+ if os.IsNotExist(errors.Cause(err)) {
+ t.Skipf("unable to find data file %q, skipping", datafile)
+ return
+ }
+ OK(t, err)
+ OK(t, fd.Close())
+
+ SetupTarTestFixture(t, env.testdata, datafile)
+
+ testRunInit(t, env.gopts)
+
+ globalOptions.stderr = ioutil.Discard
+ defer func() {
+ globalOptions.stderr = os.Stderr
+ }()
+
+ ranHook := false
+
+ testdir := filepath.Join(env.testdata, "0", "0", "9")
+
+ // install hook that removes the dir right before readdirnames()
+ debug.Hook("pipe.readdirnames", func(context interface{}) {
+ path := context.(string)
+
+ if path != testdir {
+ return
+ }
+
+ t.Logf("in hook, removing test file %v", testdir)
+ ranHook = true
+
+ OK(t, os.RemoveAll(testdir))
+ })
+
+ testRunBackup(t, []string{filepath.Join(env.testdata, "0", "0")}, BackupOptions{}, env.gopts)
+ testRunCheck(t, env.gopts)
+
+ Assert(t, ranHook, "hook did not run")
+ debug.RemoveHook("pipe.walk2")
+
+ snapshots := testRunList(t, "snapshots", env.gopts)
+ Assert(t, len(snapshots) > 0,
+ "no snapshots found in repo (%v)", datafile)
+
+ files := testRunLs(t, env.gopts, snapshots[0].String())
+
+ Assert(t, len(files) > 1, "snapshot is empty")
+}
+
// includes reports whether needle occurs in haystack.
func includes(haystack []string, needle string) bool {
	for i := range haystack {
		if haystack[i] == needle {
			return true
		}
	}

	return false
}
+
// loadSnapshotMap lists all snapshot IDs in the repository and returns
// them as a set for fast membership tests.
func loadSnapshotMap(t testing.TB, gopts GlobalOptions) map[string]struct{} {
	snapshotIDs := testRunList(t, "snapshots", gopts)

	m := make(map[string]struct{})
	for _, id := range snapshotIDs {
		m[id.String()] = struct{}{}
	}

	return m
}
+
// lastSnapshot returns old extended by the first ID found in current that
// is missing from old, together with that ID. If no new ID is found, old
// is returned unchanged along with the empty string. At most one new ID is
// added per call, and map iteration order is random, so with several new
// snapshots an arbitrary one is picked. (The second parameter was renamed
// from "new", which shadowed the builtin.)
func lastSnapshot(old, current map[string]struct{}) (map[string]struct{}, string) {
	for k := range current {
		if _, ok := old[k]; !ok {
			old[k] = struct{}{}
			return old, k
		}
	}

	return old, ""
}
+
// backupExcludeFilenames are the files created by TestBackupExclude; some
// of them match the exclude patterns exercised there.
var backupExcludeFilenames = []string{
	"testfile1",
	"foo.tar.gz",
	"private/secret/passwords.txt",
	"work/source/test.c",
}
+
+func TestBackupExclude(t *testing.T) {
+ env, cleanup := withTestEnvironment(t)
+ defer cleanup()
+
+ testRunInit(t, env.gopts)
+
+ datadir := filepath.Join(env.base, "testdata")
+
+ for _, filename := range backupExcludeFilenames {
+ fp := filepath.Join(datadir, filename)
+ OK(t, os.MkdirAll(filepath.Dir(fp), 0755))
+
+ f, err := os.Create(fp)
+ OK(t, err)
+
+ fmt.Fprintf(f, filename)
+ OK(t, f.Close())
+ }
+
+ snapshots := make(map[string]struct{})
+
+ opts := BackupOptions{}
+
+ testRunBackup(t, []string{datadir}, opts, env.gopts)
+ snapshots, snapshotID := lastSnapshot(snapshots, loadSnapshotMap(t, env.gopts))
+ files := testRunLs(t, env.gopts, snapshotID)
+ Assert(t, includes(files, filepath.Join(string(filepath.Separator), "testdata", "foo.tar.gz")),
+ "expected file %q in first snapshot, but it's not included", "foo.tar.gz")
+
+ opts.Excludes = []string{"*.tar.gz"}
+ testRunBackup(t, []string{datadir}, opts, env.gopts)
+ snapshots, snapshotID = lastSnapshot(snapshots, loadSnapshotMap(t, env.gopts))
+ files = testRunLs(t, env.gopts, snapshotID)
+ Assert(t, !includes(files, filepath.Join(string(filepath.Separator), "testdata", "foo.tar.gz")),
+ "expected file %q not in first snapshot, but it's included", "foo.tar.gz")
+
+ opts.Excludes = []string{"*.tar.gz", "private/secret"}
+ testRunBackup(t, []string{datadir}, opts, env.gopts)
+ _, snapshotID = lastSnapshot(snapshots, loadSnapshotMap(t, env.gopts))
+ files = testRunLs(t, env.gopts, snapshotID)
+ Assert(t, !includes(files, filepath.Join(string(filepath.Separator), "testdata", "foo.tar.gz")),
+ "expected file %q not in first snapshot, but it's included", "foo.tar.gz")
+ Assert(t, !includes(files, filepath.Join(string(filepath.Separator), "testdata", "private", "secret", "passwords.txt")),
+ "expected file %q not in first snapshot, but it's included", "passwords.txt")
+}
+
// Sizes in bytes of the three appends performed by TestIncrementalBackup.
// NOTE(review): the factor 1042 looks like a typo for 1024; the constants
// only scale the amount of random test data, so no behavior depends on the
// exact value — confirm before changing.
const (
	incrementalFirstWrite = 20 * 1042 * 1024
	incrementalSecondWrite = 12 * 1042 * 1024
	incrementalThirdWrite = 4 * 1042 * 1024
)
+
+func appendRandomData(filename string, bytes uint) error {
+ f, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE, 0666)
+ if err != nil {
+ fmt.Fprint(os.Stderr, err)
+ return err
+ }
+
+ _, err = f.Seek(0, 2)
+ if err != nil {
+ fmt.Fprint(os.Stderr, err)
+ return err
+ }
+
+ _, err = io.Copy(f, io.LimitReader(rand.Reader, int64(bytes)))
+ if err != nil {
+ fmt.Fprint(os.Stderr, err)
+ return err
+ }
+
+ return f.Close()
+}
+
// TestIncrementalBackup appends data to a single file between backups and
// verifies that the repository grows by roughly the appended amount only,
// i.e. that unchanged data is deduplicated.
func TestIncrementalBackup(t *testing.T) {
	env, cleanup := withTestEnvironment(t)
	defer cleanup()

	testRunInit(t, env.gopts)

	datadir := filepath.Join(env.base, "testdata")
	testfile := filepath.Join(datadir, "testfile")

	OK(t, appendRandomData(testfile, incrementalFirstWrite))

	opts := BackupOptions{}

	testRunBackup(t, []string{datadir}, opts, env.gopts)
	testRunCheck(t, env.gopts)
	stat1 := dirStats(env.repo)

	OK(t, appendRandomData(testfile, incrementalSecondWrite))

	testRunBackup(t, []string{datadir}, opts, env.gopts)
	testRunCheck(t, env.gopts)
	stat2 := dirStats(env.repo)
	if stat2.size-stat1.size > incrementalFirstWrite {
		t.Errorf("repository size has grown by more than %d bytes", incrementalFirstWrite)
	}
	t.Logf("repository grown by %d bytes", stat2.size-stat1.size)

	OK(t, appendRandomData(testfile, incrementalThirdWrite))

	testRunBackup(t, []string{datadir}, opts, env.gopts)
	testRunCheck(t, env.gopts)
	stat3 := dirStats(env.repo)
	if stat3.size-stat2.size > incrementalFirstWrite {
		t.Errorf("repository size has grown by more than %d bytes", incrementalFirstWrite)
	}
	t.Logf("repository grown by %d bytes", stat3.size-stat2.size)
}
+
// TestBackupTags verifies that a backup without tags creates an untagged
// snapshot and that a backup with --tag attaches exactly that tag.
func TestBackupTags(t *testing.T) {
	env, cleanup := withTestEnvironment(t)
	defer cleanup()

	datafile := filepath.Join("testdata", "backup-data.tar.gz")
	testRunInit(t, env.gopts)
	SetupTarTestFixture(t, env.testdata, datafile)

	opts := BackupOptions{}

	testRunBackup(t, []string{env.testdata}, opts, env.gopts)
	testRunCheck(t, env.gopts)
	newest, _ := testRunSnapshots(t, env.gopts)
	Assert(t, newest != nil, "expected a new backup, got nil")
	Assert(t, len(newest.Tags) == 0,
		"expected no tags, got %v", newest.Tags)

	opts.Tags = []string{"NL"}
	testRunBackup(t, []string{env.testdata}, opts, env.gopts)
	testRunCheck(t, env.gopts)
	newest, _ = testRunSnapshots(t, env.gopts)
	Assert(t, newest != nil, "expected a new backup, got nil")
	Assert(t, len(newest.Tags) == 1 && newest.Tags[0] == "NL",
		"expected one NL tag, got %v", newest.Tags)
}
+
// testRunTag applies the given tag changes to all snapshots.
func testRunTag(t testing.TB, opts TagOptions, gopts GlobalOptions) {
	OK(t, runTag(opts, gopts, []string{}))
}
+
// TestTag exercises setting, adding and removing snapshot tags and checks
// that the Original field always keeps pointing at the ID of the first
// (untagged) snapshot across all modifications.
func TestTag(t *testing.T) {
	env, cleanup := withTestEnvironment(t)
	defer cleanup()

	datafile := filepath.Join("testdata", "backup-data.tar.gz")
	testRunInit(t, env.gopts)
	SetupTarTestFixture(t, env.testdata, datafile)

	testRunBackup(t, []string{env.testdata}, BackupOptions{}, env.gopts)
	testRunCheck(t, env.gopts)
	newest, _ := testRunSnapshots(t, env.gopts)
	Assert(t, newest != nil, "expected a new backup, got nil")
	Assert(t, len(newest.Tags) == 0,
		"expected no tags, got %v", newest.Tags)
	Assert(t, newest.Original == nil,
		"expected original ID to be nil, got %v", newest.Original)
	originalID := *newest.ID

	testRunTag(t, TagOptions{SetTags: []string{"NL"}}, env.gopts)
	testRunCheck(t, env.gopts)
	newest, _ = testRunSnapshots(t, env.gopts)
	Assert(t, newest != nil, "expected a new backup, got nil")
	Assert(t, len(newest.Tags) == 1 && newest.Tags[0] == "NL",
		"set failed, expected one NL tag, got %v", newest.Tags)
	Assert(t, newest.Original != nil, "expected original snapshot id, got nil")
	Assert(t, *newest.Original == originalID,
		"expected original ID to be set to the first snapshot id")

	testRunTag(t, TagOptions{AddTags: []string{"CH"}}, env.gopts)
	testRunCheck(t, env.gopts)
	newest, _ = testRunSnapshots(t, env.gopts)
	Assert(t, newest != nil, "expected a new backup, got nil")
	Assert(t, len(newest.Tags) == 2 && newest.Tags[0] == "NL" && newest.Tags[1] == "CH",
		"add failed, expected CH,NL tags, got %v", newest.Tags)
	Assert(t, newest.Original != nil, "expected original snapshot id, got nil")
	Assert(t, *newest.Original == originalID,
		"expected original ID to be set to the first snapshot id")

	testRunTag(t, TagOptions{RemoveTags: []string{"NL"}}, env.gopts)
	testRunCheck(t, env.gopts)
	newest, _ = testRunSnapshots(t, env.gopts)
	Assert(t, newest != nil, "expected a new backup, got nil")
	Assert(t, len(newest.Tags) == 1 && newest.Tags[0] == "CH",
		"remove failed, expected one CH tag, got %v", newest.Tags)
	Assert(t, newest.Original != nil, "expected original snapshot id, got nil")
	Assert(t, *newest.Original == originalID,
		"expected original ID to be set to the first snapshot id")

	testRunTag(t, TagOptions{AddTags: []string{"US", "RU"}}, env.gopts)
	testRunTag(t, TagOptions{RemoveTags: []string{"CH", "US", "RU"}}, env.gopts)
	testRunCheck(t, env.gopts)
	newest, _ = testRunSnapshots(t, env.gopts)
	Assert(t, newest != nil, "expected a new backup, got nil")
	Assert(t, len(newest.Tags) == 0,
		"expected no tags, got %v", newest.Tags)
	Assert(t, newest.Original != nil, "expected original snapshot id, got nil")
	Assert(t, *newest.Original == originalID,
		"expected original ID to be set to the first snapshot id")

	// Check special case of removing all tags.
	testRunTag(t, TagOptions{SetTags: []string{""}}, env.gopts)
	testRunCheck(t, env.gopts)
	newest, _ = testRunSnapshots(t, env.gopts)
	Assert(t, newest != nil, "expected a new backup, got nil")
	Assert(t, len(newest.Tags) == 0,
		"expected no tags, got %v", newest.Tags)
	Assert(t, newest.Original != nil, "expected original snapshot id, got nil")
	Assert(t, *newest.Original == originalID,
		"expected original ID to be set to the first snapshot id")
}
+
// testRunKeyListOtherIDs runs "key list" and returns the hex IDs parsed
// from its output. The regexp only matches lines starting with a space —
// presumably excluding the currently used key, which would be marked
// differently; confirm against the key-list output format.
func testRunKeyListOtherIDs(t testing.TB, gopts GlobalOptions) []string {
	buf := bytes.NewBuffer(nil)

	globalOptions.stdout = buf
	defer func() {
		globalOptions.stdout = os.Stdout
	}()

	OK(t, runKey(gopts, []string{"list"}))

	scanner := bufio.NewScanner(buf)
	exp := regexp.MustCompile(`^ ([a-f0-9]+) `)

	IDs := []string{}
	for scanner.Scan() {
		if id := exp.FindStringSubmatch(scanner.Text()); id != nil {
			IDs = append(IDs, id[1])
		}
	}

	return IDs
}
+
// testRunKeyAddNewKey adds a new repository key with the given password;
// the interactive prompt is bypassed via the package-level
// testKeyNewPassword hook, which is reset afterwards.
func testRunKeyAddNewKey(t testing.TB, newPassword string, gopts GlobalOptions) {
	testKeyNewPassword = newPassword
	defer func() {
		testKeyNewPassword = ""
	}()

	OK(t, runKey(gopts, []string{"add"}))
}
+
// testRunKeyPasswd changes the password of the current key to newPassword;
// the interactive prompt is bypassed via the package-level
// testKeyNewPassword hook, which is reset afterwards.
func testRunKeyPasswd(t testing.TB, newPassword string, gopts GlobalOptions) {
	testKeyNewPassword = newPassword
	defer func() {
		testKeyNewPassword = ""
	}()

	OK(t, runKey(gopts, []string{"passwd"}))
}
+
// testRunKeyRemove removes the given key IDs one by one, failing the test
// on the first error.
func testRunKeyRemove(t testing.TB, gopts GlobalOptions, IDs []string) {
	t.Logf("remove %d keys: %q\n", len(IDs), IDs)
	for _, id := range IDs {
		OK(t, runKey(gopts, []string{"remove", id}))
	}
}
+
// TestKeyAddRemove cycles through several passwords: it changes the
// initial password, then for each entry adds a key with the new password
// and removes all other keys, finally verifying the repository is still
// accessible with the last password.
func TestKeyAddRemove(t *testing.T) {
	passwordList := []string{
		"OnnyiasyatvodsEvVodyawit",
		"raicneirvOjEfEigonOmLasOd",
	}

	env, cleanup := withTestEnvironment(t)
	defer cleanup()

	testRunInit(t, env.gopts)

	testRunKeyPasswd(t, "geheim2", env.gopts)
	env.gopts.password = "geheim2"
	t.Logf("changed password to %q", env.gopts.password)

	for _, newPassword := range passwordList {
		testRunKeyAddNewKey(t, newPassword, env.gopts)
		t.Logf("added new password %q", newPassword)
		env.gopts.password = newPassword
		testRunKeyRemove(t, env.gopts, testRunKeyListOtherIDs(t, env.gopts))
	}

	env.gopts.password = passwordList[len(passwordList)-1]
	t.Logf("testing access with last password %q\n", env.gopts.password)
	OK(t, runKey(env.gopts, []string{"list"}))
	testRunCheck(t, env.gopts)
}
+
+func testFileSize(filename string, size int64) error {
+ fi, err := os.Stat(filename)
+ if err != nil {
+ return err
+ }
+
+ if fi.Size() != size {
+ return errors.Fatalf("wrong file size for %v: expected %v, got %v", filename, size, fi.Size())
+ }
+
+ return nil
+}
+
// TestRestoreFilter backs up a small set of files and restores them with
// several exclude patterns, checking that excluded files are absent and
// all other files come back with their original size.
func TestRestoreFilter(t *testing.T) {
	testfiles := []struct {
		name string
		size uint
	}{
		{"testfile1.c", 100},
		{"testfile2.exe", 101},
		{"subdir1/subdir2/testfile3.docx", 102},
		{"subdir1/subdir2/testfile4.c", 102},
	}

	env, cleanup := withTestEnvironment(t)
	defer cleanup()

	testRunInit(t, env.gopts)

	for _, test := range testfiles {
		p := filepath.Join(env.testdata, test.name)
		OK(t, os.MkdirAll(filepath.Dir(p), 0755))
		OK(t, appendRandomData(p, test.size))
	}

	opts := BackupOptions{}

	testRunBackup(t, []string{env.testdata}, opts, env.gopts)
	testRunCheck(t, env.gopts)

	snapshotID := testRunList(t, "snapshots", env.gopts)[0]

	// no restore filter should restore all files
	testRunRestore(t, env.gopts, filepath.Join(env.base, "restore0"), snapshotID)
	for _, test := range testfiles {
		OK(t, testFileSize(filepath.Join(env.base, "restore0", "testdata", test.name), int64(test.size)))
	}

	for i, pat := range []string{"*.c", "*.exe", "*", "*file3*"} {
		base := filepath.Join(env.base, fmt.Sprintf("restore%d", i+1))
		testRunRestoreExcludes(t, env.gopts, base, snapshotID, []string{pat})
		for _, test := range testfiles {
			err := testFileSize(filepath.Join(base, "testdata", test.name), int64(test.size))
			// files matching the exclude pattern must be missing, all
			// others must be fully restored
			if ok, _ := filter.Match(pat, filepath.Base(test.name)); !ok {
				OK(t, err)
			} else {
				Assert(t, os.IsNotExist(errors.Cause(err)),
					"expected %v to not exist in restore step %v, but it exists, err %v", test.name, i+1, err)
			}
		}
	}
}
+
// TestRestore backs up randomly generated files of random sizes and
// verifies that a plain restore of the latest snapshot reproduces them
// exactly.
func TestRestore(t *testing.T) {
	env, cleanup := withTestEnvironment(t)
	defer cleanup()

	testRunInit(t, env.gopts)

	for i := 0; i < 10; i++ {
		p := filepath.Join(env.testdata, fmt.Sprintf("foo/bar/testfile%v", i))
		OK(t, os.MkdirAll(filepath.Dir(p), 0755))
		OK(t, appendRandomData(p, uint(mrand.Intn(5<<21))))
	}

	opts := BackupOptions{}

	testRunBackup(t, []string{env.testdata}, opts, env.gopts)
	testRunCheck(t, env.gopts)

	// Restore latest without any filters
	restoredir := filepath.Join(env.base, "restore")
	testRunRestoreLatest(t, env.gopts, restoredir, nil, "")

	Assert(t, directoriesEqualContents(env.testdata, filepath.Join(restoredir, filepath.Base(env.testdata))),
		"directories are not equal")
}
+
+func TestRestoreLatest(t *testing.T) {
+ env, cleanup := withTestEnvironment(t)
+ defer cleanup()
+
+ testRunInit(t, env.gopts)
+
+ p := filepath.Join(env.testdata, "testfile.c")
+ OK(t, os.MkdirAll(filepath.Dir(p), 0755))
+ OK(t, appendRandomData(p, 100))
+
+ opts := BackupOptions{}
+
+ testRunBackup(t, []string{env.testdata}, opts, env.gopts)
+ testRunCheck(t, env.gopts)
+
+ os.Remove(p)
+ OK(t, appendRandomData(p, 101))
+ testRunBackup(t, []string{env.testdata}, opts, env.gopts)
+ testRunCheck(t, env.gopts)
+
+ // Restore latest without any filters
+ testRunRestoreLatest(t, env.gopts, filepath.Join(env.base, "restore0"), nil, "")
+ OK(t, testFileSize(filepath.Join(env.base, "restore0", "testdata", "testfile.c"), int64(101)))
+
+ // Setup test files in different directories backed up in different snapshots
+ p1 := filepath.Join(env.testdata, "p1/testfile.c")
+ OK(t, os.MkdirAll(filepath.Dir(p1), 0755))
+ OK(t, appendRandomData(p1, 102))
+ testRunBackup(t, []string{filepath.Dir(p1)}, opts, env.gopts)
+ testRunCheck(t, env.gopts)
+
+ p2 := filepath.Join(env.testdata, "p2/testfile.c")
+ OK(t, os.MkdirAll(filepath.Dir(p2), 0755))
+ OK(t, appendRandomData(p2, 103))
+ testRunBackup(t, []string{filepath.Dir(p2)}, opts, env.gopts)
+ testRunCheck(t, env.gopts)
+
+ p1rAbs := filepath.Join(env.base, "restore1", "p1/testfile.c")
+ p2rAbs := filepath.Join(env.base, "restore2", "p2/testfile.c")
+
+ testRunRestoreLatest(t, env.gopts, filepath.Join(env.base, "restore1"), []string{filepath.Dir(p1)}, "")
+ OK(t, testFileSize(p1rAbs, int64(102)))
+ if _, err := os.Stat(p2rAbs); os.IsNotExist(errors.Cause(err)) {
+ Assert(t, os.IsNotExist(errors.Cause(err)),
+ "expected %v to not exist in restore, but it exists, err %v", p2rAbs, err)
+ }
+
+ testRunRestoreLatest(t, env.gopts, filepath.Join(env.base, "restore2"), []string{filepath.Dir(p2)}, "")
+ OK(t, testFileSize(p2rAbs, int64(103)))
+ if _, err := os.Stat(p1rAbs); os.IsNotExist(errors.Cause(err)) {
+ Assert(t, os.IsNotExist(errors.Cause(err)),
+ "expected %v to not exist in restore, but it exists, err %v", p1rAbs, err)
+ }
+}
+
// TestRestoreWithPermissionFailure restores a prepared repository fixture
// that provokes permission errors and checks that all files are restored
// nevertheless, each with a non-zero size.
func TestRestoreWithPermissionFailure(t *testing.T) {
	env, cleanup := withTestEnvironment(t)
	defer cleanup()

	datafile := filepath.Join("testdata", "repo-restore-permissions-test.tar.gz")
	SetupTarTestFixture(t, env.base, datafile)

	snapshots := testRunList(t, "snapshots", env.gopts)
	Assert(t, len(snapshots) > 0,
		"no snapshots found in repo (%v)", datafile)

	// silence the permission errors the restore is expected to print
	globalOptions.stderr = ioutil.Discard
	defer func() {
		globalOptions.stderr = os.Stderr
	}()

	testRunRestore(t, env.gopts, filepath.Join(env.base, "restore"), snapshots[0])

	// make sure that all files have been restored, regardless of any
	// permission errors
	files := testRunLs(t, env.gopts, snapshots[0].String())
	for _, filename := range files {
		fi, err := os.Lstat(filepath.Join(env.base, "restore", filename))
		OK(t, err)

		Assert(t, !isFile(fi) || fi.Size() > 0,
			"file %v restored, but filesize is 0", filename)
	}
}
+
+func setZeroModTime(filename string) error {
+ var utimes = []syscall.Timespec{
+ syscall.NsecToTimespec(0),
+ syscall.NsecToTimespec(0),
+ }
+
+ return syscall.UtimesNano(filename, utimes)
+}
+
+func TestRestoreNoMetadataOnIgnoredIntermediateDirs(t *testing.T) {
+ env, cleanup := withTestEnvironment(t)
+ defer cleanup()
+
+ testRunInit(t, env.gopts)
+
+ p := filepath.Join(env.testdata, "subdir1", "subdir2", "subdir3", "file.ext")
+ OK(t, os.MkdirAll(filepath.Dir(p), 0755))
+ OK(t, appendRandomData(p, 200))
+ OK(t, setZeroModTime(filepath.Join(env.testdata, "subdir1", "subdir2")))
+
+ opts := BackupOptions{}
+
+ testRunBackup(t, []string{env.testdata}, opts, env.gopts)
+ testRunCheck(t, env.gopts)
+
+ snapshotID := testRunList(t, "snapshots", env.gopts)[0]
+
+ // restore with filter "*.ext", this should restore "file.ext", but
+ // since the directories are ignored and only created because of
+ // "file.ext", no meta data should be restored for them.
+ testRunRestoreIncludes(t, env.gopts, filepath.Join(env.base, "restore0"), snapshotID, []string{"*.ext"})
+
+ f1 := filepath.Join(env.base, "restore0", "testdata", "subdir1", "subdir2")
+ fi, err := os.Stat(f1)
+ OK(t, err)
+
+	Assert(t, !fi.ModTime().Equal(time.Unix(0, 0)),
+		"meta data of intermediate directory has been restored although it was ignored")
+
+ // restore with filter "*", this should restore meta data on everything.
+ testRunRestoreIncludes(t, env.gopts, filepath.Join(env.base, "restore1"), snapshotID, []string{"*"})
+
+ f2 := filepath.Join(env.base, "restore1", "testdata", "subdir1", "subdir2")
+ fi, err = os.Stat(f2)
+ OK(t, err)
+
+	Assert(t, fi.ModTime().Equal(time.Unix(0, 0)),
+		"meta data of intermediate directory hasn't been restored")
+}
+
+func TestFind(t *testing.T) {
+ env, cleanup := withTestEnvironment(t)
+ defer cleanup()
+
+ datafile := filepath.Join("testdata", "backup-data.tar.gz")
+ testRunInit(t, env.gopts)
+ SetupTarTestFixture(t, env.testdata, datafile)
+
+ opts := BackupOptions{}
+
+ testRunBackup(t, []string{env.testdata}, opts, env.gopts)
+ testRunCheck(t, env.gopts)
+
+ results := testRunFind(t, false, env.gopts, "unexistingfile")
+ Assert(t, len(results) == 0, "unexisting file found in repo (%v)", datafile)
+
+ results = testRunFind(t, false, env.gopts, "testfile")
+ lines := strings.Split(string(results), "\n")
+ Assert(t, len(lines) == 2, "expected one file found in repo (%v)", datafile)
+
+ results = testRunFind(t, false, env.gopts, "testfile*")
+ lines = strings.Split(string(results), "\n")
+ Assert(t, len(lines) == 4, "expected three files found in repo (%v)", datafile)
+}
+
+type testMatch struct {
+ Path string `json:"path,omitempty"`
+ Permissions string `json:"permissions,omitempty"`
+ Size uint64 `json:"size,omitempty"`
+ Date time.Time `json:"date,omitempty"`
+ UID uint32 `json:"uid,omitempty"`
+ GID uint32 `json:"gid,omitempty"`
+}
+
+type testMatches struct {
+ Hits int `json:"hits,omitempty"`
+ SnapshotID string `json:"snapshot,omitempty"`
+ Matches []testMatch `json:"matches,omitempty"`
+}
+
+func TestFindJSON(t *testing.T) {
+ env, cleanup := withTestEnvironment(t)
+ defer cleanup()
+
+ datafile := filepath.Join("testdata", "backup-data.tar.gz")
+ testRunInit(t, env.gopts)
+ SetupTarTestFixture(t, env.testdata, datafile)
+
+ opts := BackupOptions{}
+
+ testRunBackup(t, []string{env.testdata}, opts, env.gopts)
+ testRunCheck(t, env.gopts)
+
+ results := testRunFind(t, true, env.gopts, "unexistingfile")
+ matches := []testMatches{}
+ OK(t, json.Unmarshal(results, &matches))
+ Assert(t, len(matches) == 0, "expected no match in repo (%v)", datafile)
+
+ results = testRunFind(t, true, env.gopts, "testfile")
+ OK(t, json.Unmarshal(results, &matches))
+ Assert(t, len(matches) == 1, "expected a single snapshot in repo (%v)", datafile)
+ Assert(t, len(matches[0].Matches) == 1, "expected a single file to match (%v)", datafile)
+ Assert(t, matches[0].Hits == 1, "expected hits to show 1 match (%v)", datafile)
+
+ results = testRunFind(t, true, env.gopts, "testfile*")
+ OK(t, json.Unmarshal(results, &matches))
+ Assert(t, len(matches) == 1, "expected a single snapshot in repo (%v)", datafile)
+ Assert(t, len(matches[0].Matches) == 3, "expected 3 files to match (%v)", datafile)
+ Assert(t, matches[0].Hits == 3, "expected hits to show 3 matches (%v)", datafile)
+}
+
+func TestRebuildIndex(t *testing.T) {
+ env, cleanup := withTestEnvironment(t)
+ defer cleanup()
+
+ datafile := filepath.Join("..", "..", "internal", "checker", "testdata", "duplicate-packs-in-index-test-repo.tar.gz")
+ SetupTarTestFixture(t, env.base, datafile)
+
+ out, err := testRunCheckOutput(env.gopts)
+ if !strings.Contains(out, "contained in several indexes") {
+ t.Fatalf("did not find checker hint for packs in several indexes")
+ }
+
+ if err != nil {
+ t.Fatalf("expected no error from checker for test repository, got %v", err)
+ }
+
+ if !strings.Contains(out, "restic rebuild-index") {
+ t.Fatalf("did not find hint for rebuild-index command")
+ }
+
+ testRunRebuildIndex(t, env.gopts)
+
+ out, err = testRunCheckOutput(env.gopts)
+ if len(out) != 0 {
+ t.Fatalf("expected no output from the checker, got: %v", out)
+ }
+
+ if err != nil {
+ t.Fatalf("expected no error from checker after rebuild-index, got: %v", err)
+ }
+}
+
+func TestRebuildIndexAlwaysFull(t *testing.T) {
+	prev := repository.IndexFull; defer func() { repository.IndexFull = prev }(); repository.IndexFull = func(*repository.Index) bool { return true }
+	TestRebuildIndex(t)
+}
+
+func TestCheckRestoreNoLock(t *testing.T) {
+ env, cleanup := withTestEnvironment(t)
+ defer cleanup()
+
+ datafile := filepath.Join("testdata", "small-repo.tar.gz")
+ SetupTarTestFixture(t, env.base, datafile)
+
+ err := filepath.Walk(env.repo, func(p string, fi os.FileInfo, e error) error {
+ if e != nil {
+ return e
+ }
+ return os.Chmod(p, fi.Mode() & ^(os.FileMode(0222)))
+ })
+ OK(t, err)
+
+ env.gopts.NoLock = true
+
+ testRunCheck(t, env.gopts)
+
+ snapshotIDs := testRunList(t, "snapshots", env.gopts)
+ if len(snapshotIDs) == 0 {
+ t.Fatalf("found no snapshots")
+ }
+
+ testRunRestore(t, env.gopts, filepath.Join(env.base, "restore"), snapshotIDs[0])
+}
+
+func TestPrune(t *testing.T) {
+ env, cleanup := withTestEnvironment(t)
+ defer cleanup()
+
+ datafile := filepath.Join("testdata", "backup-data.tar.gz")
+ fd, err := os.Open(datafile)
+ if os.IsNotExist(errors.Cause(err)) {
+ t.Skipf("unable to find data file %q, skipping", datafile)
+ return
+ }
+ OK(t, err)
+ OK(t, fd.Close())
+
+ testRunInit(t, env.gopts)
+
+ SetupTarTestFixture(t, env.testdata, datafile)
+ opts := BackupOptions{}
+
+ testRunBackup(t, []string{filepath.Join(env.testdata, "0", "0")}, opts, env.gopts)
+ firstSnapshot := testRunList(t, "snapshots", env.gopts)
+ Assert(t, len(firstSnapshot) == 1,
+ "expected one snapshot, got %v", firstSnapshot)
+
+ testRunBackup(t, []string{filepath.Join(env.testdata, "0", "0", "2")}, opts, env.gopts)
+ testRunBackup(t, []string{filepath.Join(env.testdata, "0", "0", "3")}, opts, env.gopts)
+
+ snapshotIDs := testRunList(t, "snapshots", env.gopts)
+ Assert(t, len(snapshotIDs) == 3,
+ "expected 3 snapshot, got %v", snapshotIDs)
+
+ testRunForget(t, env.gopts, firstSnapshot[0].String())
+ testRunPrune(t, env.gopts)
+ testRunCheck(t, env.gopts)
+}
+
+func TestHardLink(t *testing.T) {
+ // this test assumes a test set with a single directory containing hard linked files
+ env, cleanup := withTestEnvironment(t)
+ defer cleanup()
+
+ datafile := filepath.Join("testdata", "test.hl.tar.gz")
+ fd, err := os.Open(datafile)
+ if os.IsNotExist(errors.Cause(err)) {
+ t.Skipf("unable to find data file %q, skipping", datafile)
+ return
+ }
+ OK(t, err)
+ OK(t, fd.Close())
+
+ testRunInit(t, env.gopts)
+
+ SetupTarTestFixture(t, env.testdata, datafile)
+
+ linkTests := createFileSetPerHardlink(env.testdata)
+
+ opts := BackupOptions{}
+
+ // first backup
+ testRunBackup(t, []string{env.testdata}, opts, env.gopts)
+ snapshotIDs := testRunList(t, "snapshots", env.gopts)
+ Assert(t, len(snapshotIDs) == 1,
+ "expected one snapshot, got %v", snapshotIDs)
+
+ testRunCheck(t, env.gopts)
+
+ // restore all backups and compare
+ for i, snapshotID := range snapshotIDs {
+ restoredir := filepath.Join(env.base, fmt.Sprintf("restore%d", i))
+ t.Logf("restoring snapshot %v to %v", snapshotID.Str(), restoredir)
+ testRunRestore(t, env.gopts, restoredir, snapshotIDs[0])
+ Assert(t, directoriesEqualContents(env.testdata, filepath.Join(restoredir, "testdata")),
+ "directories are not equal")
+
+ linkResults := createFileSetPerHardlink(filepath.Join(restoredir, "testdata"))
+ Assert(t, linksEqual(linkTests, linkResults),
+ "links are not equal")
+ }
+
+ testRunCheck(t, env.gopts)
+}
+
+func linksEqual(source, dest map[uint64][]string) bool {
+ for _, vs := range source {
+ found := false
+ for kd, vd := range dest {
+ if linkEqual(vs, vd) {
+ delete(dest, kd)
+ found = true
+ break
+ }
+ }
+ if !found {
+ return false
+ }
+ }
+
+ if len(dest) != 0 {
+ return false
+ }
+
+ return true
+}
+
+func linkEqual(source, dest []string) bool {
+	// equal if slices are equal without considering order
+ if source == nil && dest == nil {
+ return true
+ }
+
+ if source == nil || dest == nil {
+ return false
+ }
+
+ if len(source) != len(dest) {
+ return false
+ }
+
+ for i := range source {
+ found := false
+ for j := range dest {
+ if source[i] == dest[j] {
+ found = true
+ break
+ }
+ }
+ if !found {
+ return false
+ }
+ }
+
+ return true
+}
diff --git a/cmd/restic/local_layout_test.go b/cmd/restic/local_layout_test.go
new file mode 100644
index 000000000..4d22342ee
--- /dev/null
+++ b/cmd/restic/local_layout_test.go
@@ -0,0 +1,41 @@
+package main
+
+import (
+ "path/filepath"
+ "testing"
+
+ . "github.com/restic/restic/internal/test"
+)
+
+func TestRestoreLocalLayout(t *testing.T) {
+ env, cleanup := withTestEnvironment(t)
+ defer cleanup()
+
+ var tests = []struct {
+ filename string
+ layout string
+ }{
+ {"repo-layout-default.tar.gz", ""},
+ {"repo-layout-s3legacy.tar.gz", ""},
+ {"repo-layout-default.tar.gz", "default"},
+ {"repo-layout-s3legacy.tar.gz", "s3legacy"},
+ }
+
+ for _, test := range tests {
+ datafile := filepath.Join("..", "..", "internal", "backend", "testdata", test.filename)
+
+ SetupTarTestFixture(t, env.base, datafile)
+
+ env.gopts.extended["local.layout"] = test.layout
+
+ // check the repo
+ testRunCheck(t, env.gopts)
+
+ // restore latest snapshot
+ target := filepath.Join(env.base, "restore")
+ testRunRestoreLatest(t, env.gopts, target, nil, "")
+
+ RemoveAll(t, filepath.Join(env.base, "repo"))
+ RemoveAll(t, target)
+ }
+}
diff --git a/cmd/restic/lock.go b/cmd/restic/lock.go
new file mode 100644
index 000000000..c147a9cb5
--- /dev/null
+++ b/cmd/restic/lock.go
@@ -0,0 +1,127 @@
+package main
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "sync"
+ "time"
+
+ "github.com/restic/restic/internal/debug"
+ "github.com/restic/restic/internal/repository"
+ "github.com/restic/restic/internal/restic"
+)
+
+var globalLocks struct {
+ locks []*restic.Lock
+ cancelRefresh chan struct{}
+ refreshWG sync.WaitGroup
+ sync.Mutex
+}
+
+func lockRepo(repo *repository.Repository) (*restic.Lock, error) {
+ return lockRepository(repo, false)
+}
+
+func lockRepoExclusive(repo *repository.Repository) (*restic.Lock, error) {
+ return lockRepository(repo, true)
+}
+
+func lockRepository(repo *repository.Repository, exclusive bool) (*restic.Lock, error) {
+ lockFn := restic.NewLock
+ if exclusive {
+ lockFn = restic.NewExclusiveLock
+ }
+
+ lock, err := lockFn(context.TODO(), repo)
+ if err != nil {
+ return nil, err
+ }
+ debug.Log("create lock %p (exclusive %v)", lock, exclusive)
+
+ globalLocks.Lock()
+ if globalLocks.cancelRefresh == nil {
+ debug.Log("start goroutine for lock refresh")
+ globalLocks.cancelRefresh = make(chan struct{})
+ globalLocks.refreshWG = sync.WaitGroup{}
+ globalLocks.refreshWG.Add(1)
+ go refreshLocks(&globalLocks.refreshWG, globalLocks.cancelRefresh)
+ }
+
+ globalLocks.locks = append(globalLocks.locks, lock)
+ globalLocks.Unlock()
+
+ return lock, err
+}
+
+var refreshInterval = 5 * time.Minute
+
+func refreshLocks(wg *sync.WaitGroup, done <-chan struct{}) {
+ debug.Log("start")
+ defer func() {
+ wg.Done()
+ globalLocks.Lock()
+ globalLocks.cancelRefresh = nil
+ globalLocks.Unlock()
+ }()
+
+	ticker := time.NewTicker(refreshInterval); defer ticker.Stop()
+
+ for {
+ select {
+ case <-done:
+ debug.Log("terminate")
+ return
+ case <-ticker.C:
+ debug.Log("refreshing locks")
+ globalLocks.Lock()
+ for _, lock := range globalLocks.locks {
+ err := lock.Refresh(context.TODO())
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "unable to refresh lock: %v\n", err)
+ }
+ }
+ globalLocks.Unlock()
+ }
+ }
+}
+
+func unlockRepo(lock *restic.Lock) error {
+ globalLocks.Lock()
+ defer globalLocks.Unlock()
+
+ debug.Log("unlocking repository with lock %p", lock)
+ if err := lock.Unlock(); err != nil {
+ debug.Log("error while unlocking: %v", err)
+ return err
+ }
+
+ for i := 0; i < len(globalLocks.locks); i++ {
+ if lock == globalLocks.locks[i] {
+ globalLocks.locks = append(globalLocks.locks[:i], globalLocks.locks[i+1:]...)
+ return nil
+ }
+ }
+
+ return nil
+}
+
+func unlockAll() error {
+ globalLocks.Lock()
+ defer globalLocks.Unlock()
+
+ debug.Log("unlocking %d locks", len(globalLocks.locks))
+ for _, lock := range globalLocks.locks {
+ if err := lock.Unlock(); err != nil {
+ debug.Log("error while unlocking: %v", err)
+ return err
+ }
+ debug.Log("successfully removed lock")
+ }
+
+ return nil
+}
+
+func init() {
+ AddCleanupHandler(unlockAll)
+}
diff --git a/cmd/restic/main.go b/cmd/restic/main.go
new file mode 100644
index 000000000..d1f9c5547
--- /dev/null
+++ b/cmd/restic/main.go
@@ -0,0 +1,94 @@
+package main
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "log"
+ "os"
+ "runtime"
+
+ "github.com/restic/restic/internal/debug"
+ "github.com/restic/restic/internal/options"
+ "github.com/restic/restic/internal/restic"
+
+ "github.com/spf13/cobra"
+
+ "github.com/restic/restic/internal/errors"
+)
+
+// cmdRoot is the base command when no other command has been specified.
+var cmdRoot = &cobra.Command{
+ Use: "restic",
+ Short: "Backup and restore files",
+ Long: `
+restic is a backup program which allows saving multiple revisions of files and
+directories in an encrypted repository stored on different backends.
+`,
+ SilenceErrors: true,
+ SilenceUsage: true,
+ DisableAutoGenTag: true,
+
+ PersistentPreRunE: func(*cobra.Command, []string) error {
+ // parse extended options
+ opts, err := options.Parse(globalOptions.Options)
+ if err != nil {
+ return err
+ }
+ globalOptions.extended = opts
+
+ pwd, err := resolvePassword(globalOptions, "RESTIC_PASSWORD")
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Resolving password failed: %v\n", err)
+ Exit(1)
+ }
+ globalOptions.password = pwd
+
+ // run the debug functions for all subcommands (if build tag "debug" is
+ // enabled)
+ if err := runDebug(); err != nil {
+ return err
+ }
+
+ return nil
+ },
+}
+
+var logBuffer = bytes.NewBuffer(nil)
+
+func init() {
+ // install custom global logger into a buffer, if an error occurs
+ // we can show the logs
+ log.SetOutput(logBuffer)
+}
+
+func main() {
+ debug.Log("main %#v", os.Args)
+ debug.Log("restic %s, compiled with %v on %v/%v",
+ version, runtime.Version(), runtime.GOOS, runtime.GOARCH)
+ err := cmdRoot.Execute()
+
+ switch {
+ case restic.IsAlreadyLocked(errors.Cause(err)):
+ fmt.Fprintf(os.Stderr, "%v\nthe `unlock` command can be used to remove stale locks\n", err)
+ case errors.IsFatal(errors.Cause(err)):
+ fmt.Fprintf(os.Stderr, "%v\n", err)
+ case err != nil:
+ fmt.Fprintf(os.Stderr, "%+v\n", err)
+
+ if logBuffer.Len() > 0 {
+ fmt.Fprintf(os.Stderr, "also, the following messages were logged by a library:\n")
+ sc := bufio.NewScanner(logBuffer)
+ for sc.Scan() {
+ fmt.Fprintln(os.Stderr, sc.Text())
+ }
+ }
+ }
+
+ var exitCode int
+ if err != nil {
+ exitCode = 1
+ }
+
+ Exit(exitCode)
+}
diff --git a/cmd/restic/table.go b/cmd/restic/table.go
new file mode 100644
index 000000000..7a5d17a53
--- /dev/null
+++ b/cmd/restic/table.go
@@ -0,0 +1,46 @@
+package main
+
+import (
+ "fmt"
+ "io"
+ "strings"
+)
+
+// Table contains data for a table to be printed.
+type Table struct {
+ Header string
+ Rows [][]interface{}
+
+ RowFormat string
+}
+
+// NewTable initializes a new Table.
+func NewTable() Table {
+ return Table{
+ Rows: [][]interface{}{},
+ }
+}
+
+// Write prints the table to w.
+func (t Table) Write(w io.Writer) error {
+ _, err := fmt.Fprintln(w, t.Header)
+ if err != nil {
+ return err
+ }
+ _, err = fmt.Fprintln(w, strings.Repeat("-", 70))
+ if err != nil {
+ return err
+ }
+
+ for _, row := range t.Rows {
+ _, err = fmt.Fprintf(w, t.RowFormat+"\n", row...)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// TimeFormat is the format used for all timestamps printed by restic.
+const TimeFormat = "2006-01-02 15:04:05"
diff --git a/cmd/restic/testdata/backup-data.tar.gz b/cmd/restic/testdata/backup-data.tar.gz
new file mode 100644
index 000000000..337c18fd9
--- /dev/null
+++ b/cmd/restic/testdata/backup-data.tar.gz
Binary files differ
diff --git a/cmd/restic/testdata/old-index-repo.tar.gz b/cmd/restic/testdata/old-index-repo.tar.gz
new file mode 100644
index 000000000..9cfc38573
--- /dev/null
+++ b/cmd/restic/testdata/old-index-repo.tar.gz
Binary files differ
diff --git a/cmd/restic/testdata/repo-restore-permissions-test.tar.gz b/cmd/restic/testdata/repo-restore-permissions-test.tar.gz
new file mode 100644
index 000000000..36aa62dbf
--- /dev/null
+++ b/cmd/restic/testdata/repo-restore-permissions-test.tar.gz
Binary files differ
diff --git a/cmd/restic/testdata/repo-same-timestamps.tar.gz b/cmd/restic/testdata/repo-same-timestamps.tar.gz
new file mode 100644
index 000000000..42cf2b2bb
--- /dev/null
+++ b/cmd/restic/testdata/repo-same-timestamps.tar.gz
Binary files differ
diff --git a/cmd/restic/testdata/small-repo.tar.gz b/cmd/restic/testdata/small-repo.tar.gz
new file mode 100644
index 000000000..92ce87962
--- /dev/null
+++ b/cmd/restic/testdata/small-repo.tar.gz
Binary files differ
diff --git a/cmd/restic/testdata/test.hl.tar.gz b/cmd/restic/testdata/test.hl.tar.gz
new file mode 100644
index 000000000..302578199
--- /dev/null
+++ b/cmd/restic/testdata/test.hl.tar.gz
Binary files differ