summaryrefslogtreecommitdiff
path: root/cmd
diff options
context:
space:
mode:
authorFélix Sipma <felix+debian@gueux.org>2019-07-30 20:04:27 +0200
committerFélix Sipma <felix+debian@gueux.org>2019-07-30 20:04:27 +0200
commit7f40a33724424ab2a5df3a215451775a71680452 (patch)
tree65e4220d82a7bbf1f172eda40bfaabcb18d1f30a /cmd
parent3ca26846d6c7013885026f21841c280c4964010d (diff)
New upstream version 0.9.5+ds
Diffstat (limited to 'cmd')
-rw-r--r--cmd/restic/acl.go131
-rw-r--r--cmd/restic/acl_test.go96
-rw-r--r--cmd/restic/cmd_backup.go115
-rw-r--r--cmd/restic/cmd_cat.go6
-rw-r--r--cmd/restic/cmd_dump.go183
-rw-r--r--cmd/restic/cmd_find.go63
-rw-r--r--cmd/restic/cmd_forget.go231
-rw-r--r--cmd/restic/cmd_generate.go2
-rw-r--r--cmd/restic/cmd_mount.go2
-rw-r--r--cmd/restic/cmd_restore.go51
-rw-r--r--cmd/restic/cmd_snapshots.go127
-rw-r--r--cmd/restic/cmd_stats.go3
-rw-r--r--cmd/restic/exclude.go12
-rw-r--r--cmd/restic/exclude_test.go27
-rw-r--r--cmd/restic/global.go30
-rw-r--r--cmd/restic/integration_test.go30
16 files changed, 856 insertions, 253 deletions
diff --git a/cmd/restic/acl.go b/cmd/restic/acl.go
new file mode 100644
index 000000000..562ea89e1
--- /dev/null
+++ b/cmd/restic/acl.go
@@ -0,0 +1,131 @@
+package main
+
+// Adapted from https://github.com/maxymania/go-system/blob/master/posix_acl/posix_acl.go
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+)
+
+const (
+ aclUserOwner = 0x0001
+ aclUser = 0x0002
+ aclGroupOwner = 0x0004
+ aclGroup = 0x0008
+ aclMask = 0x0010
+ aclOthers = 0x0020
+)
+
+type aclSID uint64
+
+type aclElem struct {
+ Tag uint16
+ Perm uint16
+ ID uint32
+}
+
+type acl struct {
+ Version uint32
+ List []aclElement
+}
+
+type aclElement struct {
+ aclSID
+ Perm uint16
+}
+
+func (a *aclSID) setUID(uid uint32) {
+ *a = aclSID(uid) | (aclUser << 32)
+}
+func (a *aclSID) setGID(gid uint32) {
+ *a = aclSID(gid) | (aclGroup << 32)
+}
+
+func (a *aclSID) setType(tp int) {
+ *a = aclSID(tp) << 32
+}
+
+func (a aclSID) getType() int {
+ return int(a >> 32)
+}
+func (a aclSID) getID() uint32 {
+ return uint32(a & 0xffffffff)
+}
+func (a aclSID) String() string {
+ switch a >> 32 {
+ case aclUserOwner:
+ return "user::"
+ case aclUser:
+ return fmt.Sprintf("user:%v:", a.getID())
+ case aclGroupOwner:
+ return "group::"
+ case aclGroup:
+ return fmt.Sprintf("group:%v:", a.getID())
+ case aclMask:
+ return "mask::"
+ case aclOthers:
+ return "other::"
+ }
+ return "?:"
+}
+
+func (a aclElement) String() string {
+ str := ""
+ if (a.Perm & 4) != 0 {
+ str += "r"
+ } else {
+ str += "-"
+ }
+ if (a.Perm & 2) != 0 {
+ str += "w"
+ } else {
+ str += "-"
+ }
+ if (a.Perm & 1) != 0 {
+ str += "x"
+ } else {
+ str += "-"
+ }
+ return fmt.Sprintf("%v%v", a.aclSID, str)
+}
+
+func (a *acl) decode(xattr []byte) {
+ var elem aclElement
+ ae := new(aclElem)
+ nr := bytes.NewReader(xattr)
+ e := binary.Read(nr, binary.LittleEndian, &a.Version)
+ if e != nil {
+ a.Version = 0
+ return
+ }
+ if len(a.List) > 0 {
+ a.List = a.List[:0]
+ }
+ for binary.Read(nr, binary.LittleEndian, ae) == nil {
+ elem.aclSID = (aclSID(ae.Tag) << 32) | aclSID(ae.ID)
+ elem.Perm = ae.Perm
+ a.List = append(a.List, elem)
+ }
+}
+
+func (a *acl) encode() []byte {
+ buf := new(bytes.Buffer)
+ ae := new(aclElem)
+ binary.Write(buf, binary.LittleEndian, &a.Version)
+ for _, elem := range a.List {
+ ae.Tag = uint16(elem.getType())
+ ae.Perm = elem.Perm
+ ae.ID = elem.getID()
+ binary.Write(buf, binary.LittleEndian, ae)
+ }
+ return buf.Bytes()
+}
+
+func (a *acl) String() string {
+ var finalacl string
+ for _, acl := range a.List {
+ finalacl += acl.String() + "\n"
+ }
+ return finalacl
+}
diff --git a/cmd/restic/acl_test.go b/cmd/restic/acl_test.go
new file mode 100644
index 000000000..1e069d168
--- /dev/null
+++ b/cmd/restic/acl_test.go
@@ -0,0 +1,96 @@
+package main
+
+import (
+ "reflect"
+ "testing"
+)
+
+func Test_acl_decode(t *testing.T) {
+ type args struct {
+ xattr []byte
+ }
+ tests := []struct {
+ name string
+ args args
+ want string
+ }{
+ {
+ name: "decode string",
+ args: args{
+ xattr: []byte{2, 0, 0, 0, 1, 0, 6, 0, 255, 255, 255, 255, 2, 0, 7, 0, 0, 0, 0, 0, 2, 0, 7, 0, 254, 255, 0, 0, 4, 0, 7, 0, 255, 255, 255, 255, 16, 0, 7, 0, 255, 255, 255, 255, 32, 0, 4, 0, 255, 255, 255, 255},
+ },
+ want: "user::rw-\nuser:0:rwx\nuser:65534:rwx\ngroup::rwx\nmask::rwx\nother::r--\n",
+ },
+ {
+ name: "decode fail",
+ args: args{
+ xattr: []byte("abctest"),
+ },
+ want: "",
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ a := &acl{}
+ a.decode(tt.args.xattr)
+ if tt.want != a.String() {
+ t.Errorf("acl.decode() = %v, want: %v", a.String(), tt.want)
+ }
+ })
+ }
+}
+
+func Test_acl_encode(t *testing.T) {
+ tests := []struct {
+ name string
+ want []byte
+ args []aclElement
+ }{
+ {
+ name: "encode values",
+ want: []byte{2, 0, 0, 0, 1, 0, 6, 0, 255, 255, 255, 255, 2, 0, 7, 0, 0, 0, 0, 0, 2, 0, 7, 0, 254, 255, 0, 0, 4, 0, 7, 0, 255, 255, 255, 255, 16, 0, 7, 0, 255, 255, 255, 255, 32, 0, 4, 0, 255, 255, 255, 255},
+ args: []aclElement{
+ {
+ aclSID: 8589934591,
+ Perm: 6,
+ },
+ {
+ aclSID: 8589934592,
+ Perm: 7,
+ },
+ {
+ aclSID: 8590000126,
+ Perm: 7,
+ },
+ {
+ aclSID: 21474836479,
+ Perm: 7,
+ },
+ {
+ aclSID: 73014444031,
+ Perm: 7,
+ },
+ {
+ aclSID: 141733920767,
+ Perm: 4,
+ },
+ },
+ },
+ {
+ name: "encode fail",
+ want: []byte{2, 0, 0, 0},
+ args: []aclElement{},
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ a := &acl{
+ Version: 2,
+ List: tt.args,
+ }
+ if got := a.encode(); !reflect.DeepEqual(got, tt.want) {
+ t.Errorf("acl.encode() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
diff --git a/cmd/restic/cmd_backup.go b/cmd/restic/cmd_backup.go
index fc24868a5..a37d3f168 100644
--- a/cmd/restic/cmd_backup.go
+++ b/cmd/restic/cmd_backup.go
@@ -5,6 +5,7 @@ import (
"bytes"
"context"
"fmt"
+ "io"
"io/ioutil"
"os"
"path/filepath"
@@ -23,6 +24,7 @@ import (
"github.com/restic/restic/internal/restic"
"github.com/restic/restic/internal/textfile"
"github.com/restic/restic/internal/ui"
+ "github.com/restic/restic/internal/ui/jsonstatus"
"github.com/restic/restic/internal/ui/termstatus"
)
@@ -68,20 +70,22 @@ given as the arguments.
// BackupOptions bundles all options for the backup command.
type BackupOptions struct {
- Parent string
- Force bool
- Excludes []string
- ExcludeFiles []string
- ExcludeOtherFS bool
- ExcludeIfPresent []string
- ExcludeCaches bool
- Stdin bool
- StdinFilename string
- Tags []string
- Host string
- FilesFrom []string
- TimeStamp string
- WithAtime bool
+ Parent string
+ Force bool
+ Excludes []string
+ InsensitiveExcludes []string
+ ExcludeFiles []string
+ ExcludeOtherFS bool
+ ExcludeIfPresent []string
+ ExcludeCaches bool
+ Stdin bool
+ StdinFilename string
+ Tags []string
+ Host string
+ FilesFrom []string
+ TimeStamp string
+ WithAtime bool
+ IgnoreInode bool
}
var backupOptions BackupOptions
@@ -93,10 +97,11 @@ func init() {
f.StringVar(&backupOptions.Parent, "parent", "", "use this parent snapshot (default: last snapshot in the repo that has the same target files/directories)")
f.BoolVarP(&backupOptions.Force, "force", "f", false, `force re-reading the target files/directories (overrides the "parent" flag)`)
f.StringArrayVarP(&backupOptions.Excludes, "exclude", "e", nil, "exclude a `pattern` (can be specified multiple times)")
+ f.StringArrayVar(&backupOptions.InsensitiveExcludes, "iexclude", nil, "same as `--exclude` but ignores the casing of filenames")
f.StringArrayVar(&backupOptions.ExcludeFiles, "exclude-file", nil, "read exclude patterns from a `file` (can be specified multiple times)")
f.BoolVarP(&backupOptions.ExcludeOtherFS, "one-file-system", "x", false, "exclude other file systems")
f.StringArrayVar(&backupOptions.ExcludeIfPresent, "exclude-if-present", nil, "takes filename[:header], exclude contents of directories containing filename (except filename itself) if header of that file is as provided (can be specified multiple times)")
- f.BoolVar(&backupOptions.ExcludeCaches, "exclude-caches", false, `excludes cache directories that are marked with a CACHEDIR.TAG file`)
+ f.BoolVar(&backupOptions.ExcludeCaches, "exclude-caches", false, `excludes cache directories that are marked with a CACHEDIR.TAG file. See http://bford.info/cachedir/spec.html for the Cache Directory Tagging Standard`)
f.BoolVar(&backupOptions.Stdin, "stdin", false, "read backup from stdin")
f.StringVar(&backupOptions.StdinFilename, "stdin-filename", "stdin", "file name to use when reading from stdin")
f.StringArrayVar(&backupOptions.Tags, "tag", nil, "add a `tag` for the new snapshot (can be specified multiple times)")
@@ -108,6 +113,7 @@ func init() {
f.StringArrayVar(&backupOptions.FilesFrom, "files-from", nil, "read the files to backup from file (can be combined with file args/can be specified multiple times)")
f.StringVar(&backupOptions.TimeStamp, "time", "", "time of the backup (ex. '2012-11-01 22:08:41') (default: now)")
f.BoolVar(&backupOptions.WithAtime, "with-atime", false, "store the atime for all files and directories")
+ f.BoolVar(&backupOptions.IgnoreInode, "ignore-inode", false, "ignore inode number changes when checking for modified files")
}
// filterExisting returns a slice of all existing items, or an error if no
@@ -222,6 +228,10 @@ func collectRejectByNameFuncs(opts BackupOptions, repo *repository.Repository, t
opts.Excludes = append(opts.Excludes, excludes...)
}
+ if len(opts.InsensitiveExcludes) > 0 {
+ fs = append(fs, rejectByInsensitivePattern(opts.InsensitiveExcludes))
+ }
+
if len(opts.Excludes) > 0 {
fs = append(fs, rejectByPattern(opts.Excludes))
}
@@ -395,13 +405,43 @@ func runBackup(opts BackupOptions, gopts GlobalOptions, term *termstatus.Termina
var t tomb.Tomb
- term.Print("open repository\n")
+ if gopts.verbosity >= 2 && !gopts.JSON {
+ term.Print("open repository\n")
+ }
+
repo, err := OpenRepository(gopts)
if err != nil {
return err
}
- p := ui.NewBackup(term, gopts.verbosity)
+ type ArchiveProgressReporter interface {
+ CompleteItem(item string, previous, current *restic.Node, s archiver.ItemStats, d time.Duration)
+ StartFile(filename string)
+ CompleteBlob(filename string, bytes uint64)
+ ScannerError(item string, fi os.FileInfo, err error) error
+ ReportTotal(item string, s archiver.ScanStats)
+ SetMinUpdatePause(d time.Duration)
+ Run(ctx context.Context) error
+ Error(item string, fi os.FileInfo, err error) error
+ Finish(snapshotID restic.ID)
+
+ // ui.StdioWrapper
+ Stdout() io.WriteCloser
+ Stderr() io.WriteCloser
+
+ // ui.Message
+ E(msg string, args ...interface{})
+ P(msg string, args ...interface{})
+ V(msg string, args ...interface{})
+ VV(msg string, args ...interface{})
+ }
+
+ var p ArchiveProgressReporter
+ if gopts.JSON {
+ p = jsonstatus.NewBackup(term, gopts.verbosity)
+ } else {
+ p = ui.NewBackup(term, gopts.verbosity)
+ }
// use the terminal for stdout/stderr
prevStdout, prevStderr := gopts.stdout, gopts.stderr
@@ -416,13 +456,15 @@ func runBackup(opts BackupOptions, gopts GlobalOptions, term *termstatus.Termina
if fps > 60 {
fps = 60
}
- p.MinUpdatePause = time.Second / time.Duration(fps)
+ p.SetMinUpdatePause(time.Second / time.Duration(fps))
}
}
t.Go(func() error { return p.Run(t.Context(gopts.ctx)) })
- p.V("lock repository")
+ if !gopts.JSON {
+ p.V("lock repository")
+ }
lock, err := lockRepo(repo)
defer unlockRepo(lock)
if err != nil {
@@ -441,7 +483,9 @@ func runBackup(opts BackupOptions, gopts GlobalOptions, term *termstatus.Termina
return err
}
- p.V("load index files")
+ if !gopts.JSON {
+ p.V("load index files")
+ }
err = repo.LoadIndex(gopts.ctx)
if err != nil {
return err
@@ -452,7 +496,7 @@ func runBackup(opts BackupOptions, gopts GlobalOptions, term *termstatus.Termina
return err
}
- if parentSnapshotID != nil {
+ if !gopts.JSON && parentSnapshotID != nil {
p.V("using parent snapshot %v\n", parentSnapshotID.Str())
}
@@ -476,7 +520,9 @@ func runBackup(opts BackupOptions, gopts GlobalOptions, term *termstatus.Termina
var targetFS fs.FS = fs.Local{}
if opts.Stdin {
- p.V("read data from stdin")
+ if !gopts.JSON {
+ p.V("read data from stdin")
+ }
targetFS = &fs.Reader{
ModTime: timeStamp,
Name: opts.StdinFilename,
@@ -492,7 +538,9 @@ func runBackup(opts BackupOptions, gopts GlobalOptions, term *termstatus.Termina
sc.Error = p.ScannerError
sc.Result = p.ReportTotal
- p.V("start scan on %v", targets)
+ if !gopts.JSON {
+ p.V("start scan on %v", targets)
+ }
t.Go(func() error { return sc.Scan(t.Context(gopts.ctx), targets) })
arch := archiver.New(repo, targetFS, archiver.Options{})
@@ -500,9 +548,10 @@ func runBackup(opts BackupOptions, gopts GlobalOptions, term *termstatus.Termina
arch.Select = selectFilter
arch.WithAtime = opts.WithAtime
arch.Error = p.Error
- arch.CompleteItem = p.CompleteItemFn
+ arch.CompleteItem = p.CompleteItem
arch.StartFile = p.StartFile
arch.CompleteBlob = p.CompleteBlob
+ arch.IgnoreInode = opts.IgnoreInode
if parentSnapshotID == nil {
parentSnapshotID = &restic.ID{}
@@ -519,10 +568,14 @@ func runBackup(opts BackupOptions, gopts GlobalOptions, term *termstatus.Termina
uploader := archiver.IndexUploader{
Repository: repo,
Start: func() {
- p.VV("uploading intermediate index")
+ if !gopts.JSON {
+ p.VV("uploading intermediate index")
+ }
},
Complete: func(id restic.ID) {
- p.V("uploaded intermediate index %v", id.Str())
+ if !gopts.JSON {
+ p.V("uploaded intermediate index %v", id.Str())
+ }
},
}
@@ -530,14 +583,18 @@ func runBackup(opts BackupOptions, gopts GlobalOptions, term *termstatus.Termina
return uploader.Upload(gopts.ctx, t.Context(gopts.ctx), 30*time.Second)
})
- p.V("start backup on %v", targets)
+ if !gopts.JSON {
+ p.V("start backup on %v", targets)
+ }
_, id, err := arch.Snapshot(gopts.ctx, targets, snapshotOpts)
if err != nil {
return errors.Fatalf("unable to save snapshot: %v", err)
}
- p.Finish()
- p.P("snapshot %s saved\n", id.Str())
+ p.Finish(id)
+ if !gopts.JSON {
+ p.P("snapshot %s saved\n", id.Str())
+ }
// cleanly shutdown all running goroutines
t.Kill(nil)
diff --git a/cmd/restic/cmd_cat.go b/cmd/restic/cmd_cat.go
index e735daf88..0dffc0508 100644
--- a/cmd/restic/cmd_cat.go
+++ b/cmd/restic/cmd_cat.go
@@ -74,7 +74,7 @@ func runCat(gopts GlobalOptions, args []string) error {
fmt.Println(string(buf))
return nil
case "index":
- buf, err := repo.LoadAndDecrypt(gopts.ctx, restic.IndexFile, id)
+ buf, err := repo.LoadAndDecrypt(gopts.ctx, nil, restic.IndexFile, id)
if err != nil {
return err
}
@@ -99,7 +99,7 @@ func runCat(gopts GlobalOptions, args []string) error {
return nil
case "key":
h := restic.Handle{Type: restic.KeyFile, Name: id.String()}
- buf, err := backend.LoadAll(gopts.ctx, repo.Backend(), h)
+ buf, err := backend.LoadAll(gopts.ctx, nil, repo.Backend(), h)
if err != nil {
return err
}
@@ -150,7 +150,7 @@ func runCat(gopts GlobalOptions, args []string) error {
switch tpe {
case "pack":
h := restic.Handle{Type: restic.DataFile, Name: id.String()}
- buf, err := backend.LoadAll(gopts.ctx, repo.Backend(), h)
+ buf, err := backend.LoadAll(gopts.ctx, nil, repo.Backend(), h)
if err != nil {
return err
}
diff --git a/cmd/restic/cmd_dump.go b/cmd/restic/cmd_dump.go
index a2e4fbe4a..ba21c8ea6 100644
--- a/cmd/restic/cmd_dump.go
+++ b/cmd/restic/cmd_dump.go
@@ -1,15 +1,19 @@
package main
import (
+ "archive/tar"
"context"
"fmt"
+ "io"
"os"
"path"
"path/filepath"
+ "strings"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/restic"
+ "github.com/restic/restic/internal/walker"
"github.com/spf13/cobra"
)
@@ -50,41 +54,18 @@ func init() {
func splitPath(p string) []string {
d, f := path.Split(p)
- if d == "" || d == "/" {
+ if d == "" {
return []string{f}
}
+ if d == "/" {
+ return []string{d}
+ }
s := splitPath(path.Clean(d))
return append(s, f)
}
-func dumpNode(ctx context.Context, repo restic.Repository, node *restic.Node) error {
- var buf []byte
- for _, id := range node.Content {
- size, found := repo.LookupBlobSize(id, restic.DataBlob)
- if !found {
- return errors.Errorf("id %v not found in repository", id)
- }
-
- buf = buf[:cap(buf)]
- if len(buf) < restic.CiphertextLength(int(size)) {
- buf = restic.NewBlobBuffer(int(size))
- }
-
- n, err := repo.LoadBlob(ctx, restic.DataBlob, id, buf)
- if err != nil {
- return err
- }
- buf = buf[:n]
-
- _, err = os.Stdout.Write(buf)
- if err != nil {
- return errors.Wrap(err, "Write")
- }
- }
- return nil
-}
+func printFromTree(ctx context.Context, tree *restic.Tree, repo restic.Repository, prefix string, pathComponents []string, pathToPrint string) error {
-func printFromTree(ctx context.Context, tree *restic.Tree, repo restic.Repository, prefix string, pathComponents []string) error {
if tree == nil {
return fmt.Errorf("called with a nil tree")
}
@@ -97,16 +78,19 @@ func printFromTree(ctx context.Context, tree *restic.Tree, repo restic.Repositor
}
item := filepath.Join(prefix, pathComponents[0])
for _, node := range tree.Nodes {
- if node.Name == pathComponents[0] {
+ if node.Name == pathComponents[0] || pathComponents[0] == "/" {
switch {
case l == 1 && node.Type == "file":
- return dumpNode(ctx, repo, node)
+ return getNodeData(ctx, os.Stdout, repo, node)
case l > 1 && node.Type == "dir":
subtree, err := repo.LoadTree(ctx, *node.Subtree)
if err != nil {
return errors.Wrapf(err, "cannot load subtree for %q", item)
}
- return printFromTree(ctx, subtree, repo, item, pathComponents[1:])
+ return printFromTree(ctx, subtree, repo, item, pathComponents[1:], pathToPrint)
+ case node.Type == "dir":
+ node.Path = pathToPrint
+ return tarTree(ctx, repo, node, pathToPrint)
case l > 1:
return fmt.Errorf("%q should be a dir, but s a %q", item, node.Type)
case node.Type != "file":
@@ -129,7 +113,7 @@ func runDump(opts DumpOptions, gopts GlobalOptions, args []string) error {
debug.Log("dump file %q from %q", pathToPrint, snapshotIDString)
- splittedPath := splitPath(pathToPrint)
+ splittedPath := splitPath(path.Clean(pathToPrint))
repo, err := OpenRepository(gopts)
if err != nil {
@@ -173,10 +157,143 @@ func runDump(opts DumpOptions, gopts GlobalOptions, args []string) error {
Exitf(2, "loading tree for snapshot %q failed: %v", snapshotIDString, err)
}
- err = printFromTree(ctx, tree, repo, "", splittedPath)
+ err = printFromTree(ctx, tree, repo, "", splittedPath, pathToPrint)
if err != nil {
Exitf(2, "cannot dump file: %v", err)
}
return nil
}
+
+func getNodeData(ctx context.Context, output io.Writer, repo restic.Repository, node *restic.Node) error {
+ var buf []byte
+ for _, id := range node.Content {
+
+ size, found := repo.LookupBlobSize(id, restic.DataBlob)
+ if !found {
+ return errors.Errorf("id %v not found in repository", id)
+ }
+
+ buf = buf[:cap(buf)]
+ if len(buf) < restic.CiphertextLength(int(size)) {
+ buf = restic.NewBlobBuffer(int(size))
+ }
+
+ n, err := repo.LoadBlob(ctx, restic.DataBlob, id, buf)
+ if err != nil {
+ return err
+ }
+ buf = buf[:n]
+
+ _, err = output.Write(buf)
+ if err != nil {
+ return errors.Wrap(err, "Write")
+ }
+
+ }
+ return nil
+}
+
+func tarTree(ctx context.Context, repo restic.Repository, rootNode *restic.Node, rootPath string) error {
+
+ if stdoutIsTerminal() {
+ return fmt.Errorf("stdout is the terminal, please redirect output")
+ }
+
+ tw := tar.NewWriter(os.Stdout)
+ defer tw.Close()
+
+ // If we want to dump "/" we'll need to add the name of the first node, too
+ // as it would get lost otherwise.
+ if rootNode.Path == "/" {
+ rootNode.Path = path.Join(rootNode.Path, rootNode.Name)
+ rootPath = rootNode.Path
+ }
+
+ // we know that rootNode is a folder and walker.Walk will already process
+ // the next node, so we have to tar this one first, too
+ if err := tarNode(ctx, tw, rootNode, repo); err != nil {
+ return err
+ }
+
+ err := walker.Walk(ctx, repo, *rootNode.Subtree, nil, func(_ restic.ID, nodepath string, node *restic.Node, err error) (bool, error) {
+ if err != nil {
+ return false, err
+ }
+ if node == nil {
+ return false, nil
+ }
+
+ node.Path = path.Join(rootPath, nodepath)
+
+ if node.Type == "file" || node.Type == "symlink" || node.Type == "dir" {
+ err := tarNode(ctx, tw, node, repo)
+ if err != nil {
+ return false, err
+ }
+ }
+
+ return false, nil
+ })
+
+ return err
+}
+
+func tarNode(ctx context.Context, tw *tar.Writer, node *restic.Node, repo restic.Repository) error {
+
+ header := &tar.Header{
+ Name: node.Path,
+ Size: int64(node.Size),
+ Mode: int64(node.Mode),
+ Uid: int(node.UID),
+ Gid: int(node.GID),
+ ModTime: node.ModTime,
+ AccessTime: node.AccessTime,
+ ChangeTime: node.ChangeTime,
+ PAXRecords: parseXattrs(node.ExtendedAttributes),
+ }
+
+ if node.Type == "symlink" {
+ header.Typeflag = tar.TypeSymlink
+ header.Linkname = node.LinkTarget
+ }
+
+ if node.Type == "dir" {
+ header.Typeflag = tar.TypeDir
+ }
+
+ err := tw.WriteHeader(header)
+
+ if err != nil {
+ return errors.Wrap(err, "TarHeader ")
+ }
+
+ return getNodeData(ctx, tw, repo, node)
+
+}
+
+func parseXattrs(xattrs []restic.ExtendedAttribute) map[string]string {
+ tmpMap := make(map[string]string)
+
+ for _, attr := range xattrs {
+ attrString := string(attr.Value)
+
+ if strings.HasPrefix(attr.Name, "system.posix_acl_") {
+ na := acl{}
+ na.decode(attr.Value)
+
+ if na.String() != "" {
+ if strings.Contains(attr.Name, "system.posix_acl_access") {
+ tmpMap["SCHILY.acl.access"] = na.String()
+ } else if strings.Contains(attr.Name, "system.posix_acl_default") {
+ tmpMap["SCHILY.acl.default"] = na.String()
+ }
+ }
+
+ } else {
+ tmpMap["SCHILY.xattr."+attr.Name] = attrString
+ }
+ }
+
+ return tmpMap
+}
diff --git a/cmd/restic/cmd_find.go b/cmd/restic/cmd_find.go
index 48b952f5d..ed25256f7 100644
--- a/cmd/restic/cmd_find.go
+++ b/cmd/restic/cmd_find.go
@@ -62,7 +62,7 @@ func init() {
f.BoolVar(&findOptions.BlobID, "blob", false, "pattern is a blob-ID")
f.BoolVar(&findOptions.TreeID, "tree", false, "pattern is a tree-ID")
f.BoolVar(&findOptions.PackID, "pack", false, "pattern is a pack-ID")
- f.BoolVar(&findOptions.ShowPackID, "show-pack-id", false, "display the pack-ID the blobs belong to (with --blob)")
+ f.BoolVar(&findOptions.ShowPackID, "show-pack-id", false, "display the pack-ID the blobs belong to (with --blob or --tree)")
f.BoolVarP(&findOptions.CaseInsensitive, "ignore-case", "i", false, "ignore case for pattern")
f.BoolVarP(&findOptions.ListLong, "long", "l", false, "use a long listing format showing size and mode")
@@ -258,9 +258,13 @@ func (f *Finder) findInSnapshot(ctx context.Context, sn *restic.Snapshot) error
}
f.out.newsn = sn
- return walker.Walk(ctx, f.repo, *sn.Tree, f.ignoreTrees, func(_ restic.ID, nodepath string, node *restic.Node, err error) (bool, error) {
+ return walker.Walk(ctx, f.repo, *sn.Tree, f.ignoreTrees, func(parentTreeID restic.ID, nodepath string, node *restic.Node, err error) (bool, error) {
if err != nil {
- return false, err
+ debug.Log("Error loading tree %v: %v", parentTreeID, err)
+
+ Printf("Unable to load tree %s\n ... which belongs to snapshot %s.\n", parentTreeID, sn.ID())
+
+ return false, walker.SkipNode
}
if node == nil {
@@ -340,7 +344,11 @@ func (f *Finder) findIDs(ctx context.Context, sn *restic.Snapshot) error {
f.out.newsn = sn
return walker.Walk(ctx, f.repo, *sn.Tree, f.ignoreTrees, func(parentTreeID restic.ID, nodepath string, node *restic.Node, err error) (bool, error) {
if err != nil {
- return false, err
+ debug.Log("Error loading tree %v: %v", parentTreeID, err)
+
+ Printf("Unable to load tree %s\n ... which belongs to snapshot %s.\n", parentTreeID, sn.ID())
+
+ return false, walker.SkipNode
}
if node == nil {
@@ -442,30 +450,39 @@ func (f *Finder) packsToBlobs(ctx context.Context, packs []string) error {
return nil
}
-func (f *Finder) findBlobsPacks(ctx context.Context) {
+func (f *Finder) findObjectPack(ctx context.Context, id string, t restic.BlobType) {
idx := f.repo.Index()
- for i := range f.blobIDs {
- rid, err := restic.ParseID(i)
- if err != nil {
- Printf("Note: cannot find pack for blob '%s', unable to parse ID: %v\n", i, err)
- continue
- }
- blobs, found := idx.Lookup(rid, restic.DataBlob)
- if !found {
- Printf("Blob %s not found in the index\n", rid.Str())
- continue
- }
+ rid, err := restic.ParseID(id)
+ if err != nil {
+ Printf("Note: cannot find pack for object '%s', unable to parse ID: %v\n", id, err)
+ return
+ }
- for _, b := range blobs {
- if b.ID.Equal(rid) {
- Printf("Blob belongs to pack %s\n ... Pack %s: %s\n", b.PackID, b.PackID.Str(), b.String())
- break
- }
+ blobs, found := idx.Lookup(rid, t)
+ if !found {
+ Printf("Object %s not found in the index\n", rid.Str())
+ return
+ }
+
+ for _, b := range blobs {
+ if b.ID.Equal(rid) {
+ Printf("Object belongs to pack %s\n ... Pack %s: %s\n", b.PackID, b.PackID.Str(), b.String())
+ break
}
}
}
+func (f *Finder) findObjectsPacks(ctx context.Context) {
+ for i := range f.blobIDs {
+ f.findObjectPack(ctx, i, restic.DataBlob)
+ }
+
+ for i := range f.treeIDs {
+ f.findObjectPack(ctx, i, restic.TreeBlob)
+ }
+}
+
func runFind(opts FindOptions, gopts GlobalOptions, args []string) error {
if len(args) == 0 {
return errors.Fatal("wrong number of arguments")
@@ -557,8 +574,8 @@ func runFind(opts FindOptions, gopts GlobalOptions, args []string) error {
}
f.out.Finish()
- if opts.ShowPackID && f.blobIDs != nil {
- f.findBlobsPacks(ctx)
+ if opts.ShowPackID && (f.blobIDs != nil || f.treeIDs != nil) {
+ f.findObjectsPacks(ctx)
}
return nil
diff --git a/cmd/restic/cmd_forget.go b/cmd/restic/cmd_forget.go
index bafd540bd..b047f1d45 100644
--- a/cmd/restic/cmd_forget.go
+++ b/cmd/restic/cmd_forget.go
@@ -3,10 +3,8 @@ package main
import (
"context"
"encoding/json"
- "sort"
- "strings"
+ "io"
- "github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/restic"
"github.com/spf13/cobra"
)
@@ -90,153 +88,129 @@ func runForget(opts ForgetOptions, gopts GlobalOptions, args []string) error {
return err
}
- // group by hostname and dirs
- type key struct {
- Hostname string
- Paths []string
- Tags []string
- }
- snapshotGroups := make(map[string]restic.Snapshots)
-
- var GroupByTag bool
- var GroupByHost bool
- var GroupByPath bool
- var GroupOptionList []string
-
- GroupOptionList = strings.Split(opts.GroupBy, ",")
-
- for _, option := range GroupOptionList {
- switch option {
- case "host":
- GroupByHost = true
- case "paths":
- GroupByPath = true
- case "tags":
- GroupByTag = true
- case "":
- default:
- return errors.Fatal("unknown grouping option: '" + option + "'")
- }
- }
-
removeSnapshots := 0
ctx, cancel := context.WithCancel(gopts.ctx)
defer cancel()
+
+ var snapshots restic.Snapshots
+
for sn := range FindFilteredSnapshots(ctx, repo, opts.Host, opts.Tags, opts.Paths, args) {
- if len(args) > 0 {
- // When explicit snapshots args are given, remove them immediately.
+ snapshots = append(snapshots, sn)
+ }
+
+ if len(args) > 0 {
+ // When explicit snapshots args are given, remove them immediately.
+ for _, sn := range snapshots {
if !opts.DryRun {
h := restic.Handle{Type: restic.SnapshotFile, Name: sn.ID().String()}
if err = repo.Backend().Remove(gopts.ctx, h); err != nil {
return err
}
- Verbosef("removed snapshot %v\n", sn.ID().Str())
+ if !gopts.JSON {
+ Verbosef("removed snapshot %v\n", sn.ID().Str())
+ }
removeSnapshots++
} else {
- Verbosef("would have removed snapshot %v\n", sn.ID().Str())
- }
- } else {
- // Determining grouping-keys
- var tags []string
- var hostname string
- var paths []string
-
- if GroupByTag {
- tags = sn.Tags
- sort.StringSlice(tags).Sort()
- }
- if GroupByHost {
- hostname = sn.Hostname
- }
- if GroupByPath {
- paths = sn.Paths
+ if !gopts.JSON {
+ Verbosef("would have removed snapshot %v\n", sn.ID().Str())
+ }
}
+ }
+ } else {
+ snapshotGroups, _, err := restic.GroupSnapshots(snapshots, opts.GroupBy)
+ if err != nil {
+ return err
+ }
- sort.StringSlice(sn.Paths).Sort()
- var k []byte
- var err error
-
- k, err = json.Marshal(key{Tags: tags, Hostname: hostname, Paths: paths})
+ policy := restic.ExpirePolicy{
+ Last: opts.Last,
+ Hourly: opts.Hourly,
+ Daily: opts.Daily,
+ Weekly: opts.Weekly,
+ Monthly: opts.Monthly,
+ Yearly: opts.Yearly,
+ Within: opts.Within,
+ Tags: opts.KeepTags,
+ }
- if err != nil {
- return err
+ if policy.Empty() && len(args) == 0 {
+ if !gopts.JSON {
+ Verbosef("no policy was specified, no snapshots will be removed\n")
}
- snapshotGroups[string(k)] = append(snapshotGroups[string(k)], sn)
}
- }
- policy := restic.ExpirePolicy{
- Last: opts.Last,
- Hourly: opts.Hourly,
- Daily: opts.Daily,
- Weekly: opts.Weekly,
- Monthly: opts.Monthly,
- Yearly: opts.Yearly,
- Within: opts.Within,
- Tags: opts.KeepTags,
- }
+ if !policy.Empty() {
+ if !gopts.JSON {
+ Verbosef("Applying Policy: %v\n", policy)
+ }
- if policy.Empty() && len(args) == 0 {
- Verbosef("no policy was specified, no snapshots will be removed\n")
- }
+ var jsonGroups []*ForgetGroup
- if !policy.Empty() {
- Verbosef("Applying Policy: %v\n", policy)
+ for k, snapshotGroup := range snapshotGroups {
+ if gopts.Verbose >= 1 && !gopts.JSON {
+ err = PrintSnapshotGroupHeader(gopts.stdout, k)
+ if err != nil {
+ return err
+ }
+ }
- for k, snapshotGroup := range snapshotGroups {
- var key key
- if json.Unmarshal([]byte(k), &key) != nil {
- return err
- }
+ var key restic.SnapshotGroupKey
+ if err := json.Unmarshal([]byte(k), &key); err != nil {
+ return err
+ }
- // Info
- Verbosef("snapshots")
- var infoStrings []string
- if GroupByTag {
- infoStrings = append(infoStrings, "tags ["+strings.Join(key.Tags, ", ")+"]")
- }
- if GroupByHost {
- infoStrings = append(infoStrings, "host ["+key.Hostname+"]")
- }
- if GroupByPath {
- infoStrings = append(infoStrings, "paths ["+strings.Join(key.Paths, ", ")+"]")
- }
- if infoStrings != nil {
- Verbosef(" for (" + strings.Join(infoStrings, ", ") + ")")
- }
- Verbosef(":\n\n")
+ var fg ForgetGroup
+ fg.Tags = key.Tags
+ fg.Host = key.Hostname
+ fg.Paths = key.Paths
- keep, remove, reasons := restic.ApplyPolicy(snapshotGroup, policy)
+ keep, remove, reasons := restic.ApplyPolicy(snapshotGroup, policy)
- if len(keep) != 0 && !gopts.Quiet {
- Printf("keep %d snapshots:\n", len(keep))
- PrintSnapshots(globalOptions.stdout, keep, reasons, opts.Compact)
- Printf("\n")
- }
+ if len(keep) != 0 && !gopts.Quiet && !gopts.JSON {
+ Printf("keep %d snapshots:\n", len(keep))
+ PrintSnapshots(globalOptions.stdout, keep, reasons, opts.Compact)
+ Printf("\n")
+ }
+ addJSONSnapshots(&fg.Keep, keep)
- if len(remove) != 0 && !gopts.Quiet {
- Printf("remove %d snapshots:\n", len(remove))
- PrintSnapshots(globalOptions.stdout, remove, nil, opts.Compact)
- Printf("\n")
- }
+ if len(remove) != 0 && !gopts.Quiet && !gopts.JSON {
+ Printf("remove %d snapshots:\n", len(remove))
+ PrintSnapshots(globalOptions.stdout, remove, nil, opts.Compact)
+ Printf("\n")
+ }
+ addJSONSnapshots(&fg.Remove, remove)
- removeSnapshots += len(remove)
+ fg.Reasons = reasons
- if !opts.DryRun {
- for _, sn := range remove {
- h := restic.Handle{Type: restic.SnapshotFile, Name: sn.ID().String()}
- err = repo.Backend().Remove(gopts.ctx, h)
- if err != nil {
- return err
+ jsonGroups = append(jsonGroups, &fg)
+
+ removeSnapshots += len(remove)
+
+ if !opts.DryRun {
+ for _, sn := range remove {
+ h := restic.Handle{Type: restic.SnapshotFile, Name: sn.ID().String()}
+ err = repo.Backend().Remove(gopts.ctx, h)
+ if err != nil {
+ return err
+ }
}
}
}
+
+ if gopts.JSON {
+ err = printJSONForget(gopts.stdout, jsonGroups)
+ if err != nil {
+ return err
+ }
+ }
}
}
if removeSnapshots > 0 && opts.Prune {
- Verbosef("%d snapshots have been removed, running prune\n", removeSnapshots)
+ if !gopts.JSON {
+ Verbosef("%d snapshots have been removed, running prune\n", removeSnapshots)
+ }
if !opts.DryRun {
return pruneRepository(gopts, repo)
}
@@ -244,3 +218,28 @@ func runForget(opts ForgetOptions, gopts GlobalOptions, args []string) error {
return nil
}
+
+// ForgetGroup helps to print what is forgotten in JSON.
+type ForgetGroup struct {
+ Tags []string `json:"tags"`
+ Host string `json:"host"`
+ Paths []string `json:"paths"`
+ Keep []Snapshot `json:"keep"`
+ Remove []Snapshot `json:"remove"`
+ Reasons []restic.KeepReason `json:"reasons"`
+}
+
+func addJSONSnapshots(js *[]Snapshot, list restic.Snapshots) {
+ for _, sn := range list {
+ k := Snapshot{
+ Snapshot: sn,
+ ID: sn.ID(),
+ ShortID: sn.ID().Str(),
+ }
+ *js = append(*js, k)
+ }
+}
+
+func printJSONForget(stdout io.Writer, forgets []*ForgetGroup) error {
+ return json.NewEncoder(stdout).Encode(forgets)
+}
diff --git a/cmd/restic/cmd_generate.go b/cmd/restic/cmd_generate.go
index 5c42537dc..3c9fba3d2 100644
--- a/cmd/restic/cmd_generate.go
+++ b/cmd/restic/cmd_generate.go
@@ -12,7 +12,7 @@ var cmdGenerate = &cobra.Command{
Use: "generate [command]",
Short: "Generate manual pages and auto-completion files (bash, zsh)",
Long: `
-The "generate" command writes automatically generated files like the man pages
+The "generate" command writes automatically generated files (like the man pages
and the auto-completion files for bash and zsh).
`,
DisableAutoGenTag: true,
diff --git a/cmd/restic/cmd_mount.go b/cmd/restic/cmd_mount.go
index 39ff1a144..69eea376e 100644
--- a/cmd/restic/cmd_mount.go
+++ b/cmd/restic/cmd_mount.go
@@ -149,7 +149,7 @@ func mount(opts MountOptions, gopts GlobalOptions, mountpoint string) error {
}
Printf("Now serving the repository at %s\n", mountpoint)
- Printf("Don't forget to umount after quitting!\n")
+ Printf("When finished, quit with Ctrl-c or umount the mountpoint.\n")
debug.Log("serving mount at %v", mountpoint)
err = fs.Serve(c, root)
diff --git a/cmd/restic/cmd_restore.go b/cmd/restic/cmd_restore.go
index 477192eab..7c056fdab 100644
--- a/cmd/restic/cmd_restore.go
+++ b/cmd/restic/cmd_restore.go
@@ -6,6 +6,7 @@ import (
"github.com/restic/restic/internal/filter"
"github.com/restic/restic/internal/restic"
"github.com/restic/restic/internal/restorer"
+ "strings"
"github.com/spf13/cobra"
)
@@ -28,13 +29,15 @@ repository.
// RestoreOptions collects all options for the restore command.
type RestoreOptions struct {
- Exclude []string
- Include []string
- Target string
- Host string
- Paths []string
- Tags restic.TagLists
- Verify bool
+ Exclude []string
+ InsensitiveExclude []string
+ Include []string
+ InsensitiveInclude []string
+ Target string
+ Host string
+ Paths []string
+ Tags restic.TagLists
+ Verify bool
}
var restoreOptions RestoreOptions
@@ -44,7 +47,9 @@ func init() {
flags := cmdRestore.Flags()
flags.StringArrayVarP(&restoreOptions.Exclude, "exclude", "e", nil, "exclude a `pattern` (can be specified multiple times)")
+ flags.StringArrayVar(&restoreOptions.InsensitiveExclude, "iexclude", nil, "same as `--exclude` but ignores the casing of filenames")
flags.StringArrayVarP(&restoreOptions.Include, "include", "i", nil, "include a `pattern`, exclude everything else (can be specified multiple times)")
+ flags.StringArrayVar(&restoreOptions.InsensitiveInclude, "iinclude", nil, "same as `--include` but ignores the casing of filenames")
flags.StringVarP(&restoreOptions.Target, "target", "t", "", "directory to extract data to")
flags.StringVarP(&restoreOptions.Host, "host", "H", "", `only consider snapshots for this host when the snapshot ID is "latest"`)
@@ -55,6 +60,16 @@ func init() {
func runRestore(opts RestoreOptions, gopts GlobalOptions, args []string) error {
ctx := gopts.ctx
+ hasExcludes := len(opts.Exclude) > 0 || len(opts.InsensitiveExclude) > 0
+ hasIncludes := len(opts.Include) > 0 || len(opts.InsensitiveInclude) > 0
+
+ for i, str := range opts.InsensitiveExclude {
+ opts.InsensitiveExclude[i] = strings.ToLower(str)
+ }
+
+ for i, str := range opts.InsensitiveInclude {
+ opts.InsensitiveInclude[i] = strings.ToLower(str)
+ }
switch {
case len(args) == 0:
@@ -67,7 +82,7 @@ func runRestore(opts RestoreOptions, gopts GlobalOptions, args []string) error {
return errors.Fatal("please specify a directory to restore to (--target)")
}
- if len(opts.Exclude) > 0 && len(opts.Include) > 0 {
+ if hasExcludes && hasIncludes {
return errors.Fatal("exclude and include patterns are mutually exclusive")
}
@@ -125,11 +140,16 @@ func runRestore(opts RestoreOptions, gopts GlobalOptions, args []string) error {
Warnf("error for exclude pattern: %v", err)
}
+ matchedInsensitive, _, err := filter.List(opts.InsensitiveExclude, strings.ToLower(item))
+ if err != nil {
+ Warnf("error for iexclude pattern: %v", err)
+ }
+
// An exclude filter is basically a 'wildcard but foo',
// so even if a childMayMatch, other children of a dir may not,
// therefore childMayMatch does not matter, but we should not go down
// unless the dir is selected for restore
- selectedForRestore = !matched
+ selectedForRestore = !matched && !matchedInsensitive
childMayBeSelected = selectedForRestore && node.Type == "dir"
return selectedForRestore, childMayBeSelected
@@ -141,15 +161,20 @@ func runRestore(opts RestoreOptions, gopts GlobalOptions, args []string) error {
Warnf("error for include pattern: %v", err)
}
- selectedForRestore = matched
- childMayBeSelected = childMayMatch && node.Type == "dir"
+ matchedInsensitive, childMayMatchInsensitive, err := filter.List(opts.InsensitiveInclude, strings.ToLower(item))
+ if err != nil {
+ Warnf("error for iinclude pattern: %v", err)
+ }
+
+ selectedForRestore = matched || matchedInsensitive
+ childMayBeSelected = (childMayMatch || childMayMatchInsensitive) && node.Type == "dir"
return selectedForRestore, childMayBeSelected
}
- if len(opts.Exclude) > 0 {
+ if hasExcludes {
res.SelectFilter = selectExcludeFilter
- } else if len(opts.Include) > 0 {
+ } else if hasIncludes {
res.SelectFilter = selectIncludeFilter
}
diff --git a/cmd/restic/cmd_snapshots.go b/cmd/restic/cmd_snapshots.go
index d9623b942..94cd08836 100644
--- a/cmd/restic/cmd_snapshots.go
+++ b/cmd/restic/cmd_snapshots.go
@@ -32,6 +32,7 @@ type SnapshotOptions struct {
Paths []string
Compact bool
Last bool
+ GroupBy string
}
var snapshotOptions SnapshotOptions
@@ -45,6 +46,7 @@ func init() {
f.StringArrayVar(&snapshotOptions.Paths, "path", nil, "only consider snapshots for this `path` (can be specified multiple times)")
f.BoolVarP(&snapshotOptions.Compact, "compact", "c", false, "use compact format")
f.BoolVar(&snapshotOptions.Last, "last", false, "only show the last snapshot for each host and path")
+ f.StringVarP(&snapshotOptions.GroupBy, "group-by", "g", "", "string for grouping snapshots by host,paths,tags")
}
func runSnapshots(opts SnapshotOptions, gopts GlobalOptions, args []string) error {
@@ -64,25 +66,41 @@ func runSnapshots(opts SnapshotOptions, gopts GlobalOptions, args []string) erro
ctx, cancel := context.WithCancel(gopts.ctx)
defer cancel()
- var list restic.Snapshots
+ var snapshots restic.Snapshots
for sn := range FindFilteredSnapshots(ctx, repo, opts.Host, opts.Tags, opts.Paths, args) {
- list = append(list, sn)
+ snapshots = append(snapshots, sn)
}
-
- if opts.Last {
- list = FilterLastSnapshots(list)
+ snapshotGroups, grouped, err := restic.GroupSnapshots(snapshots, opts.GroupBy)
+ if err != nil {
+ return err
}
- sort.Sort(sort.Reverse(list))
+ for k, list := range snapshotGroups {
+ if opts.Last {
+ list = FilterLastSnapshots(list)
+ }
+ sort.Sort(sort.Reverse(list))
+ snapshotGroups[k] = list
+ }
if gopts.JSON {
- err := printSnapshotsJSON(gopts.stdout, list)
+ err := printSnapshotGroupJSON(gopts.stdout, snapshotGroups, grouped)
if err != nil {
- Warnf("error printing snapshot: %v\n", err)
+ Warnf("error printing snapshots: %v\n", err)
}
return nil
}
- PrintSnapshots(gopts.stdout, list, nil, opts.Compact)
+
+ for k, list := range snapshotGroups {
+ if grouped {
+ err := PrintSnapshotGroupHeader(gopts.stdout, k)
+ if err != nil {
+ Warnf("error printing snapshots: %v\n", err)
+ return nil
+ }
+ }
+ PrintSnapshots(gopts.stdout, list, nil, opts.Compact)
+ }
return nil
}
@@ -223,6 +241,42 @@ func PrintSnapshots(stdout io.Writer, list restic.Snapshots, reasons []restic.Ke
tab.Write(stdout)
}
+// PrintSnapshotGroupHeader prints which group of the group-by option the
+// following snapshots belong to.
+// Prints nothing if we did not group at all.
+func PrintSnapshotGroupHeader(stdout io.Writer, groupKeyJSON string) error {
+ var key restic.SnapshotGroupKey
+ var err error
+
+ err = json.Unmarshal([]byte(groupKeyJSON), &key)
+ if err != nil {
+ return err
+ }
+
+ if key.Hostname == "" && key.Tags == nil && key.Paths == nil {
+ return nil
+ }
+
+ // Info
+ fmt.Fprintf(stdout, "snapshots")
+ var infoStrings []string
+ if key.Hostname != "" {
+ infoStrings = append(infoStrings, "host ["+key.Hostname+"]")
+ }
+ if key.Tags != nil {
+ infoStrings = append(infoStrings, "tags ["+strings.Join(key.Tags, ", ")+"]")
+ }
+ if key.Paths != nil {
+ infoStrings = append(infoStrings, "paths ["+strings.Join(key.Paths, ", ")+"]")
+ }
+ if infoStrings != nil {
+ fmt.Fprintf(stdout, " for (%s)", strings.Join(infoStrings, ", "))
+ }
+ fmt.Fprintf(stdout, ":\n")
+
+ return nil
+}
+
// Snapshot helps to print Snaphots as JSON with their ID included.
type Snapshot struct {
*restic.Snapshot
@@ -231,19 +285,58 @@ type Snapshot struct {
ShortID string `json:"short_id"`
}
+// SnapshotGroup helps to print SnapshotGroups as JSON with their GroupKey included.
+type SnapshotGroup struct {
+ GroupKey restic.SnapshotGroupKey `json:"group_key"`
+ Snapshots []Snapshot `json:"snapshots"`
+}
+
// printSnapshotsJSON writes the JSON representation of list to stdout.
-func printSnapshotsJSON(stdout io.Writer, list restic.Snapshots) error {
+func printSnapshotGroupJSON(stdout io.Writer, snGroups map[string]restic.Snapshots, grouped bool) error {
+ if grouped {
+ var snapshotGroups []SnapshotGroup
- var snapshots []Snapshot
+ for k, list := range snGroups {
+ var key restic.SnapshotGroupKey
+ var err error
+ var snapshots []Snapshot
- for _, sn := range list {
+ err = json.Unmarshal([]byte(k), &key)
+ if err != nil {
+ return err
+ }
- k := Snapshot{
- Snapshot: sn,
- ID: sn.ID(),
- ShortID: sn.ID().Str(),
+ for _, sn := range list {
+ k := Snapshot{
+ Snapshot: sn,
+ ID: sn.ID(),
+ ShortID: sn.ID().Str(),
+ }
+ snapshots = append(snapshots, k)
+ }
+
+ group := SnapshotGroup{
+ GroupKey: key,
+ Snapshots: snapshots,
+ }
+ snapshotGroups = append(snapshotGroups, group)
+ }
+
+ return json.NewEncoder(stdout).Encode(snapshotGroups)
+ }
+
+ // Old behavior
+ var snapshots []Snapshot
+
+ for _, list := range snGroups {
+ for _, sn := range list {
+ k := Snapshot{
+ Snapshot: sn,
+ ID: sn.ID(),
+ ShortID: sn.ID().Str(),
+ }
+ snapshots = append(snapshots, k)
}
- snapshots = append(snapshots, k)
}
return json.NewEncoder(stdout).Encode(snapshots)
diff --git a/cmd/restic/cmd_stats.go b/cmd/restic/cmd_stats.go
index 590ef5f14..709b20ec8 100644
--- a/cmd/restic/cmd_stats.go
+++ b/cmd/restic/cmd_stats.go
@@ -36,7 +36,8 @@ The modes are:
* raw-data: Counts the size of blobs in the repository, regardless of
how many files reference them.
* blobs-per-file: A combination of files-by-contents and raw-data.
-* Refer to the online manual for more details about each mode.
+
+Refer to the online manual for more details about each mode.
`,
DisableAutoGenTag: true,
RunE: func(cmd *cobra.Command, args []string) error {
diff --git a/cmd/restic/exclude.go b/cmd/restic/exclude.go
index 479f8a308..09d35b226 100644
--- a/cmd/restic/exclude.go
+++ b/cmd/restic/exclude.go
@@ -88,6 +88,18 @@ func rejectByPattern(patterns []string) RejectByNameFunc {
}
}
+// rejectByInsensitivePattern is like rejectByPattern but case insensitive.
+func rejectByInsensitivePattern(patterns []string) RejectByNameFunc {
+ for index, path := range patterns {
+ patterns[index] = strings.ToLower(path)
+ }
+
+ rejFunc := rejectByPattern(patterns)
+ return func(item string) bool {
+ return rejFunc(strings.ToLower(item))
+ }
+}
+
// rejectIfPresent returns a RejectByNameFunc which itself returns whether a path
// should be excluded. The RejectByNameFunc considers a file to be excluded when
// it resides in a directory with an exclusion file, that is specified by
diff --git a/cmd/restic/exclude_test.go b/cmd/restic/exclude_test.go
index 741dbdb64..6c8ce6e14 100644
--- a/cmd/restic/exclude_test.go
+++ b/cmd/restic/exclude_test.go
@@ -36,6 +36,33 @@ func TestRejectByPattern(t *testing.T) {
}
}
+func TestRejectByInsensitivePattern(t *testing.T) {
+ var tests = []struct {
+ filename string
+ reject bool
+ }{
+ {filename: "/home/user/foo.GO", reject: true},
+ {filename: "/home/user/foo.c", reject: false},
+ {filename: "/home/user/foobar", reject: false},
+ {filename: "/home/user/FOObar/x", reject: true},
+ {filename: "/home/user/README", reject: false},
+ {filename: "/home/user/readme.md", reject: true},
+ }
+
+ patterns := []string{"*.go", "README.md", "/home/user/foobar/*"}
+
+ for _, tc := range tests {
+ t.Run("", func(t *testing.T) {
+ reject := rejectByInsensitivePattern(patterns)
+ res := reject(tc.filename)
+ if res != tc.reject {
+ t.Fatalf("wrong result for filename %v: want %v, got %v",
+ tc.filename, tc.reject, res)
+ }
+ })
+ }
+}
+
func TestIsExcludedByFile(t *testing.T) {
const (
tagFilename = "CACHEDIR.TAG"
diff --git a/cmd/restic/global.go b/cmd/restic/global.go
index acff7ada6..1278cb4f0 100644
--- a/cmd/restic/global.go
+++ b/cmd/restic/global.go
@@ -1,6 +1,7 @@
package main
import (
+ "bufio"
"context"
"fmt"
"io"
@@ -37,7 +38,7 @@ import (
"os/exec"
)
-var version = "0.9.4"
+var version = "0.9.5"
// TimeFormat is the format used for all timestamps printed by restic.
const TimeFormat = "2006-01-02 15:04:05"
@@ -273,15 +274,10 @@ func resolvePassword(opts GlobalOptions) (string, error) {
// readPassword reads the password from the given reader directly.
func readPassword(in io.Reader) (password string, err error) {
- buf := make([]byte, 1000)
- n, err := io.ReadFull(in, buf)
- buf = buf[:n]
+ sc := bufio.NewScanner(in)
+ sc.Scan()
- if err != nil && errors.Cause(err) != io.ErrUnexpectedEOF {
- return "", errors.Wrap(err, "ReadFull")
- }
-
- return strings.TrimRight(string(buf), "\r\n"), nil
+ return sc.Text(), errors.Wrap(sc.Err(), "Scan")
}
// readPasswordTerminal reads the password from the given reader which must be a
@@ -336,13 +332,15 @@ func ReadPasswordTwice(gopts GlobalOptions, prompt1, prompt2 string) (string, er
if err != nil {
return "", err
}
- pw2, err := ReadPassword(gopts, prompt2)
- if err != nil {
- return "", err
- }
+ if stdinIsTerminal() {
+ pw2, err := ReadPassword(gopts, prompt2)
+ if err != nil {
+ return "", err
+ }
- if pw1 != pw2 {
- return "", errors.Fatal("passwords do not match")
+ if pw1 != pw2 {
+ return "", errors.Fatal("passwords do not match")
+ }
}
return pw1, nil
@@ -377,7 +375,7 @@ func OpenRepository(opts GlobalOptions) (*repository.Repository, error) {
return nil, err
}
- if stdoutIsTerminal() {
+ if stdoutIsTerminal() && !opts.JSON {
id := s.Config().ID
if len(id) > 8 {
id = id[:8]
diff --git a/cmd/restic/integration_test.go b/cmd/restic/integration_test.go
index e47000d34..612685f53 100644
--- a/cmd/restic/integration_test.go
+++ b/cmd/restic/integration_test.go
@@ -219,6 +219,35 @@ func testRunForget(t testing.TB, gopts GlobalOptions, args ...string) {
rtest.OK(t, runForget(opts, gopts, args))
}
+func testRunForgetJSON(t testing.TB, gopts GlobalOptions, args ...string) {
+ buf := bytes.NewBuffer(nil)
+ oldJSON := gopts.JSON
+ gopts.stdout = buf
+ gopts.JSON = true
+ defer func() {
+ gopts.stdout = os.Stdout
+ gopts.JSON = oldJSON
+ }()
+
+ opts := ForgetOptions{
+ DryRun: true,
+ Last: 1,
+ }
+
+ rtest.OK(t, runForget(opts, gopts, args))
+
+ var forgets []*ForgetGroup
+ rtest.OK(t, json.Unmarshal(buf.Bytes(), &forgets))
+
+ rtest.Assert(t, len(forgets) == 1,
+ "Expected 1 snapshot group, got %v", len(forgets))
+ rtest.Assert(t, len(forgets[0].Keep) == 1,
+ "Expected 1 snapshot to be kept, got %v", len(forgets[0].Keep))
+ rtest.Assert(t, len(forgets[0].Remove) == 2,
+ "Expected 2 snapshots to be removed, got %v", len(forgets[0].Remove))
+ return
+}
+
func testRunPrune(t testing.TB, gopts GlobalOptions) {
rtest.OK(t, runPrune(gopts))
}
@@ -1051,6 +1080,7 @@ func TestPrune(t *testing.T) {
rtest.Assert(t, len(snapshotIDs) == 3,
"expected 3 snapshot, got %v", snapshotIDs)
+ testRunForgetJSON(t, env.gopts)
testRunForget(t, env.gopts, firstSnapshot[0].String())
testRunPrune(t, env.gopts)
testRunCheck(t, env.gopts)