summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorFélix Sipma <felix+debian@gueux.org>2023-08-05 15:53:51 +0200
committerFélix Sipma <felix+debian@gueux.org>2023-08-05 15:57:51 +0200
commit03fcf0fc10f5c5a1e7d0f1f55bcd3e4ea5949036 (patch)
treebc3b128e615b5429a6ab4f49f75e8eaa902be428
parent8794b3d5a50ff126782a370e8a204c3da466a835 (diff)
New upstream version 0.16.0
-rw-r--r--.dockerignore12
-rw-r--r--.github/dependabot.yml4
-rw-r--r--.github/workflows/docker.yml59
-rw-r--r--.github/workflows/tests.yml89
-rw-r--r--.gitignore1
-rw-r--r--.golangci.yml14
-rw-r--r--CHANGELOG.md347
-rw-r--r--CONTRIBUTING.md43
-rw-r--r--VERSION2
-rw-r--r--build.go6
-rw-r--r--changelog/0.16.0_2023-07-31/issue-14957
-rw-r--r--changelog/0.16.0_2023-07-31/issue-175921
-rw-r--r--changelog/0.16.0_2023-07-31/issue-19268
-rw-r--r--changelog/0.16.0_2023-07-31/issue-235911
-rw-r--r--changelog/0.16.0_2023-07-31/issue-246810
-rw-r--r--changelog/0.16.0_2023-07-31/issue-256510
-rw-r--r--changelog/0.16.0_2023-07-31/issue-331112
-rw-r--r--changelog/0.16.0_2023-07-31/issue-33289
-rw-r--r--changelog/0.16.0_2023-07-31/issue-339711
-rw-r--r--changelog/0.16.0_2023-07-31/issue-36249
-rw-r--r--changelog/0.16.0_2023-07-31/issue-36988
-rw-r--r--changelog/0.16.0_2023-07-31/issue-387122
-rw-r--r--changelog/0.16.0_2023-07-31/issue-394117
-rw-r--r--changelog/0.16.0_2023-07-31/issue-41309
-rw-r--r--changelog/0.16.0_2023-07-31/issue-415912
-rw-r--r--changelog/0.16.0_2023-07-31/issue-41888
-rw-r--r--changelog/0.16.0_2023-07-31/issue-41999
-rw-r--r--changelog/0.16.0_2023-07-31/issue-42614
-rw-r--r--changelog/0.16.0_2023-07-31/issue-427411
-rw-r--r--changelog/0.16.0_2023-07-31/issue-43758
-rw-r--r--changelog/0.16.0_2023-07-31/issue-7198
-rw-r--r--changelog/0.16.0_2023-07-31/pull-32618
-rw-r--r--changelog/0.16.0_2023-07-31/pull-41767
-rw-r--r--changelog/0.16.0_2023-07-31/pull-42017
-rw-r--r--changelog/0.16.0_2023-07-31/pull-42206
-rw-r--r--changelog/0.16.0_2023-07-31/pull-42267
-rw-r--r--changelog/0.16.0_2023-07-31/pull-43188
-rw-r--r--changelog/0.16.0_2023-07-31/pull-43333
-rw-r--r--changelog/0.16.0_2023-07-31/pull-44008
-rw-r--r--changelog/TEMPLATE11
-rw-r--r--cmd/restic/cleanup.go7
-rw-r--r--cmd/restic/cmd_backup.go40
-rw-r--r--cmd/restic/cmd_backup_integration_test.go569
-rw-r--r--cmd/restic/cmd_cache.go2
-rw-r--r--cmd/restic/cmd_cat.go33
-rw-r--r--cmd/restic/cmd_check.go11
-rw-r--r--cmd/restic/cmd_check_integration_test.go34
-rw-r--r--cmd/restic/cmd_copy.go10
-rw-r--r--cmd/restic/cmd_copy_integration_test.go136
-rw-r--r--cmd/restic/cmd_debug.go14
-rw-r--r--cmd/restic/cmd_diff.go28
-rw-r--r--cmd/restic/cmd_diff_integration_test.go193
-rw-r--r--cmd/restic/cmd_dump.go9
-rw-r--r--cmd/restic/cmd_find.go40
-rw-r--r--cmd/restic/cmd_find_integration_test.go87
-rw-r--r--cmd/restic/cmd_forget.go107
-rw-r--r--cmd/restic/cmd_forget_integration_test.go13
-rw-r--r--cmd/restic/cmd_forget_test.go94
-rw-r--r--cmd/restic/cmd_generate.go22
-rw-r--r--cmd/restic/cmd_init.go23
-rw-r--r--cmd/restic/cmd_init_integration_test.go49
-rw-r--r--cmd/restic/cmd_key.go8
-rw-r--r--cmd/restic/cmd_key_integration_test.go145
-rw-r--r--cmd/restic/cmd_list.go8
-rw-r--r--cmd/restic/cmd_list_integration_test.go44
-rw-r--r--cmd/restic/cmd_ls.go15
-rw-r--r--cmd/restic/cmd_ls_integration_test.go19
-rw-r--r--cmd/restic/cmd_migrate.go2
-rw-r--r--cmd/restic/cmd_mount.go2
-rw-r--r--cmd/restic/cmd_mount_integration_test.go (renamed from cmd/restic/integration_fuse_test.go)10
-rw-r--r--cmd/restic/cmd_prune.go14
-rw-r--r--cmd/restic/cmd_prune_integration_test.go221
-rw-r--r--cmd/restic/cmd_recover.go2
-rw-r--r--cmd/restic/cmd_repair.go14
-rw-r--r--cmd/restic/cmd_repair_index.go (renamed from cmd/restic/cmd_rebuild_index.go)42
-rw-r--r--cmd/restic/cmd_repair_index_integration_test.go140
-rw-r--r--cmd/restic/cmd_repair_snapshots.go176
-rw-r--r--cmd/restic/cmd_repair_snapshots_integration_test.go135
-rw-r--r--cmd/restic/cmd_restore.go81
-rw-r--r--cmd/restic/cmd_restore_integration_test.go307
-rw-r--r--cmd/restic/cmd_rewrite.go59
-rw-r--r--cmd/restic/cmd_rewrite_integration_test.go (renamed from cmd/restic/integration_rewrite_test.go)0
-rw-r--r--cmd/restic/cmd_snapshots.go12
-rw-r--r--cmd/restic/cmd_snapshots_integration_test.go32
-rw-r--r--cmd/restic/cmd_stats.go192
-rw-r--r--cmd/restic/cmd_stats_test.go62
-rw-r--r--cmd/restic/cmd_tag.go2
-rw-r--r--cmd/restic/cmd_tag_integration_test.go94
-rw-r--r--cmd/restic/exclude.go33
-rw-r--r--cmd/restic/exclude_test.go48
-rw-r--r--cmd/restic/format.go14
-rw-r--r--cmd/restic/format_test.go61
-rw-r--r--cmd/restic/global.go278
-rw-r--r--cmd/restic/global_test.go18
-rw-r--r--cmd/restic/integration_filter_pattern_test.go8
-rw-r--r--cmd/restic/integration_helpers_test.go162
-rw-r--r--cmd/restic/integration_test.go2052
-rw-r--r--cmd/restic/local_layout_test.go41
-rw-r--r--cmd/restic/lock.go152
-rw-r--r--cmd/restic/lock_test.go212
-rw-r--r--cmd/restic/main.go18
-rw-r--r--doc/010_introduction.rst44
-rw-r--r--doc/020_installation.rst19
-rw-r--r--doc/030_preparing_a_new_repo.rst46
-rw-r--r--doc/040_backup.rst47
-rw-r--r--doc/045_working_with_repos.rst10
-rw-r--r--doc/050_restore.rst25
-rw-r--r--doc/060_forget.rst5
-rw-r--r--doc/070_encryption.rst2
-rw-r--r--doc/075_scripting.rst547
-rw-r--r--doc/077_troubleshooting.rst194
-rw-r--r--doc/090_participating.rst4
-rw-r--r--doc/110_talks.rst8
-rw-r--r--doc/bash-completion.sh290
-rw-r--r--doc/conf.py2
-rw-r--r--doc/design.rst79
-rw-r--r--doc/developer_information.rst4
-rw-r--r--doc/fish-completion.fish72
-rw-r--r--doc/index.rst1
-rw-r--r--doc/man/restic-backup.116
-rw-r--r--doc/man/restic-cache.110
-rw-r--r--doc/man/restic-cat.112
-rw-r--r--doc/man/restic-check.112
-rw-r--r--doc/man/restic-copy.110
-rw-r--r--doc/man/restic-diff.110
-rw-r--r--doc/man/restic-dump.110
-rw-r--r--doc/man/restic-find.114
-rw-r--r--doc/man/restic-forget.124
-rw-r--r--doc/man/restic-generate.110
-rw-r--r--doc/man/restic-init.110
-rw-r--r--doc/man/restic-key.110
-rw-r--r--doc/man/restic-list.110
-rw-r--r--doc/man/restic-ls.114
-rw-r--r--doc/man/restic-migrate.110
-rw-r--r--doc/man/restic-mount.110
-rw-r--r--doc/man/restic-prune.110
-rw-r--r--doc/man/restic-recover.110
-rw-r--r--doc/man/restic-repair-index.1 (renamed from doc/man/restic-rebuild-index.1)20
-rw-r--r--doc/man/restic-repair-snapshots.1157
-rw-r--r--doc/man/restic-repair.1113
-rw-r--r--doc/man/restic-restore.110
-rw-r--r--doc/man/restic-rewrite.110
-rw-r--r--doc/man/restic-self-update.110
-rw-r--r--doc/man/restic-snapshots.112
-rw-r--r--doc/man/restic-stats.110
-rw-r--r--doc/man/restic-tag.110
-rw-r--r--doc/man/restic-unlock.110
-rw-r--r--doc/man/restic-version.110
-rw-r--r--doc/man/restic.115
-rw-r--r--doc/manual_rest.rst23
-rw-r--r--doc/powershell-completion.ps121
-rw-r--r--doc/zsh-completion.zsh11
-rw-r--r--docker/Dockerfile4
-rw-r--r--docker/Dockerfile.release18
-rw-r--r--docker/README.md6
-rw-r--r--go.mod71
-rw-r--r--go.sum196
-rw-r--r--helpers/build-release-binaries/main.go89
-rw-r--r--helpers/prepare-release/main.go26
-rw-r--r--internal/archiver/archiver.go4
-rw-r--r--internal/archiver/archiver_test.go16
-rw-r--r--internal/archiver/archiver_unix_test.go3
-rw-r--r--internal/archiver/archiver_windows_test.go3
-rw-r--r--internal/archiver/blob_saver_test.go2
-rw-r--r--internal/archiver/file_saver_test.go2
-rw-r--r--internal/archiver/tree_saver.go3
-rw-r--r--internal/archiver/tree_saver_test.go2
-rw-r--r--internal/backend/azure/azure.go105
-rw-r--r--internal/backend/azure/azure_test.go68
-rw-r--r--internal/backend/azure/config.go38
-rw-r--r--internal/backend/azure/config_test.go31
-rw-r--r--internal/backend/b2/b2.go128
-rw-r--r--internal/backend/b2/b2_test.go48
-rw-r--r--internal/backend/b2/config.go18
-rw-r--r--internal/backend/b2/config_test.go37
-rw-r--r--internal/backend/dryrun/dry_backend.go11
-rw-r--r--internal/backend/dryrun/dry_backend_test.go2
-rw-r--r--internal/backend/foreground_unix.go26
-rw-r--r--internal/backend/gs/config.go19
-rw-r--r--internal/backend/gs/config_test.go34
-rw-r--r--internal/backend/gs/gs.go100
-rw-r--r--internal/backend/gs/gs_test.go61
-rw-r--r--internal/backend/layout/layout.go2
-rw-r--r--internal/backend/limiter/limiter_backend.go17
-rw-r--r--internal/backend/local/config.go4
-rw-r--r--internal/backend/local/config_test.go46
-rw-r--r--internal/backend/local/local.go73
-rw-r--r--internal/backend/local/local_test.go38
-rw-r--r--internal/backend/location/display_location_test.go115
-rw-r--r--internal/backend/location/location.go61
-rw-r--r--internal/backend/location/location_test.go353
-rw-r--r--internal/backend/location/registry.go106
-rw-r--r--internal/backend/logger/log.go79
-rw-r--r--internal/backend/mem/mem_backend.go69
-rw-r--r--internal/backend/mem/mem_backend_test.go49
-rw-r--r--internal/backend/rclone/backend.go19
-rw-r--r--internal/backend/rclone/backend_test.go39
-rw-r--r--internal/backend/rclone/config.go4
-rw-r--r--internal/backend/rclone/config_test.go43
-rw-r--r--internal/backend/rclone/internal_test.go4
-rw-r--r--internal/backend/rest/config.go4
-rw-r--r--internal/backend/rest/config_test.go90
-rw-r--r--internal/backend/rest/rest.go106
-rw-r--r--internal/backend/rest/rest_int_test.go6
-rw-r--r--internal/backend/rest/rest_test.go43
-rw-r--r--internal/backend/retry/backend_retry.go4
-rw-r--r--internal/backend/retry/testing.go2
-rw-r--r--internal/backend/s3/config.go23
-rw-r--r--internal/backend/s3/config_test.go67
-rw-r--r--internal/backend/s3/s3.go95
-rw-r--r--internal/backend/s3/s3_test.go187
-rw-r--r--internal/backend/sema/backend.go130
-rw-r--r--internal/backend/sema/backend_test.go235
-rw-r--r--internal/backend/sema/semaphore.go58
-rw-r--r--internal/backend/sftp/config.go6
-rw-r--r--internal/backend/sftp/config_test.go81
-rw-r--r--internal/backend/sftp/sftp.go88
-rw-r--r--internal/backend/sftp/sftp_test.go39
-rw-r--r--internal/backend/swift/config.go55
-rw-r--r--internal/backend/swift/config_test.go42
-rw-r--r--internal/backend/swift/swift.go89
-rw-r--r--internal/backend/swift/swift_test.go60
-rw-r--r--internal/backend/test/benchmarks.go8
-rw-r--r--internal/backend/test/config.go28
-rw-r--r--internal/backend/test/suite.go83
-rw-r--r--internal/backend/test/tests.go66
-rw-r--r--internal/backend/utils.go26
-rw-r--r--internal/backend/utils_test.go2
-rw-r--r--internal/cache/backend.go43
-rw-r--r--internal/cache/backend_test.go2
-rw-r--r--internal/cache/cache.go50
-rw-r--r--internal/cache/cache_test.go46
-rw-r--r--internal/cache/file_test.go17
-rw-r--r--internal/checker/checker_test.go4
-rw-r--r--internal/crypto/crypto.go2
-rw-r--r--internal/debug/stacktrace.go15
-rw-r--r--internal/debug/testing.go23
-rw-r--r--internal/dump/tar.go4
-rw-r--r--internal/dump/tar_test.go30
-rw-r--r--internal/fs/fs_reader.go12
-rw-r--r--internal/fs/preallocate_darwin.go (renamed from internal/restorer/preallocate_darwin.go)4
-rw-r--r--internal/fs/preallocate_linux.go (renamed from internal/restorer/preallocate_linux.go)4
-rw-r--r--internal/fs/preallocate_other.go (renamed from internal/restorer/preallocate_other.go)4
-rw-r--r--internal/fs/preallocate_test.go (renamed from internal/restorer/preallocate_test.go)7
-rw-r--r--internal/fs/vss.go2
-rw-r--r--internal/fuse/dir.go24
-rw-r--r--internal/fuse/file.go21
-rw-r--r--internal/fuse/fuse_test.go29
-rw-r--r--internal/fuse/link.go13
-rw-r--r--internal/fuse/other.go4
-rw-r--r--internal/fuse/snapshots_dir.go13
-rw-r--r--internal/fuse/xattr.go27
-rw-r--r--internal/index/index_parallel.go2
-rw-r--r--internal/index/indexmap.go167
-rw-r--r--internal/index/indexmap_test.go15
-rw-r--r--internal/index/master_index_test.go20
-rw-r--r--internal/migrations/s3_layout.go22
-rw-r--r--internal/migrations/upgrade_repo_v2.go2
-rw-r--r--internal/repository/key.go2
-rw-r--r--internal/repository/repack.go2
-rw-r--r--internal/repository/repository.go37
-rw-r--r--internal/repository/repository_test.go16
-rw-r--r--internal/repository/testing.go2
-rw-r--r--internal/restic/backend.go35
-rw-r--r--internal/restic/backend_test.go38
-rw-r--r--internal/restic/config_test.go10
-rw-r--r--internal/restic/find_test.go10
-rw-r--r--internal/restic/json.go2
-rw-r--r--internal/restic/lock.go64
-rw-r--r--internal/restic/lock_test.go77
-rw-r--r--internal/restic/node.go62
-rw-r--r--internal/restic/node_test.go162
-rw-r--r--internal/restic/node_unix_test.go13
-rw-r--r--internal/restic/node_xattr.go6
-rw-r--r--internal/restic/parallel.go2
-rw-r--r--internal/restic/repository.go8
-rw-r--r--internal/restic/snapshot.go4
-rw-r--r--internal/restic/snapshot_find.go39
-rw-r--r--internal/restic/snapshot_find_test.go62
-rw-r--r--internal/restic/snapshot_group.go85
-rw-r--r--internal/restic/snapshot_group_test.go50
-rw-r--r--internal/restic/snapshot_policy.go57
-rw-r--r--internal/restic/snapshot_policy_test.go61
-rw-r--r--internal/restic/testdata/policy_keep_snapshots_1618
-rw-r--r--internal/restic/testdata/policy_keep_snapshots_1718
-rw-r--r--internal/restic/testdata/policy_keep_snapshots_3516
-rw-r--r--internal/restic/testdata/policy_keep_snapshots_361782
-rw-r--r--internal/restic/testdata/policy_keep_snapshots_371872
-rw-r--r--internal/restic/testdata/policy_keep_snapshots_381538
-rw-r--r--internal/restic/testdata/policy_keep_snapshots_39211
-rw-r--r--internal/restic/testdata/used_blobs_snapshot06
-rw-r--r--internal/restic/testdata/used_blobs_snapshot14
-rw-r--r--internal/restic/testdata/used_blobs_snapshot28
-rw-r--r--internal/restic/testing.go81
-rw-r--r--internal/restic/testing_test.go4
-rw-r--r--internal/restic/tree.go31
-rw-r--r--internal/restic/tree_test.go34
-rw-r--r--internal/restorer/filerestorer.go16
-rw-r--r--internal/restorer/filerestorer_test.go6
-rw-r--r--internal/restorer/fileswriter.go3
-rw-r--r--internal/restorer/restorer.go52
-rw-r--r--internal/restorer/restorer_test.go12
-rw-r--r--internal/restorer/restorer_unix_test.go57
-rw-r--r--internal/selfupdate/download.go14
-rw-r--r--internal/selfupdate/download_unix.go2
-rw-r--r--internal/test/helpers.go26
-rw-r--r--internal/ui/backup/json.go21
-rw-r--r--internal/ui/backup/progress.go34
-rw-r--r--internal/ui/backup/progress_test.go16
-rw-r--r--internal/ui/backup/rate_estimator.go98
-rw-r--r--internal/ui/backup/rate_estimator_test.go213
-rw-r--r--internal/ui/backup/text.go10
-rw-r--r--internal/ui/format.go52
-rw-r--r--internal/ui/format_test.go50
-rw-r--r--internal/ui/progress/counter_test.go2
-rw-r--r--internal/ui/progress/updater_test.go3
-rw-r--r--internal/ui/restore/json.go69
-rw-r--r--internal/ui/restore/json_test.go29
-rw-r--r--internal/ui/restore/progress.go90
-rw-r--r--internal/ui/restore/progress_test.go137
-rw-r--r--internal/ui/restore/text.go47
-rw-r--r--internal/ui/restore/text_test.go41
-rw-r--r--internal/ui/table/table.go4
-rw-r--r--internal/ui/termstatus/background_linux.go8
-rw-r--r--internal/ui/termstatus/status.go57
-rw-r--r--internal/ui/termstatus/status_test.go70
-rw-r--r--internal/ui/termstatus/terminal_posix.go4
-rw-r--r--internal/ui/termstatus/terminal_unix.go4
-rw-r--r--internal/ui/termstatus/terminal_windows.go8
-rw-r--r--internal/walker/rewriter.go118
-rw-r--r--internal/walker/rewriter_test.go210
-rw-r--r--internal/walker/walker_test.go7
332 files changed, 16347 insertions, 5800 deletions
diff --git a/.dockerignore b/.dockerignore
deleted file mode 100644
index 2e1b785e0..000000000
--- a/.dockerignore
+++ /dev/null
@@ -1,12 +0,0 @@
-# Folders
-.git/
-.github/
-changelog/
-doc/
-docker/
-helpers/
-
-# Files
-.gitignore
-.golangci.yml
-*.md
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
index d608a8244..07f6b705b 100644
--- a/.github/dependabot.yml
+++ b/.github/dependabot.yml
@@ -4,10 +4,10 @@ updates:
- package-ecosystem: "gomod"
directory: "/" # Location of package manifests
schedule:
- interval: "weekly"
+ interval: "monthly"
# Dependencies listed in .github/workflows/*.yml
- package-ecosystem: "github-actions"
directory: "/"
schedule:
- interval: "weekly"
+ interval: "monthly"
diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml
new file mode 100644
index 000000000..43c427109
--- /dev/null
+++ b/.github/workflows/docker.yml
@@ -0,0 +1,59 @@
+
+name: Create and publish a Docker image
+
+on:
+ push:
+ tags:
+ - 'v*'
+ branches:
+ - 'master'
+
+env:
+ REGISTRY: ghcr.io
+ IMAGE_NAME: ${{ github.repository }}
+
+jobs:
+ build-and-push-image:
+ if: github.repository == 'restic/restic'
+ runs-on: ubuntu-latest
+ permissions:
+ contents: read
+ packages: write
+
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v3
+
+ - name: Log in to the Container registry
+ uses: docker/login-action@65b78e6e13532edd9afa3aa52ac7964289d1a9c1
+ with:
+ registry: ${{ env.REGISTRY }}
+ username: ${{ github.actor }}
+ password: ${{ secrets.GITHUB_TOKEN }}
+
+ - name: Extract metadata (tags, labels) for Docker
+ id: meta
+ uses: docker/metadata-action@9ec57ed1fcdbf14dcef7dfbe97b2010124a938b7
+ with:
+ images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
+ tags: |
+ type=ref,event=branch
+ type=semver,pattern={{version}}
+ type=semver,pattern={{major}}.{{minor}}
+
+ - name: Set up QEMU
+ uses: docker/setup-qemu-action@e81a89b1732b9c48d79cd809d8d81d79c4647a18
+
+ - name: Set up Docker Buildx
+ uses: docker/setup-buildx-action@4b4e9c3e2d4531116a6f8ba8e71fc6e2cb6e6c8c
+
+ - name: Build and push Docker image
+ uses: docker/build-push-action@f2a1d5e99d037542a71f64918e516c093c6f3fc4
+ with:
+ push: true
+ context: .
+ file: docker/Dockerfile.release
+ platforms: linux/386,linux/amd64,linux/arm,linux/arm64
+ pull: true
+ tags: ${{ steps.meta.outputs.tags }}
+ labels: ${{ steps.meta.outputs.labels }}
diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index 9c9555543..2ec4591f0 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -7,9 +7,13 @@ on:
# run tests for all pull requests
pull_request:
+ merge_group:
+
+permissions:
+ contents: read
env:
- latest_go: "1.19.x"
+ latest_go: "1.20.x"
GO111MODULE: on
jobs:
@@ -19,28 +23,33 @@ jobs:
# list of jobs to run:
include:
- job_name: Windows
- go: 1.19.x
+ go: 1.20.x
os: windows-latest
- job_name: macOS
- go: 1.19.x
+ go: 1.20.x
os: macOS-latest
test_fuse: false
- job_name: Linux
- go: 1.19.x
+ go: 1.20.x
os: ubuntu-latest
test_cloud_backends: true
test_fuse: true
check_changelog: true
- job_name: Linux (race)
- go: 1.19.x
+ go: 1.20.x
os: ubuntu-latest
test_fuse: true
test_opts: "-race"
- job_name: Linux
+ go: 1.19.x
+ os: ubuntu-latest
+ test_fuse: true
+
+ - job_name: Linux
go: 1.18.x
os: ubuntu-latest
test_fuse: true
@@ -53,7 +62,7 @@ jobs:
steps:
- name: Set up Go ${{ matrix.go }}
- uses: actions/setup-go@v3
+ uses: actions/setup-go@v4
with:
go-version: ${{ matrix.go }}
@@ -132,6 +141,14 @@ jobs:
run: |
go run build.go
+ - name: Minimal test
+ run: |
+ ./restic init
+ ./restic backup .
+ env:
+ RESTIC_REPOSITORY: ../testrepo
+ RESTIC_PASSWORD: password
+
- name: Run local Tests
env:
RESTIC_TEST_FUSE: ${{ matrix.test_fuse }}
@@ -179,7 +196,7 @@ jobs:
# own repo, otherwise the secrets are not available
# Skip for Dependabot pull requests as these are run without secrets
# https://docs.github.com/en/code-security/dependabot/working-with-dependabot/automating-dependabot-with-github-actions#responding-to-events
- if: (github.event_name == 'push' || github.event.pull_request.head.repo.full_name == github.repository) && (github.actor != 'dependabot[bot]') && matrix.test_cloud_backends
+ if: ((github.repository == 'restic/restic' && github.event_name == 'push') || github.event.pull_request.head.repo.full_name == github.repository) && (github.actor != 'dependabot[bot]') && matrix.test_cloud_backends
- name: Check changelog files with calens
run: |
@@ -193,56 +210,41 @@ jobs:
cross_compile:
strategy:
- # ATTENTION: the list of architectures must be in sync with helpers/build-release-binaries/main.go!
matrix:
# run cross-compile in three batches parallel so the overall tests run faster
- targets:
- - "linux/386 linux/amd64 linux/arm linux/arm64 linux/ppc64le linux/mips linux/mipsle linux/mips64 linux/mips64le linux/riscv64 linux/s390x"
-
- - "openbsd/386 openbsd/amd64 \
- freebsd/386 freebsd/amd64 freebsd/arm \
- aix/ppc64 \
- darwin/amd64 darwin/arm64"
-
- - "netbsd/386 netbsd/amd64 \
- windows/386 windows/amd64 \
- solaris/amd64"
+ subset:
+ - "0/3"
+ - "1/3"
+ - "2/3"
env:
GOPROXY: https://proxy.golang.org
runs-on: ubuntu-latest
- name: Cross Compile for ${{ matrix.targets }}
+ name: Cross Compile for subset ${{ matrix.subset }}
steps:
- name: Set up Go ${{ env.latest_go }}
- uses: actions/setup-go@v3
+ uses: actions/setup-go@v4
with:
go-version: ${{ env.latest_go }}
- - name: Install gox
- run: |
- go install github.com/mitchellh/gox@latest
-
- name: Check out code
uses: actions/checkout@v3
- - name: Cross-compile with gox for ${{ matrix.targets }}
- env:
- GOFLAGS: "-trimpath"
- GOX_ARCHS: "${{ matrix.targets }}"
+ - name: Cross-compile for subset ${{ matrix.subset }}
run: |
- mkdir build-output
- gox -parallel 2 -verbose -osarch "$GOX_ARCHS" -output "build-output/{{.Dir}}_{{.OS}}_{{.Arch}}" ./cmd/restic
- gox -parallel 2 -verbose -osarch "$GOX_ARCHS" -tags debug -output "build-output/{{.Dir}}_{{.OS}}_{{.Arch}}_debug" ./cmd/restic
+ mkdir build-output build-output-debug
+ go run ./helpers/build-release-binaries/main.go -o build-output -s . --platform-subset ${{ matrix.subset }}
+ go run ./helpers/build-release-binaries/main.go -o build-output-debug -s . --platform-subset ${{ matrix.subset }} --tags debug
lint:
name: lint
runs-on: ubuntu-latest
steps:
- name: Set up Go ${{ env.latest_go }}
- uses: actions/setup-go@v3
+ uses: actions/setup-go@v4
with:
go-version: ${{ env.latest_go }}
@@ -253,9 +255,7 @@ jobs:
uses: golangci/golangci-lint-action@v3
with:
# Required: the version of golangci-lint is required and must be specified without patch version: we always use the latest patch version.
- version: v1.49
- # Optional: show only new issues if it's a pull request. The default value is `false`.
- only-new-issues: true
+ version: v1.52.2
args: --verbose --timeout 5m
# only run golangci-lint for pull requests, otherwise ALL hints get
@@ -269,6 +269,21 @@ jobs:
go mod tidy
git diff --exit-code go.mod go.sum
+ analyze:
+ name: Analyze results
+ needs: [test, cross_compile, lint]
+ if: always()
+
+ permissions: # no need to access code
+ contents: none
+
+ runs-on: ubuntu-latest
+ steps:
+ - name: Decide whether the needed jobs succeeded or failed
+ uses: re-actors/alls-green@05ac9388f0aebcb5727afa17fcccfecd6f8ec5fe
+ with:
+ jobs: ${{ toJSON(needs) }}
+
docker:
name: docker
runs-on: ubuntu-latest
@@ -301,7 +316,7 @@ jobs:
- name: Build and push
id: docker_build
- uses: docker/build-push-action@v3
+ uses: docker/build-push-action@v4
with:
push: false
context: .
diff --git a/.gitignore b/.gitignore
index 812d314b6..b7201c26b 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,3 +1,4 @@
/restic
+/restic.exe
/.vagrant
/.vscode
diff --git a/.golangci.yml b/.golangci.yml
index d97b3bd9b..98b5f9e03 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -10,13 +10,10 @@ linters:
# make sure all errors returned by functions are handled
- errcheck
- # find unused code
- - deadcode
-
# show how code can be simplified
- gosimple
- # # make sure code is formatted
+ # make sure code is formatted
- gofmt
# examine code and report suspicious constructs, such as Printf calls whose
@@ -35,12 +32,6 @@ linters:
# find unused variables, functions, structs, types, etc.
- unused
- # find unused struct fields
- - structcheck
-
- # find unused global variables
- - varcheck
-
# parse and typecheck code
- typecheck
@@ -57,3 +48,6 @@ issues:
- don't use ALL_CAPS in Go names; use CamelCase
# revive: lots of packages don't have such a comment
- "package-comments: should have a package comment"
+ # staticcheck: there's no easy way to replace these packages
+ - "SA1019: \"golang.org/x/crypto/poly1305\" is deprecated"
+ - "SA1019: \"golang.org/x/crypto/openpgp\" is deprecated"
diff --git a/CHANGELOG.md b/CHANGELOG.md
index a502d49e2..9f9b99043 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,350 @@
+Changelog for restic 0.16.0 (2023-07-31)
+=======================================
+
+The following sections list the changes in restic 0.16.0 relevant to
+restic users. The changes are ordered by importance.
+
+Summary
+-------
+
+ * Fix #2565: Support "unlimited" in `forget --keep-*` options
+ * Fix #3311: Support non-UTF8 paths as symlink target
+ * Fix #4199: Avoid lock refresh issues on slow network connections
+ * Fix #4274: Improve lock refresh handling after standby
+ * Fix #4319: Correctly clean up status bar output of the `backup` command
+ * Fix #4333: `generate` and `init` no longer silently ignore unexpected arguments
+ * Fix #4400: Ignore missing folders in `rest` backend
+ * Chg #4176: Fix JSON message type of `scan_finished` for the `backup` command
+ * Chg #4201: Require Go 1.20 for Solaris builds
+ * Enh #426: Show progress bar during restore
+ * Enh #719: Add `--retry-lock` option
+ * Enh #1495: Sort snapshots by timestamp in `restic find`
+ * Enh #1759: Add `repair index` and `repair snapshots` commands
+ * Enh #1926: Allow certificate paths to be passed through environment variables
+ * Enh #2359: Provide multi-platform Docker images
+ * Enh #2468: Add support for non-global Azure clouds
+ * Enh #2679: Reduce file fragmentation for local backend
+ * Enh #3328: Reduce memory usage by up to 25%
+ * Enh #3397: Improve accuracy of ETA displayed during backup
+ * Enh #3624: Keep oldest snapshot when there are not enough snapshots
+ * Enh #3698: Add support for Managed / Workload Identity to `azure` backend
+ * Enh #3871: Support `<snapshot>:<subfolder>` syntax to select subfolders
+ * Enh #3941: Support `--group-by` for backup parent selection
+ * Enh #4130: Cancel current command if cache becomes unusable
+ * Enh #4159: Add `--human-readable` option to `ls` and `find` commands
+ * Enh #4188: Include restic version in snapshot metadata
+ * Enh #4220: Add `jq` binary to Docker image
+ * Enh #4226: Allow specifying region of new buckets in the `gs` backend
+ * Enh #4375: Add support for extended attributes on symlinks
+
+Details
+-------
+
+ * Bugfix #2565: Support "unlimited" in `forget --keep-*` options
+
+ Restic would previously forget snapshots that should have been kept when a negative value was
+ passed to the `--keep-*` options. Negative values are now forbidden. To keep all snapshots,
+ the special value `unlimited` is now supported. For example, `--keep-monthly unlimited`
+ will keep all monthly snapshots.
+
+ https://github.com/restic/restic/issues/2565
+ https://github.com/restic/restic/pull/4234
+
+ * Bugfix #3311: Support non-UTF8 paths as symlink target
+
+ Earlier restic versions did not correctly `backup` and `restore` symlinks that contain a
+ non-UTF8 target. Note that this only affected systems that still use a non-Unicode encoding
+ for filesystem paths.
+
+ The repository format is now extended to add support for such symlinks. Please note that
+ snapshots must have been created with at least restic version 0.16.0 for `restore` to
+ correctly handle non-UTF8 symlink targets when restoring them.
+
+ https://github.com/restic/restic/issues/3311
+ https://github.com/restic/restic/pull/3802
+
+ * Bugfix #4199: Avoid lock refresh issues on slow network connections
+
+ On network connections with a low upload speed, backups and other operations could fail with
+ the error message `Fatal: failed to refresh lock in time`.
+
+ This has now been fixed by reworking the lock refresh handling.
+
+ https://github.com/restic/restic/issues/4199
+ https://github.com/restic/restic/pull/4304
+
+ * Bugfix #4274: Improve lock refresh handling after standby
+
+ If the restic process was stopped or the host running restic entered standby during a long
+ running operation such as a backup, this previously resulted in the operation failing with
+ `Fatal: failed to refresh lock in time`.
+
+ This has now been fixed such that restic first checks whether it is safe to continue the current
+ operation and only throws an error if not.
+
+ https://github.com/restic/restic/issues/4274
+ https://github.com/restic/restic/pull/4374
+
+ * Bugfix #4319: Correctly clean up status bar output of the `backup` command
+
+ Due to a regression in restic 0.15.2, the status bar of the `backup` command could leave some
+ output behind. This happened if filenames were printed that are wider than the current
+ terminal width. This has now been fixed.
+
+ https://github.com/restic/restic/issues/4319
+ https://github.com/restic/restic/pull/4318
+
+ * Bugfix #4333: `generate` and `init` no longer silently ignore unexpected arguments
+
+ https://github.com/restic/restic/pull/4333
+
+ * Bugfix #4400: Ignore missing folders in `rest` backend
+
+ If a repository accessed via the REST backend was missing folders, then restic would fail with
+ an error while trying to list the data in the repository. This has been now fixed.
+
+ https://github.com/restic/rest-server/issues/235
+ https://github.com/restic/restic/pull/4400
+
+ * Change #4176: Fix JSON message type of `scan_finished` for the `backup` command
+
+ Restic incorrectly set the `message_type` of the `scan_finished` message to `status`
+ instead of `verbose_status`. This has now been corrected so that the messages report the
+ correct type.
+
+ https://github.com/restic/restic/pull/4176
+
+ * Change #4201: Require Go 1.20 for Solaris builds
+
+ Building restic on Solaris now requires Go 1.20, as the library used to access Azure uses the
+ mmap syscall, which is only available on Solaris starting from Go 1.20. All other platforms
+ however continue to build with Go 1.18.
+
+ https://github.com/restic/restic/pull/4201
+
+ * Enhancement #426: Show progress bar during restore
+
+ The `restore` command now shows a progress report while restoring files.
+
+ Example: `[0:42] 5.76% 23 files 12.98 MiB, total 3456 files 23.54 GiB`
+
+ JSON output is now also supported.
+
+ https://github.com/restic/restic/issues/426
+ https://github.com/restic/restic/issues/3413
+ https://github.com/restic/restic/issues/3627
+ https://github.com/restic/restic/pull/3991
+ https://github.com/restic/restic/pull/4314
+ https://forum.restic.net/t/progress-bar-for-restore/5210
+
+ * Enhancement #719: Add `--retry-lock` option
+
+ This option allows specifying a duration for which restic will wait if the repository is
+ already locked.
+
+ https://github.com/restic/restic/issues/719
+ https://github.com/restic/restic/pull/2214
+ https://github.com/restic/restic/pull/4107
+
+ * Enhancement #1495: Sort snapshots by timestamp in `restic find`
+
+ The `find` command used to print snapshots in an arbitrary order. Restic now prints snapshots
+ sorted by timestamp.
+
+ https://github.com/restic/restic/issues/1495
+ https://github.com/restic/restic/pull/4409
+
+ * Enhancement #1759: Add `repair index` and `repair snapshots` commands
+
+ The `rebuild-index` command has been renamed to `repair index`. The old name will still work,
+ but is deprecated.
+
+ When a snapshot was damaged, the only option up to now was to completely forget the snapshot,
+ even if only some unimportant files in it were damaged and other files were still fine.
+
+ Restic now has a `repair snapshots` command, which can salvage any non-damaged files and parts
+ of files in the snapshots by removing damaged directories and missing file contents. Please
+ note that the damaged data may still be lost and see the "Troubleshooting" section in the
+ documentation for more details.
+
+ https://github.com/restic/restic/issues/1759
+ https://github.com/restic/restic/issues/1714
+ https://github.com/restic/restic/issues/1798
+ https://github.com/restic/restic/issues/2334
+ https://github.com/restic/restic/pull/2876
+ https://forum.restic.net/t/corrupted-repo-how-to-repair/799
+ https://forum.restic.net/t/recovery-options-for-damaged-repositories/1571
+
+ * Enhancement #1926: Allow certificate paths to be passed through environment variables
+
+ Restic will now read paths to certificates from the environment variables `RESTIC_CACERT` or
+ `RESTIC_TLS_CLIENT_CERT` if `--cacert` or `--tls-client-cert` are not specified.
+
+ https://github.com/restic/restic/issues/1926
+ https://github.com/restic/restic/pull/4384
+
+ * Enhancement #2359: Provide multi-platform Docker images
+
+ The official Docker images are now built for the architectures linux/386, linux/amd64,
+ linux/arm and linux/arm64.
+
+ As an alternative to the Docker Hub, the Docker images are also available on ghcr.io, the GitHub
+ Container Registry.
+
+ https://github.com/restic/restic/issues/2359
+ https://github.com/restic/restic/issues/4269
+ https://github.com/restic/restic/pull/4364
+
+ * Enhancement #2468: Add support for non-global Azure clouds
+
+ The `azure` backend previously only supported storages using the global domain
+ `core.windows.net`. This meant that backups to other domains such as Azure China
+ (`core.chinacloudapi.cn`) or Azure Germany (`core.cloudapi.de`) were not supported.
+ Restic now allows overriding the global domain using the environment variable
+ `AZURE_ENDPOINT_SUFFIX`.
+
+ https://github.com/restic/restic/issues/2468
+ https://github.com/restic/restic/pull/4387
+
+ * Enhancement #2679: Reduce file fragmentation for local backend
+
+ Before this change, local backend files could become fragmented. Now restic will try to
+ preallocate space for pack files to avoid their fragmentation.
+
+ https://github.com/restic/restic/issues/2679
+ https://github.com/restic/restic/pull/3261
+
+ * Enhancement #3328: Reduce memory usage by up to 25%
+
+ The in-memory index has been optimized to be more garbage collection friendly. Restic now
+ defaults to `GOGC=50` to run the Go garbage collector more frequently.
+
+ https://github.com/restic/restic/issues/3328
+ https://github.com/restic/restic/pull/4352
+ https://github.com/restic/restic/pull/4353
+
+ * Enhancement #3397: Improve accuracy of ETA displayed during backup
+
+ Restic's `backup` command displayed an ETA that did not adapt when the rate of progress made
+ during the backup changed during the course of the backup.
+
+ Restic now uses recent progress when computing the ETA. It is important to realize that the
+ estimate may still be wrong, because restic cannot predict the future, but the hope is that the
+ ETA will be more accurate in most cases.
+
+ https://github.com/restic/restic/issues/3397
+ https://github.com/restic/restic/pull/3563
+
+ * Enhancement #3624: Keep oldest snapshot when there are not enough snapshots
+
+ The `forget` command now additionally preserves the oldest snapshot if fewer snapshots than
+ allowed by the `--keep-*` parameters would otherwise be kept. This maximizes the amount of
+ history kept within the specified limits.
+
+ https://github.com/restic/restic/issues/3624
+ https://github.com/restic/restic/pull/4366
+ https://forum.restic.net/t/keeping-yearly-snapshots-policy-when-backup-began-during-the-year/4670/2
+
+ * Enhancement #3698: Add support for Managed / Workload Identity to `azure` backend
+
+ Restic now additionally supports authenticating to Azure using Workload Identity or Managed
+ Identity credentials, which are automatically injected in several environments such as a
+ managed Kubernetes cluster.
+
+ https://github.com/restic/restic/issues/3698
+ https://github.com/restic/restic/pull/4029
+
+ * Enhancement #3871: Support `<snapshot>:<subfolder>` syntax to select subfolders
+
+ Commands like `diff` or `restore` always worked with the full snapshot. This did not allow
+ comparing only a specific subfolder or only restoring that folder (`restore --include
+ subfolder` filters the restored files, but still creates the directories included in
+ `subfolder`).
+
+ The commands `diff`, `dump`, `ls` and `restore` now support the `<snapshot>:<subfolder>`
+ syntax, where `snapshot` is the ID of a snapshot (or the string `latest`) and `subfolder` is a
+ path within the snapshot. The commands will then only work with the specified path of the
+ snapshot. The `subfolder` must be a path to a folder as returned by `ls`. Two examples:
+
+ `restic restore -t target latest:/some/path` `restic diff 12345678:/some/path
+ 90abcef:/some/path`
+
+ For debugging purposes, the `cat` command now supports `cat tree <snapshot>:<subfolder>` to
+ return the directory metadata for the given subfolder.
+
+ https://github.com/restic/restic/issues/3871
+ https://github.com/restic/restic/pull/4334
+
+ * Enhancement #3941: Support `--group-by` for backup parent selection
+
+ Previously, the `backup` command by default selected the parent snapshot based on the
+ hostname and the backup targets. When the backup path list changed, the `backup` command was
+ unable to determine a suitable parent snapshot and had to read all files again.
+
+ The new `--group-by` option for the `backup` command allows filtering snapshots for the
+ parent selection by `host`, `paths` and `tags`. It defaults to `host,paths` which selects the
+ latest snapshot with hostname and paths matching those of the backup run. This matches the
+ behavior of prior restic versions.
+
+ The new `--group-by` option should be set to the same value as passed to `forget --group-by`.
+
+ https://github.com/restic/restic/issues/3941
+ https://github.com/restic/restic/pull/4081
+
+ * Enhancement #4130: Cancel current command if cache becomes unusable
+
+ If the cache directory was removed or ran out of space while restic was running, this would
+ previously cause further caching attempts to fail and thereby drastically slow down the
+ command execution. Now, the currently running command is instead canceled.
+
+ https://github.com/restic/restic/issues/4130
+ https://github.com/restic/restic/pull/4166
+
+ * Enhancement #4159: Add `--human-readable` option to `ls` and `find` commands
+
+ Previously, when using the `-l` option with the `ls` and `find` commands, the displayed size
+ was always in bytes, without an option for a more human readable format such as MiB or GiB.
+
+ The new `--human-readable` option will convert longer size values into more human friendly
+ values with an appropriate suffix depending on the output size. For example, a size of
+ `14680064` will be shown as `14.000 MiB`.
+
+ https://github.com/restic/restic/issues/4159
+ https://github.com/restic/restic/pull/4351
+
+ * Enhancement #4188: Include restic version in snapshot metadata
+
+ The restic version used to backup a snapshot is now included in its metadata and shown when
+ inspecting a snapshot using `restic cat snapshot <snapshotID>` or `restic snapshots
+ --json`.
+
+ https://github.com/restic/restic/issues/4188
+ https://github.com/restic/restic/pull/4378
+
+ * Enhancement #4220: Add `jq` binary to Docker image
+
+ The Docker image now contains `jq`, which can be useful to process JSON data output by restic.
+
+ https://github.com/restic/restic/pull/4220
+
+ * Enhancement #4226: Allow specifying region of new buckets in the `gs` backend
+
+ Previously, buckets used by the Google Cloud Storage backend would always get created in the
+ "us" region. It is now possible to specify the region where a bucket should be created by using
+ the `-o gs.region=us` option.
+
+ https://github.com/restic/restic/pull/4226
+
+ * Enhancement #4375: Add support for extended attributes on symlinks
+
+ Restic now supports extended attributes on symlinks when backing up, restoring, or
+ FUSE-mounting snapshots. This includes, for example, the `security.selinux` xattr on Linux
+ distributions that use SELinux.
+
+ https://github.com/restic/restic/issues/4375
+ https://github.com/restic/restic/pull/4379
+
+
Changelog for restic 0.15.2 (2023-04-24)
=======================================
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 4b4be0757..39a829337 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -58,6 +58,19 @@ Please be aware that the debug log file will contain potentially sensitive
things like file and directory names, so please either redact it before
uploading it somewhere or post only the parts that are really relevant.
+If restic gets stuck, please also include a stacktrace in the description.
+On non-Windows systems, you can send a SIGQUIT signal to restic or press
+`Ctrl-\` to achieve the same result. This causes restic to print a stacktrace
+and then exit immediately. This will not damage your repository, however,
+it might be necessary to manually clean up stale lock files using
+`restic unlock`.
+
+On Windows, please set the environment variable `RESTIC_DEBUG_STACKTRACE_SIGINT`
+to `true` and press `Ctrl-C` to create a stacktrace.
+
+If you think restic uses too much memory or a too large cache directory, then
+please include the output of `restic stats --mode debug`.
+
Development Environment
=======================
@@ -78,10 +91,40 @@ Then use the `go` tool to build restic:
$ ./restic version
restic 0.14.0-dev (compiled manually) compiled with go1.19 on linux/amd64
+To create a debug build use:
+
+ $ go build -tags debug ./cmd/restic
+
You can run all tests with the following command:
$ go test ./...
+
+Performance and Memory Usage Issues
+===================================
+
+Debug builds of restic support the `--block-profile`, `--cpu-profile`,
+`--mem-profile`, and `--trace-profile` options which collect performance data
+that later on can be analyzed using the go tools:
+
+ $ restic --cpu-profile . [...]
+ $ go tool pprof -http localhost:12345 cpu.pprof
+
+To analyze a trace profile use `go tool trace -http=localhost:12345 trace.out`.
+
+As the memory usage of restic changes over time, it may be useful to capture a
+snapshot of the current heap. This is possible using the `--listen-profile`
+option. Then while restic runs you can query and afterwards analyze the heap statistics.
+
+ $ restic --listen-profile localhost:12345 [...]
+ $ curl http://localhost:12345/debug/pprof/heap -o heap.pprof
+ $ go tool pprof -http localhost:12345 heap.pprof
+
+Further useful tools are setting the environment variable `GODEBUG=gctrace=1`,
+which provides information about garbage collector runs. For a graphical variant
+combine this with gcvis.
+
+
Providing Patches
=================
diff --git a/VERSION b/VERSION
index 4312e0d0c..04a373efe 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-0.15.2
+0.16.0
diff --git a/build.go b/build.go
index dddc3b964..b3b7f5eee 100644
--- a/build.go
+++ b/build.go
@@ -380,6 +380,12 @@ func main() {
}
}
+ solarisMinVersion := GoVersion{Major: 1, Minor: 20, Patch: 0}
+ if env["GOARCH"] == "solaris" && !goVersion.AtLeast(solarisMinVersion) {
+ fmt.Fprintf(os.Stderr, "Detected version %s is too old, restic requires at least %s for Solaris\n", goVersion, solarisMinVersion)
+ os.Exit(1)
+ }
+
verbosePrintf("detected Go version %v\n", goVersion)
preserveSymbols := false
diff --git a/changelog/0.16.0_2023-07-31/issue-1495 b/changelog/0.16.0_2023-07-31/issue-1495
new file mode 100644
index 000000000..b29f0d711
--- /dev/null
+++ b/changelog/0.16.0_2023-07-31/issue-1495
@@ -0,0 +1,7 @@
+Enhancement: Sort snapshots by timestamp in `restic find`
+
+The `find` command used to print snapshots in an arbitrary order. Restic now
+prints snapshots sorted by timestamp.
+
+https://github.com/restic/restic/issues/1495
+https://github.com/restic/restic/pull/4409
diff --git a/changelog/0.16.0_2023-07-31/issue-1759 b/changelog/0.16.0_2023-07-31/issue-1759
new file mode 100644
index 000000000..6717dfe66
--- /dev/null
+++ b/changelog/0.16.0_2023-07-31/issue-1759
@@ -0,0 +1,21 @@
+Enhancement: Add `repair index` and `repair snapshots` commands
+
+The `rebuild-index` command has been renamed to `repair index`. The old name
+will still work, but is deprecated.
+
+When a snapshot was damaged, the only option up to now was to completely forget
+the snapshot, even if only some unimportant files in it were damaged and other
+files were still fine.
+
+Restic now has a `repair snapshots` command, which can salvage any non-damaged
+files and parts of files in the snapshots by removing damaged directories and
+missing file contents. Please note that the damaged data may still be lost
+and see the "Troubleshooting" section in the documentation for more details.
+
+https://github.com/restic/restic/issues/1759
+https://github.com/restic/restic/issues/1714
+https://github.com/restic/restic/issues/1798
+https://github.com/restic/restic/issues/2334
+https://github.com/restic/restic/pull/2876
+https://forum.restic.net/t/corrupted-repo-how-to-repair/799
+https://forum.restic.net/t/recovery-options-for-damaged-repositories/1571
diff --git a/changelog/0.16.0_2023-07-31/issue-1926 b/changelog/0.16.0_2023-07-31/issue-1926
new file mode 100644
index 000000000..96bc16240
--- /dev/null
+++ b/changelog/0.16.0_2023-07-31/issue-1926
@@ -0,0 +1,8 @@
+Enhancement: Allow certificate paths to be passed through environment variables
+
+Restic will now read paths to certificates from the environment variables
+`RESTIC_CACERT` or `RESTIC_TLS_CLIENT_CERT` if `--cacert` or `--tls-client-cert`
+are not specified.
+
+https://github.com/restic/restic/issues/1926
+https://github.com/restic/restic/pull/4384
diff --git a/changelog/0.16.0_2023-07-31/issue-2359 b/changelog/0.16.0_2023-07-31/issue-2359
new file mode 100644
index 000000000..9a62aedfb
--- /dev/null
+++ b/changelog/0.16.0_2023-07-31/issue-2359
@@ -0,0 +1,11 @@
+Enhancement: Provide multi-platform Docker images
+
+The official Docker images are now built for the architectures linux/386,
+linux/amd64, linux/arm and linux/arm64.
+
+As an alternative to the Docker Hub, the Docker images are also
+available on ghcr.io, the GitHub Container Registry.
+
+https://github.com/restic/restic/issues/2359
+https://github.com/restic/restic/issues/4269
+https://github.com/restic/restic/pull/4364
diff --git a/changelog/0.16.0_2023-07-31/issue-2468 b/changelog/0.16.0_2023-07-31/issue-2468
new file mode 100644
index 000000000..512c5e76a
--- /dev/null
+++ b/changelog/0.16.0_2023-07-31/issue-2468
@@ -0,0 +1,10 @@
+Enhancement: Add support for non-global Azure clouds
+
+The `azure` backend previously only supported storages using the global domain
+`core.windows.net`. This meant that backups to other domains such as Azure
+China (`core.chinacloudapi.cn`) or Azure Germany (`core.cloudapi.de`) were
+not supported. Restic now allows overriding the global domain using the
+environment variable `AZURE_ENDPOINT_SUFFIX`.
+
+https://github.com/restic/restic/issues/2468
+https://github.com/restic/restic/pull/4387
diff --git a/changelog/0.16.0_2023-07-31/issue-2565 b/changelog/0.16.0_2023-07-31/issue-2565
new file mode 100644
index 000000000..d7d5a25eb
--- /dev/null
+++ b/changelog/0.16.0_2023-07-31/issue-2565
@@ -0,0 +1,10 @@
+Bugfix: Support "unlimited" in `forget --keep-*` options
+
+Restic would previously forget snapshots that should have been kept when a
+negative value was passed to the `--keep-*` options. Negative values are now
+forbidden. To keep all snapshots, the special value `unlimited` is now
+supported. For example, `--keep-monthly unlimited` will keep all monthly
+snapshots.
+
+https://github.com/restic/restic/issues/2565
+https://github.com/restic/restic/pull/4234
diff --git a/changelog/0.16.0_2023-07-31/issue-3311 b/changelog/0.16.0_2023-07-31/issue-3311
new file mode 100644
index 000000000..af619b157
--- /dev/null
+++ b/changelog/0.16.0_2023-07-31/issue-3311
@@ -0,0 +1,12 @@
+Bugfix: Support non-UTF8 paths as symlink target
+
+Earlier restic versions did not correctly `backup` and `restore` symlinks that
+contain a non-UTF8 target. Note that this only affected systems that still use
+a non-Unicode encoding for filesystem paths.
+
+The repository format is now extended to add support for such symlinks. Please
+note that snapshots must have been created with at least restic version 0.16.0
+for `restore` to correctly handle non-UTF8 symlink targets when restoring them.
+
+https://github.com/restic/restic/issues/3311
+https://github.com/restic/restic/pull/3802
diff --git a/changelog/0.16.0_2023-07-31/issue-3328 b/changelog/0.16.0_2023-07-31/issue-3328
new file mode 100644
index 000000000..a79a4818b
--- /dev/null
+++ b/changelog/0.16.0_2023-07-31/issue-3328
@@ -0,0 +1,9 @@
+Enhancement: Reduce memory usage by up to 25%
+
+The in-memory index has been optimized to be more garbage collection friendly.
+Restic now defaults to `GOGC=50` to run the Go garbage collector more
+frequently.
+
+https://github.com/restic/restic/issues/3328
+https://github.com/restic/restic/pull/4352
+https://github.com/restic/restic/pull/4353
diff --git a/changelog/0.16.0_2023-07-31/issue-3397 b/changelog/0.16.0_2023-07-31/issue-3397
new file mode 100644
index 000000000..31c5e19fb
--- /dev/null
+++ b/changelog/0.16.0_2023-07-31/issue-3397
@@ -0,0 +1,11 @@
+Enhancement: Improve accuracy of ETA displayed during backup
+
+Restic's `backup` command displayed an ETA that did not adapt when the rate of
+progress made during the backup changed during the course of the backup.
+
+Restic now uses recent progress when computing the ETA. It is important to
+realize that the estimate may still be wrong, because restic cannot predict
+the future, but the hope is that the ETA will be more accurate in most cases.
+
+https://github.com/restic/restic/issues/3397
+https://github.com/restic/restic/pull/3563
diff --git a/changelog/0.16.0_2023-07-31/issue-3624 b/changelog/0.16.0_2023-07-31/issue-3624
new file mode 100644
index 000000000..a2a11d2d4
--- /dev/null
+++ b/changelog/0.16.0_2023-07-31/issue-3624
@@ -0,0 +1,9 @@
+Enhancement: Keep oldest snapshot when there are not enough snapshots
+
+The `forget` command now additionally preserves the oldest snapshot if fewer
+snapshots than allowed by the `--keep-*` parameters would otherwise be kept.
+This maximizes the amount of history kept within the specified limits.
+
+https://github.com/restic/restic/issues/3624
+https://github.com/restic/restic/pull/4366
+https://forum.restic.net/t/keeping-yearly-snapshots-policy-when-backup-began-during-the-year/4670/2
diff --git a/changelog/0.16.0_2023-07-31/issue-3698 b/changelog/0.16.0_2023-07-31/issue-3698
new file mode 100644
index 000000000..95fc6bd69
--- /dev/null
+++ b/changelog/0.16.0_2023-07-31/issue-3698
@@ -0,0 +1,8 @@
+Enhancement: Add support for Managed / Workload Identity to `azure` backend
+
+Restic now additionally supports authenticating to Azure using Workload
+Identity or Managed Identity credentials, which are automatically injected in
+several environments such as a managed Kubernetes cluster.
+
+https://github.com/restic/restic/issues/3698
+https://github.com/restic/restic/pull/4029
diff --git a/changelog/0.16.0_2023-07-31/issue-3871 b/changelog/0.16.0_2023-07-31/issue-3871
new file mode 100644
index 000000000..45131bc19
--- /dev/null
+++ b/changelog/0.16.0_2023-07-31/issue-3871
@@ -0,0 +1,22 @@
+Enhancement: Support `<snapshot>:<subfolder>` syntax to select subfolders
+
+Commands like `diff` or `restore` always worked with the full snapshot. This
+did not allow comparing only a specific subfolder or only restoring that folder
+(`restore --include subfolder` filters the restored files, but still creates the
+directories included in `subfolder`).
+
+The commands `diff`, `dump`, `ls` and `restore` now support the
+`<snapshot>:<subfolder>` syntax, where `snapshot` is the ID of a snapshot (or
+the string `latest`) and `subfolder` is a path within the snapshot. The
+commands will then only work with the specified path of the snapshot. The
+`subfolder` must be a path to a folder as returned by `ls`. Two examples:
+
+`restic restore -t target latest:/some/path`
+`restic diff 12345678:/some/path 90abcef:/some/path`
+
+For debugging purposes, the `cat` command now supports `cat tree
+<snapshot>:<subfolder>` to return the directory metadata for the given
+subfolder.
+
+https://github.com/restic/restic/issues/3871
+https://github.com/restic/restic/pull/4334
diff --git a/changelog/0.16.0_2023-07-31/issue-3941 b/changelog/0.16.0_2023-07-31/issue-3941
new file mode 100644
index 000000000..ff56d52cc
--- /dev/null
+++ b/changelog/0.16.0_2023-07-31/issue-3941
@@ -0,0 +1,17 @@
+Enhancement: Support `--group-by` for backup parent selection
+
+Previously, the `backup` command by default selected the parent snapshot based
+on the hostname and the backup targets. When the backup path list changed, the
+`backup` command was unable to determine a suitable parent snapshot and had to
+read all files again.
+
+The new `--group-by` option for the `backup` command allows filtering snapshots
+for the parent selection by `host`, `paths` and `tags`. It defaults to
+`host,paths` which selects the latest snapshot with hostname and paths matching
+those of the backup run. This matches the behavior of prior restic versions.
+
+The new `--group-by` option should be set to the same value as passed to
+`forget --group-by`.
+
+https://github.com/restic/restic/issues/3941
+https://github.com/restic/restic/pull/4081
diff --git a/changelog/0.16.0_2023-07-31/issue-4130 b/changelog/0.16.0_2023-07-31/issue-4130
new file mode 100644
index 000000000..eeebf2c62
--- /dev/null
+++ b/changelog/0.16.0_2023-07-31/issue-4130
@@ -0,0 +1,9 @@
+Enhancement: Cancel current command if cache becomes unusable
+
+If the cache directory was removed or ran out of space while restic was
+running, this would previously cause further caching attempts to fail and
+thereby drastically slow down the command execution. Now, the currently running
+command is instead canceled.
+
+https://github.com/restic/restic/issues/4130
+https://github.com/restic/restic/pull/4166
diff --git a/changelog/0.16.0_2023-07-31/issue-4159 b/changelog/0.16.0_2023-07-31/issue-4159
new file mode 100644
index 000000000..4ef2fa846
--- /dev/null
+++ b/changelog/0.16.0_2023-07-31/issue-4159
@@ -0,0 +1,12 @@
+Enhancement: Add `--human-readable` option to `ls` and `find` commands
+
+Previously, when using the `-l` option with the `ls` and `find` commands, the
+displayed size was always in bytes, without an option for a more human readable
+format such as MiB or GiB.
+
+The new `--human-readable` option will convert longer size values into more
+human friendly values with an appropriate suffix depending on the output size.
+For example, a size of `14680064` will be shown as `14.000 MiB`.
+
+https://github.com/restic/restic/issues/4159
+https://github.com/restic/restic/pull/4351
diff --git a/changelog/0.16.0_2023-07-31/issue-4188 b/changelog/0.16.0_2023-07-31/issue-4188
new file mode 100644
index 000000000..9bd5e6aca
--- /dev/null
+++ b/changelog/0.16.0_2023-07-31/issue-4188
@@ -0,0 +1,8 @@
+Enhancement: Include restic version in snapshot metadata
+
+The restic version used to backup a snapshot is now included in its metadata
+and shown when inspecting a snapshot using `restic cat snapshot <snapshotID>`
+or `restic snapshots --json`.
+
+https://github.com/restic/restic/issues/4188
+https://github.com/restic/restic/pull/4378
diff --git a/changelog/0.16.0_2023-07-31/issue-4199 b/changelog/0.16.0_2023-07-31/issue-4199
new file mode 100644
index 000000000..407fa43c6
--- /dev/null
+++ b/changelog/0.16.0_2023-07-31/issue-4199
@@ -0,0 +1,9 @@
+Bugfix: Avoid lock refresh issues on slow network connections
+
+On network connections with a low upload speed, backups and other operations
+could fail with the error message `Fatal: failed to refresh lock in time`.
+
+This has now been fixed by reworking the lock refresh handling.
+
+https://github.com/restic/restic/issues/4199
+https://github.com/restic/restic/pull/4304
diff --git a/changelog/0.16.0_2023-07-31/issue-426 b/changelog/0.16.0_2023-07-31/issue-426
new file mode 100644
index 000000000..f50cddbf7
--- /dev/null
+++ b/changelog/0.16.0_2023-07-31/issue-426
@@ -0,0 +1,14 @@
+Enhancement: Show progress bar during restore
+
+The `restore` command now shows a progress report while restoring files.
+
+Example: `[0:42] 5.76% 23 files 12.98 MiB, total 3456 files 23.54 GiB`
+
+JSON output is now also supported.
+
+https://github.com/restic/restic/issues/426
+https://github.com/restic/restic/issues/3413
+https://github.com/restic/restic/issues/3627
+https://github.com/restic/restic/pull/3991
+https://github.com/restic/restic/pull/4314
+https://forum.restic.net/t/progress-bar-for-restore/5210
diff --git a/changelog/0.16.0_2023-07-31/issue-4274 b/changelog/0.16.0_2023-07-31/issue-4274
new file mode 100644
index 000000000..01c5f2177
--- /dev/null
+++ b/changelog/0.16.0_2023-07-31/issue-4274
@@ -0,0 +1,11 @@
+Bugfix: Improve lock refresh handling after standby
+
+If the restic process was stopped or the host running restic entered standby
+during a long running operation such as a backup, this previously resulted in
+the operation failing with `Fatal: failed to refresh lock in time`.
+
+This has now been fixed such that restic first checks whether it is safe to
+continue the current operation and only throws an error if not.
+
+https://github.com/restic/restic/issues/4274
+https://github.com/restic/restic/pull/4374
diff --git a/changelog/0.16.0_2023-07-31/issue-4375 b/changelog/0.16.0_2023-07-31/issue-4375
new file mode 100644
index 000000000..6ce68c2ba
--- /dev/null
+++ b/changelog/0.16.0_2023-07-31/issue-4375
@@ -0,0 +1,8 @@
+Enhancement: Add support for extended attributes on symlinks
+
+Restic now supports extended attributes on symlinks when backing up,
+restoring, or FUSE-mounting snapshots. This includes, for example, the
+`security.selinux` xattr on Linux distributions that use SELinux.
+
+https://github.com/restic/restic/issues/4375
+https://github.com/restic/restic/pull/4379
diff --git a/changelog/0.16.0_2023-07-31/issue-719 b/changelog/0.16.0_2023-07-31/issue-719
new file mode 100644
index 000000000..897a6bf1f
--- /dev/null
+++ b/changelog/0.16.0_2023-07-31/issue-719
@@ -0,0 +1,8 @@
+Enhancement: Add `--retry-lock` option
+
+This option allows specifying a duration for which restic will wait if the
+repository is already locked.
+
+https://github.com/restic/restic/issues/719
+https://github.com/restic/restic/pull/2214
+https://github.com/restic/restic/pull/4107
diff --git a/changelog/0.16.0_2023-07-31/pull-3261 b/changelog/0.16.0_2023-07-31/pull-3261
new file mode 100644
index 000000000..f7073ed7b
--- /dev/null
+++ b/changelog/0.16.0_2023-07-31/pull-3261
@@ -0,0 +1,8 @@
+Enhancement: Reduce file fragmentation for local backend
+
+Before this change, local backend files could become fragmented.
+Now restic will try to preallocate space for pack files to avoid
+their fragmentation.
+
+https://github.com/restic/restic/issues/2679
+https://github.com/restic/restic/pull/3261
diff --git a/changelog/0.16.0_2023-07-31/pull-4176 b/changelog/0.16.0_2023-07-31/pull-4176
new file mode 100644
index 000000000..8adf0b40f
--- /dev/null
+++ b/changelog/0.16.0_2023-07-31/pull-4176
@@ -0,0 +1,7 @@
+Change: Fix JSON message type of `scan_finished` for the `backup` command
+
+Restic incorrectly set the `message_type` of the `scan_finished` message to
+`status` instead of `verbose_status`. This has now been corrected so that
+the messages report the correct type.
+
+https://github.com/restic/restic/pull/4176
diff --git a/changelog/0.16.0_2023-07-31/pull-4201 b/changelog/0.16.0_2023-07-31/pull-4201
new file mode 100644
index 000000000..8e4135f39
--- /dev/null
+++ b/changelog/0.16.0_2023-07-31/pull-4201
@@ -0,0 +1,7 @@
+Change: Require Go 1.20 for Solaris builds
+
+Building restic on Solaris now requires Go 1.20, as the library used to access
+Azure uses the mmap syscall, which is only available on Solaris starting from
+Go 1.20. All other platforms however continue to build with Go 1.18.
+
+https://github.com/restic/restic/pull/4201
diff --git a/changelog/0.16.0_2023-07-31/pull-4220 b/changelog/0.16.0_2023-07-31/pull-4220
new file mode 100644
index 000000000..e832adf49
--- /dev/null
+++ b/changelog/0.16.0_2023-07-31/pull-4220
@@ -0,0 +1,6 @@
+Enhancement: Add `jq` binary to Docker image
+
+The Docker image now contains `jq`, which can be useful to process JSON data
+output by restic.
+
+https://github.com/restic/restic/pull/4220
diff --git a/changelog/0.16.0_2023-07-31/pull-4226 b/changelog/0.16.0_2023-07-31/pull-4226
new file mode 100644
index 000000000..1e04b35fb
--- /dev/null
+++ b/changelog/0.16.0_2023-07-31/pull-4226
@@ -0,0 +1,7 @@
+Enhancement: Allow specifying region of new buckets in the `gs` backend
+
+Previously, buckets used by the Google Cloud Storage backend would always get
+created in the "us" region. It is now possible to specify the region where a
+bucket should be created by using the `-o gs.region=us` option.
+
+https://github.com/restic/restic/pull/4226
diff --git a/changelog/0.16.0_2023-07-31/pull-4318 b/changelog/0.16.0_2023-07-31/pull-4318
new file mode 100644
index 000000000..f13525f96
--- /dev/null
+++ b/changelog/0.16.0_2023-07-31/pull-4318
@@ -0,0 +1,8 @@
+Bugfix: Correctly clean up status bar output of the `backup` command
+
+Due to a regression in restic 0.15.2, the status bar of the `backup` command
+could leave some output behind. This happened if filenames were printed that
+are wider than the current terminal width. This has now been fixed.
+
+https://github.com/restic/restic/issues/4319
+https://github.com/restic/restic/pull/4318
diff --git a/changelog/0.16.0_2023-07-31/pull-4333 b/changelog/0.16.0_2023-07-31/pull-4333
new file mode 100644
index 000000000..329967337
--- /dev/null
+++ b/changelog/0.16.0_2023-07-31/pull-4333
@@ -0,0 +1,3 @@
+Bugfix: `generate` and `init` no longer silently ignore unexpected arguments
+
+https://github.com/restic/restic/pull/4333
diff --git a/changelog/0.16.0_2023-07-31/pull-4400 b/changelog/0.16.0_2023-07-31/pull-4400
new file mode 100644
index 000000000..bce09842f
--- /dev/null
+++ b/changelog/0.16.0_2023-07-31/pull-4400
@@ -0,0 +1,8 @@
+Bugfix: Ignore missing folders in `rest` backend
+
+If a repository accessed via the REST backend was missing folders, then restic
+would fail with an error while trying to list the data in the repository. This
+has now been fixed.
+
+https://github.com/restic/restic/pull/4400
+https://github.com/restic/rest-server/issues/235
diff --git a/changelog/TEMPLATE b/changelog/TEMPLATE
index d512a2dc3..9304359b3 100644
--- a/changelog/TEMPLATE
+++ b/changelog/TEMPLATE
@@ -1,16 +1,17 @@
# The first line must start with Bugfix:, Enhancement: or Change:,
-# including the colon. Use present tense. Remove lines starting with '#'
-# from this template.
+# including the colon. Use present tense and the imperative mood. Remove
+# lines starting with '#' from this template.
Enhancement: Allow custom bar in the foo command
# Describe the problem in the past tense, the new behavior in the present
# tense. Mention the affected commands, backends, operating systems, etc.
# Focus on user-facing behavior, not the implementation.
+# Use "Restic now ..." instead of "We have changed ...".
Restic foo always used the system-wide bar when deciding how to frob an
-item in the baz backend. It now permits selecting the bar with --bar or
-the environment variable RESTIC_BAR. The system-wide bar is still the
-default.
+item in the `baz` backend. It now permits selecting the bar with `--bar`
+or the environment variable `RESTIC_BAR`. The system-wide bar is still
+the default.
# The last section is a list of issue, PR and forum URLs.
# The first issue ID determines the filename for the changelog entry:
diff --git a/cmd/restic/cleanup.go b/cmd/restic/cleanup.go
index 61af72802..75933fe96 100644
--- a/cmd/restic/cleanup.go
+++ b/cmd/restic/cleanup.go
@@ -62,6 +62,12 @@ func CleanupHandler(c <-chan os.Signal) {
debug.Log("signal %v received, cleaning up", s)
Warnf("%ssignal %v received, cleaning up\n", clearLine(0), s)
+ if val, _ := os.LookupEnv("RESTIC_DEBUG_STACKTRACE_SIGINT"); val != "" {
+ _, _ = os.Stderr.WriteString("\n--- STACKTRACE START ---\n\n")
+ _, _ = os.Stderr.WriteString(debug.DumpStacktrace())
+ _, _ = os.Stderr.WriteString("\n--- STACKTRACE END ---\n")
+ }
+
code := 0
if s == syscall.SIGINT {
@@ -78,5 +84,6 @@ func CleanupHandler(c <-chan os.Signal) {
// given exit code.
func Exit(code int) {
code = RunCleanupHandlers(code)
+ debug.Log("exiting with status code %d", code)
os.Exit(code)
}
diff --git a/cmd/restic/cmd_backup.go b/cmd/restic/cmd_backup.go
index ec901828b..6b5706855 100644
--- a/cmd/restic/cmd_backup.go
+++ b/cmd/restic/cmd_backup.go
@@ -89,6 +89,7 @@ type BackupOptions struct {
excludePatternOptions
Parent string
+ GroupBy restic.SnapshotGroupByOptions
Force bool
ExcludeOtherFS bool
ExcludeIfPresent []string
@@ -120,7 +121,9 @@ func init() {
cmdRoot.AddCommand(cmdBackup)
f := cmdBackup.Flags()
- f.StringVar(&backupOptions.Parent, "parent", "", "use this parent `snapshot` (default: last snapshot in the repository that has the same target files/directories, and is not newer than the snapshot time)")
+ f.StringVar(&backupOptions.Parent, "parent", "", "use this parent `snapshot` (default: latest snapshot in the group determined by --group-by and not newer than the timestamp determined by --time)")
+ backupOptions.GroupBy = restic.SnapshotGroupByOptions{Host: true, Path: true}
+ f.VarP(&backupOptions.GroupBy, "group-by", "g", "`group` snapshots by host, paths and/or tags, separated by comma (disable grouping with '')")
f.BoolVarP(&backupOptions.Force, "force", "f", false, `force re-reading the target files/directories (overrides the "parent" flag)`)
initExcludePatternOptions(f, &backupOptions.excludePatternOptions)
@@ -305,7 +308,7 @@ func (opts BackupOptions) Check(gopts GlobalOptions, args []string) error {
// collectRejectByNameFuncs returns a list of all functions which may reject data
// from being saved in a snapshot based on path only
-func collectRejectByNameFuncs(opts BackupOptions, repo *repository.Repository, targets []string) (fs []RejectByNameFunc, err error) {
+func collectRejectByNameFuncs(opts BackupOptions, repo *repository.Repository) (fs []RejectByNameFunc, err error) {
// exclude restic cache
if repo.Cache != nil {
f, err := rejectResticCache(repo)
@@ -340,7 +343,7 @@ func collectRejectByNameFuncs(opts BackupOptions, repo *repository.Repository, t
// collectRejectFuncs returns a list of all functions which may reject data
// from being saved in a snapshot based on path and file info
-func collectRejectFuncs(opts BackupOptions, repo *repository.Repository, targets []string) (fs []RejectFunc, err error) {
+func collectRejectFuncs(opts BackupOptions, targets []string) (fs []RejectFunc, err error) {
// allowed devices
if opts.ExcludeOtherFS && !opts.Stdin {
f, err := rejectByDevice(targets)
@@ -439,13 +442,18 @@ func findParentSnapshot(ctx context.Context, repo restic.Repository, opts Backup
if snName == "" {
snName = "latest"
}
- f := restic.SnapshotFilter{
- Hosts: []string{opts.Host},
- Paths: targets,
- TimestampLimit: timeStampLimit,
+ f := restic.SnapshotFilter{TimestampLimit: timeStampLimit}
+ if opts.GroupBy.Host {
+ f.Hosts = []string{opts.Host}
+ }
+ if opts.GroupBy.Path {
+ f.Paths = targets
+ }
+ if opts.GroupBy.Tag {
+ f.Tags = []restic.TagList{opts.Tags.Flatten()}
}
- sn, err := f.FindLatest(ctx, repo.Backend(), repo, snName)
+ sn, _, err := f.FindLatest(ctx, repo.Backend(), repo, snName)
// Snapshot not found is ok if no explicit parent was set
if opts.Parent == "" && errors.Is(err, restic.ErrNoSnapshotFound) {
err = nil
@@ -498,20 +506,23 @@ func runBackup(ctx context.Context, opts BackupOptions, gopts GlobalOptions, ter
if !gopts.JSON {
progressPrinter.V("lock repository")
}
- lock, ctx, err := lockRepo(ctx, repo)
- defer unlockRepo(lock)
- if err != nil {
- return err
+ if !opts.DryRun {
+ var lock *restic.Lock
+ lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
+ defer unlockRepo(lock)
+ if err != nil {
+ return err
+ }
}
// rejectByNameFuncs collect functions that can reject items from the backup based on path only
- rejectByNameFuncs, err := collectRejectByNameFuncs(opts, repo, targets)
+ rejectByNameFuncs, err := collectRejectByNameFuncs(opts, repo)
if err != nil {
return err
}
// rejectFuncs collect functions that can reject items from the backup based on path and file info
- rejectFuncs, err := collectRejectFuncs(opts, repo, targets)
+ rejectFuncs, err := collectRejectFuncs(opts, targets)
if err != nil {
return err
}
@@ -637,6 +648,7 @@ func runBackup(ctx context.Context, opts BackupOptions, gopts GlobalOptions, ter
Time: timeStamp,
Hostname: opts.Host,
ParentSnapshot: parentSnapshot,
+ ProgramVersion: "restic " + version,
}
if !gopts.JSON {
diff --git a/cmd/restic/cmd_backup_integration_test.go b/cmd/restic/cmd_backup_integration_test.go
new file mode 100644
index 000000000..fb7bef633
--- /dev/null
+++ b/cmd/restic/cmd_backup_integration_test.go
@@ -0,0 +1,569 @@
+package main
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "runtime"
+ "testing"
+
+ "github.com/restic/restic/internal/fs"
+ "github.com/restic/restic/internal/restic"
+ rtest "github.com/restic/restic/internal/test"
+ "github.com/restic/restic/internal/ui/termstatus"
+)
+
+func testRunBackupAssumeFailure(t testing.TB, dir string, target []string, opts BackupOptions, gopts GlobalOptions) error {
+ return withTermStatus(gopts, func(ctx context.Context, term *termstatus.Terminal) error {
+ t.Logf("backing up %v in %v", target, dir)
+ if dir != "" {
+ cleanup := rtest.Chdir(t, dir)
+ defer cleanup()
+ }
+
+ opts.GroupBy = restic.SnapshotGroupByOptions{Host: true, Path: true}
+ return runBackup(ctx, opts, gopts, term, target)
+ })
+}
+
+func testRunBackup(t testing.TB, dir string, target []string, opts BackupOptions, gopts GlobalOptions) {
+ err := testRunBackupAssumeFailure(t, dir, target, opts, gopts)
+ rtest.Assert(t, err == nil, "Error while backing up")
+}
+
+func TestBackup(t *testing.T) {
+ testBackup(t, false)
+}
+
+func TestBackupWithFilesystemSnapshots(t *testing.T) {
+ if runtime.GOOS == "windows" && fs.HasSufficientPrivilegesForVSS() == nil {
+ testBackup(t, true)
+ }
+}
+
+func testBackup(t *testing.T, useFsSnapshot bool) {
+ env, cleanup := withTestEnvironment(t)
+ defer cleanup()
+
+ testSetupBackupData(t, env)
+ opts := BackupOptions{UseFsSnapshot: useFsSnapshot}
+
+ // first backup
+ testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts)
+ testListSnapshots(t, env.gopts, 1)
+
+ testRunCheck(t, env.gopts)
+ stat1 := dirStats(env.repo)
+
+ // second backup, implicit incremental
+ testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts)
+ snapshotIDs := testListSnapshots(t, env.gopts, 2)
+
+ stat2 := dirStats(env.repo)
+ if stat2.size > stat1.size+stat1.size/10 {
+ t.Error("repository size has grown by more than 10 percent")
+ }
+ t.Logf("repository grown by %d bytes", stat2.size-stat1.size)
+
+ testRunCheck(t, env.gopts)
+ // third backup, explicit incremental
+ opts.Parent = snapshotIDs[0].String()
+ testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts)
+ snapshotIDs = testListSnapshots(t, env.gopts, 3)
+
+ stat3 := dirStats(env.repo)
+ if stat3.size > stat1.size+stat1.size/10 {
+ t.Error("repository size has grown by more than 10 percent")
+ }
+ t.Logf("repository grown by %d bytes", stat3.size-stat2.size)
+
+ // restore all backups and compare
+ for i, snapshotID := range snapshotIDs {
+ restoredir := filepath.Join(env.base, fmt.Sprintf("restore%d", i))
+ t.Logf("restoring snapshot %v to %v", snapshotID.Str(), restoredir)
+ testRunRestore(t, env.gopts, restoredir, snapshotID)
+ diff := directoriesContentsDiff(env.testdata, filepath.Join(restoredir, "testdata"))
+ rtest.Assert(t, diff == "", "directories are not equal: %v", diff)
+ }
+
+ testRunCheck(t, env.gopts)
+}
+
+func TestBackupWithRelativePath(t *testing.T) {
+ env, cleanup := withTestEnvironment(t)
+ defer cleanup()
+
+ testSetupBackupData(t, env)
+ opts := BackupOptions{}
+
+ // first backup
+ testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts)
+ firstSnapshotID := testListSnapshots(t, env.gopts, 1)[0]
+
+ // second backup, implicit incremental
+ testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts)
+
+ // test that the correct parent snapshot was used
+ latestSn, _ := testRunSnapshots(t, env.gopts)
+ rtest.Assert(t, latestSn != nil, "missing latest snapshot")
+ rtest.Assert(t, latestSn.Parent != nil && latestSn.Parent.Equal(firstSnapshotID), "second snapshot selected unexpected parent %v instead of %v", latestSn.Parent, firstSnapshotID)
+}
+
+func TestBackupParentSelection(t *testing.T) {
+ env, cleanup := withTestEnvironment(t)
+ defer cleanup()
+
+ testSetupBackupData(t, env)
+ opts := BackupOptions{}
+
+ // first backup
+ testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata/0/0"}, opts, env.gopts)
+ firstSnapshotID := testListSnapshots(t, env.gopts, 1)[0]
+
+ // second backup, sibling path
+ testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata/0/tests"}, opts, env.gopts)
+ testListSnapshots(t, env.gopts, 2)
+
+ // third backup, incremental for the first backup
+ testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata/0/0"}, opts, env.gopts)
+
+ // test that the correct parent snapshot was used
+ latestSn, _ := testRunSnapshots(t, env.gopts)
+ rtest.Assert(t, latestSn != nil, "missing latest snapshot")
+ rtest.Assert(t, latestSn.Parent != nil && latestSn.Parent.Equal(firstSnapshotID), "third snapshot selected unexpected parent %v instead of %v", latestSn.Parent, firstSnapshotID)
+}
+
+func TestDryRunBackup(t *testing.T) {
+ env, cleanup := withTestEnvironment(t)
+ defer cleanup()
+
+ testSetupBackupData(t, env)
+ opts := BackupOptions{}
+ dryOpts := BackupOptions{DryRun: true}
+
+ // dry run before first backup
+ testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, dryOpts, env.gopts)
+ snapshotIDs := testListSnapshots(t, env.gopts, 0)
+ packIDs := testRunList(t, "packs", env.gopts)
+ rtest.Assert(t, len(packIDs) == 0,
+ "expected no data, got %v", snapshotIDs)
+ indexIDs := testRunList(t, "index", env.gopts)
+ rtest.Assert(t, len(indexIDs) == 0,
+ "expected no index, got %v", snapshotIDs)
+
+ // first backup
+ testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts)
+ snapshotIDs = testListSnapshots(t, env.gopts, 1)
+ packIDs = testRunList(t, "packs", env.gopts)
+ indexIDs = testRunList(t, "index", env.gopts)
+
+ // dry run between backups
+ testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, dryOpts, env.gopts)
+ snapshotIDsAfter := testListSnapshots(t, env.gopts, 1)
+ rtest.Equals(t, snapshotIDs, snapshotIDsAfter)
+ dataIDsAfter := testRunList(t, "packs", env.gopts)
+ rtest.Equals(t, packIDs, dataIDsAfter)
+ indexIDsAfter := testRunList(t, "index", env.gopts)
+ rtest.Equals(t, indexIDs, indexIDsAfter)
+
+ // second backup, implicit incremental
+ testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts)
+ snapshotIDs = testListSnapshots(t, env.gopts, 2)
+ packIDs = testRunList(t, "packs", env.gopts)
+ indexIDs = testRunList(t, "index", env.gopts)
+
+ // another dry run
+ testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, dryOpts, env.gopts)
+ snapshotIDsAfter = testListSnapshots(t, env.gopts, 2)
+ rtest.Equals(t, snapshotIDs, snapshotIDsAfter)
+ dataIDsAfter = testRunList(t, "packs", env.gopts)
+ rtest.Equals(t, packIDs, dataIDsAfter)
+ indexIDsAfter = testRunList(t, "index", env.gopts)
+ rtest.Equals(t, indexIDs, indexIDsAfter)
+}
+
+func TestBackupNonExistingFile(t *testing.T) {
+ env, cleanup := withTestEnvironment(t)
+ defer cleanup()
+
+ testSetupBackupData(t, env)
+
+ _ = withRestoreGlobalOptions(func() error {
+ globalOptions.stderr = io.Discard
+
+ p := filepath.Join(env.testdata, "0", "0", "9")
+ dirs := []string{
+ filepath.Join(p, "0"),
+ filepath.Join(p, "1"),
+ filepath.Join(p, "nonexisting"),
+ filepath.Join(p, "5"),
+ }
+
+ opts := BackupOptions{}
+
+ testRunBackup(t, "", dirs, opts, env.gopts)
+ return nil
+ })
+}
+
+func TestBackupSelfHealing(t *testing.T) {
+ env, cleanup := withTestEnvironment(t)
+ defer cleanup()
+
+ testRunInit(t, env.gopts)
+
+ p := filepath.Join(env.testdata, "test/test")
+ rtest.OK(t, os.MkdirAll(filepath.Dir(p), 0755))
+ rtest.OK(t, appendRandomData(p, 5))
+
+ opts := BackupOptions{}
+
+ testRunBackup(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts)
+ testRunCheck(t, env.gopts)
+
+ // remove all data packs
+ removePacksExcept(env.gopts, t, restic.NewIDSet(), false)
+
+ testRunRebuildIndex(t, env.gopts)
+ // now the repo is also missing the data blob in the index; check should report this
+ testRunCheckMustFail(t, env.gopts)
+
+ // second backup should report an error but "heal" this situation
+ err := testRunBackupAssumeFailure(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts)
+ rtest.Assert(t, err != nil,
+ "backup should have reported an error")
+ testRunCheck(t, env.gopts)
+}
+
+func TestBackupTreeLoadError(t *testing.T) {
+ env, cleanup := withTestEnvironment(t)
+ defer cleanup()
+
+ testRunInit(t, env.gopts)
+ p := filepath.Join(env.testdata, "test/test")
+ rtest.OK(t, os.MkdirAll(filepath.Dir(p), 0755))
+ rtest.OK(t, appendRandomData(p, 5))
+
+ opts := BackupOptions{}
+ // Backup a subdirectory first, such that we can remove the tree pack for the subdirectory
+ testRunBackup(t, env.testdata, []string{"test"}, opts, env.gopts)
+
+ r, err := OpenRepository(context.TODO(), env.gopts)
+ rtest.OK(t, err)
+ rtest.OK(t, r.LoadIndex(context.TODO()))
+ treePacks := restic.NewIDSet()
+ r.Index().Each(context.TODO(), func(pb restic.PackedBlob) {
+ if pb.Type == restic.TreeBlob {
+ treePacks.Insert(pb.PackID)
+ }
+ })
+
+ testRunBackup(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts)
+ testRunCheck(t, env.gopts)
+
+ // delete the subdirectory pack first
+ for id := range treePacks {
+ rtest.OK(t, r.Backend().Remove(context.TODO(), restic.Handle{Type: restic.PackFile, Name: id.String()}))
+ }
+ testRunRebuildIndex(t, env.gopts)
+ // now the repo is missing the tree blob in the index; check should report this
+ testRunCheckMustFail(t, env.gopts)
+ // second backup should report an error but "heal" this situation
+ err = testRunBackupAssumeFailure(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts)
+ rtest.Assert(t, err != nil, "backup should have reported an error for the subdirectory")
+ testRunCheck(t, env.gopts)
+
+ // remove all tree packs
+ removePacksExcept(env.gopts, t, restic.NewIDSet(), true)
+ testRunRebuildIndex(t, env.gopts)
+ // now the repo is also missing the data blob in the index; check should report this
+ testRunCheckMustFail(t, env.gopts)
+ // second backup should report an error but "heal" this situation
+ err = testRunBackupAssumeFailure(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts)
+ rtest.Assert(t, err != nil, "backup should have reported an error")
+ testRunCheck(t, env.gopts)
+}
+
+var backupExcludeFilenames = []string{
+ "testfile1",
+ "foo.tar.gz",
+ "private/secret/passwords.txt",
+ "work/source/test.c",
+}
+
+func TestBackupExclude(t *testing.T) {
+ env, cleanup := withTestEnvironment(t)
+ defer cleanup()
+
+ testRunInit(t, env.gopts)
+
+ datadir := filepath.Join(env.base, "testdata")
+
+ for _, filename := range backupExcludeFilenames {
+ fp := filepath.Join(datadir, filename)
+ rtest.OK(t, os.MkdirAll(filepath.Dir(fp), 0755))
+
+ f, err := os.Create(fp)
+ rtest.OK(t, err)
+
+ fmt.Fprint(f, filename)
+ rtest.OK(t, f.Close())
+ }
+
+ snapshots := make(map[string]struct{})
+
+ opts := BackupOptions{}
+
+ testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts)
+ snapshots, snapshotID := lastSnapshot(snapshots, loadSnapshotMap(t, env.gopts))
+ files := testRunLs(t, env.gopts, snapshotID)
+ rtest.Assert(t, includes(files, "/testdata/foo.tar.gz"),
+ "expected file %q in first snapshot, but it's not included", "foo.tar.gz")
+
+ opts.Excludes = []string{"*.tar.gz"}
+ testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts)
+ snapshots, snapshotID = lastSnapshot(snapshots, loadSnapshotMap(t, env.gopts))
+ files = testRunLs(t, env.gopts, snapshotID)
+ rtest.Assert(t, !includes(files, "/testdata/foo.tar.gz"),
+ "expected file %q not in first snapshot, but it's included", "foo.tar.gz")
+
+ opts.Excludes = []string{"*.tar.gz", "private/secret"}
+ testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts)
+ _, snapshotID = lastSnapshot(snapshots, loadSnapshotMap(t, env.gopts))
+ files = testRunLs(t, env.gopts, snapshotID)
+ rtest.Assert(t, !includes(files, "/testdata/foo.tar.gz"),
+ "expected file %q not in first snapshot, but it's included", "foo.tar.gz")
+ rtest.Assert(t, !includes(files, "/testdata/private/secret/passwords.txt"),
+ "expected file %q not in first snapshot, but it's included", "passwords.txt")
+}
+
+func TestBackupErrors(t *testing.T) {
+ if runtime.GOOS == "windows" {
+ return
+ }
+ env, cleanup := withTestEnvironment(t)
+ defer cleanup()
+
+ testSetupBackupData(t, env)
+
+ // Assume failure
+ inaccessibleFile := filepath.Join(env.testdata, "0", "0", "9", "0")
+ rtest.OK(t, os.Chmod(inaccessibleFile, 0000))
+ defer func() {
+ rtest.OK(t, os.Chmod(inaccessibleFile, 0644))
+ }()
+ opts := BackupOptions{}
+ err := testRunBackupAssumeFailure(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts)
+ rtest.Assert(t, err != nil, "Assumed failure, but no error occurred.")
+ rtest.Assert(t, err == ErrInvalidSourceData, "Wrong error returned")
+ testListSnapshots(t, env.gopts, 1)
+}
+
+const (
+ incrementalFirstWrite = 10 * 1042 * 1024
+ incrementalSecondWrite = 1 * 1042 * 1024
+ incrementalThirdWrite = 1 * 1042 * 1024
+)
+
+func TestIncrementalBackup(t *testing.T) {
+ env, cleanup := withTestEnvironment(t)
+ defer cleanup()
+
+ testRunInit(t, env.gopts)
+
+ datadir := filepath.Join(env.base, "testdata")
+ testfile := filepath.Join(datadir, "testfile")
+
+ rtest.OK(t, appendRandomData(testfile, incrementalFirstWrite))
+
+ opts := BackupOptions{}
+
+ testRunBackup(t, "", []string{datadir}, opts, env.gopts)
+ testRunCheck(t, env.gopts)
+ stat1 := dirStats(env.repo)
+
+ rtest.OK(t, appendRandomData(testfile, incrementalSecondWrite))
+
+ testRunBackup(t, "", []string{datadir}, opts, env.gopts)
+ testRunCheck(t, env.gopts)
+ stat2 := dirStats(env.repo)
+ if stat2.size-stat1.size > incrementalFirstWrite {
+ t.Errorf("repository size has grown by more than %d bytes", incrementalFirstWrite)
+ }
+ t.Logf("repository grown by %d bytes", stat2.size-stat1.size)
+
+ rtest.OK(t, appendRandomData(testfile, incrementalThirdWrite))
+
+ testRunBackup(t, "", []string{datadir}, opts, env.gopts)
+ testRunCheck(t, env.gopts)
+ stat3 := dirStats(env.repo)
+ if stat3.size-stat2.size > incrementalFirstWrite {
+ t.Errorf("repository size has grown by more than %d bytes", incrementalFirstWrite)
+ }
+ t.Logf("repository grown by %d bytes", stat3.size-stat2.size)
+}
+
+func TestBackupTags(t *testing.T) {
+ env, cleanup := withTestEnvironment(t)
+ defer cleanup()
+
+ testSetupBackupData(t, env)
+ opts := BackupOptions{}
+
+ testRunBackup(t, "", []string{env.testdata}, opts, env.gopts)
+ testRunCheck(t, env.gopts)
+ newest, _ := testRunSnapshots(t, env.gopts)
+
+ if newest == nil {
+ t.Fatal("expected a backup, got nil")
+ }
+
+ rtest.Assert(t, len(newest.Tags) == 0,
+ "expected no tags, got %v", newest.Tags)
+ parent := newest
+
+ opts.Tags = restic.TagLists{[]string{"NL"}}
+ testRunBackup(t, "", []string{env.testdata}, opts, env.gopts)
+ testRunCheck(t, env.gopts)
+ newest, _ = testRunSnapshots(t, env.gopts)
+
+ if newest == nil {
+ t.Fatal("expected a backup, got nil")
+ }
+
+ rtest.Assert(t, len(newest.Tags) == 1 && newest.Tags[0] == "NL",
+ "expected one NL tag, got %v", newest.Tags)
+ // Tagged backup should have untagged backup as parent.
+ rtest.Assert(t, parent.ID.Equal(*newest.Parent),
+ "expected parent to be %v, got %v", parent.ID, newest.Parent)
+}
+
+func TestBackupProgramVersion(t *testing.T) {
+ env, cleanup := withTestEnvironment(t)
+ defer cleanup()
+
+ testSetupBackupData(t, env)
+ testRunBackup(t, "", []string{env.testdata}, BackupOptions{}, env.gopts)
+ newest, _ := testRunSnapshots(t, env.gopts)
+
+ if newest == nil {
+ t.Fatal("expected a backup, got nil")
+ }
+ resticVersion := "restic " + version
+ rtest.Assert(t, newest.ProgramVersion == resticVersion,
+ "expected %v, got %v", resticVersion, newest.ProgramVersion)
+}
+
+func TestQuietBackup(t *testing.T) {
+ env, cleanup := withTestEnvironment(t)
+ defer cleanup()
+
+ testSetupBackupData(t, env)
+ opts := BackupOptions{}
+
+ env.gopts.Quiet = false
+ testRunBackup(t, "", []string{env.testdata}, opts, env.gopts)
+ testListSnapshots(t, env.gopts, 1)
+
+ testRunCheck(t, env.gopts)
+
+ env.gopts.Quiet = true
+ testRunBackup(t, "", []string{env.testdata}, opts, env.gopts)
+ testListSnapshots(t, env.gopts, 2)
+
+ testRunCheck(t, env.gopts)
+}
+
+func TestHardLink(t *testing.T) {
+ // this test assumes a test set with a single directory containing hard linked files
+ env, cleanup := withTestEnvironment(t)
+ defer cleanup()
+
+ datafile := filepath.Join("testdata", "test.hl.tar.gz")
+ fd, err := os.Open(datafile)
+ if os.IsNotExist(err) {
+ t.Skipf("unable to find data file %q, skipping", datafile)
+ return
+ }
+ rtest.OK(t, err)
+ rtest.OK(t, fd.Close())
+
+ testRunInit(t, env.gopts)
+
+ rtest.SetupTarTestFixture(t, env.testdata, datafile)
+
+ linkTests := createFileSetPerHardlink(env.testdata)
+
+ opts := BackupOptions{}
+
+ // first backup
+ testRunBackup(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts)
+ snapshotIDs := testListSnapshots(t, env.gopts, 1)
+
+ testRunCheck(t, env.gopts)
+
+ // restore all backups and compare
+ for i, snapshotID := range snapshotIDs {
+ restoredir := filepath.Join(env.base, fmt.Sprintf("restore%d", i))
+ t.Logf("restoring snapshot %v to %v", snapshotID.Str(), restoredir)
+ testRunRestore(t, env.gopts, restoredir, snapshotID)
+ diff := directoriesContentsDiff(env.testdata, filepath.Join(restoredir, "testdata"))
+ rtest.Assert(t, diff == "", "directories are not equal %v", diff)
+
+ linkResults := createFileSetPerHardlink(filepath.Join(restoredir, "testdata"))
+ rtest.Assert(t, linksEqual(linkTests, linkResults),
+ "links are not equal")
+ }
+
+ testRunCheck(t, env.gopts)
+}
+
+func linksEqual(source, dest map[uint64][]string) bool {
+ for _, vs := range source {
+ found := false
+ for kd, vd := range dest {
+ if linkEqual(vs, vd) {
+ delete(dest, kd)
+ found = true
+ break
+ }
+ }
+ if !found {
+ return false
+ }
+ }
+
+ return len(dest) == 0
+}
+
+func linkEqual(source, dest []string) bool {
+ // equal if slices are equal without considering order
+ if source == nil && dest == nil {
+ return true
+ }
+
+ if source == nil || dest == nil {
+ return false
+ }
+
+ if len(source) != len(dest) {
+ return false
+ }
+
+ for i := range source {
+ found := false
+ for j := range dest {
+ if source[i] == dest[j] {
+ found = true
+ break
+ }
+ }
+ if !found {
+ return false
+ }
+ }
+
+ return true
+}
diff --git a/cmd/restic/cmd_cache.go b/cmd/restic/cmd_cache.go
index 334063fdc..4a10d1027 100644
--- a/cmd/restic/cmd_cache.go
+++ b/cmd/restic/cmd_cache.go
@@ -155,7 +155,7 @@ func runCache(opts CacheOptions, gopts GlobalOptions, args []string) error {
})
}
- _ = tab.Write(gopts.stdout)
+ _ = tab.Write(globalOptions.stdout)
Printf("%d cache dirs in %s\n", len(dirs), cachedir)
return nil
diff --git a/cmd/restic/cmd_cat.go b/cmd/restic/cmd_cat.go
index f46502d5a..7c4373812 100644
--- a/cmd/restic/cmd_cat.go
+++ b/cmd/restic/cmd_cat.go
@@ -13,7 +13,7 @@ import (
)
var cmdCat = &cobra.Command{
- Use: "cat [flags] [pack|blob|snapshot|index|key|masterkey|config|lock] ID",
+ Use: "cat [flags] [masterkey|config|pack ID|blob ID|snapshot ID|index ID|key ID|lock ID|tree snapshot:subfolder]",
Short: "Print internal objects to stdout",
Long: `
The "cat" command is used to print internal objects to stdout.
@@ -45,7 +45,7 @@ func runCat(ctx context.Context, gopts GlobalOptions, args []string) error {
if !gopts.NoLock {
var lock *restic.Lock
- lock, ctx, err = lockRepo(ctx, repo)
+ lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
defer unlockRepo(lock)
if err != nil {
return err
@@ -55,7 +55,7 @@ func runCat(ctx context.Context, gopts GlobalOptions, args []string) error {
tpe := args[0]
var id restic.ID
- if tpe != "masterkey" && tpe != "config" && tpe != "snapshot" {
+ if tpe != "masterkey" && tpe != "config" && tpe != "snapshot" && tpe != "tree" {
id, err = restic.ParseID(args[1])
if err != nil {
return errors.Fatalf("unable to parse ID: %v\n", err)
@@ -72,7 +72,7 @@ func runCat(ctx context.Context, gopts GlobalOptions, args []string) error {
Println(string(buf))
return nil
case "index":
- buf, err := repo.LoadUnpacked(ctx, restic.IndexFile, id, nil)
+ buf, err := repo.LoadUnpacked(ctx, restic.IndexFile, id)
if err != nil {
return err
}
@@ -80,7 +80,7 @@ func runCat(ctx context.Context, gopts GlobalOptions, args []string) error {
Println(string(buf))
return nil
case "snapshot":
- sn, err := restic.FindSnapshot(ctx, repo.Backend(), repo, args[1])
+ sn, _, err := restic.FindSnapshot(ctx, repo.Backend(), repo, args[1])
if err != nil {
return errors.Fatalf("could not find snapshot: %v\n", err)
}
@@ -165,6 +165,29 @@ func runCat(ctx context.Context, gopts GlobalOptions, args []string) error {
return errors.Fatal("blob not found")
+ case "tree":
+ sn, subfolder, err := restic.FindSnapshot(ctx, repo.Backend(), repo, args[1])
+ if err != nil {
+ return errors.Fatalf("could not find snapshot: %v\n", err)
+ }
+
+ err = repo.LoadIndex(ctx)
+ if err != nil {
+ return err
+ }
+
+ sn.Tree, err = restic.FindTreeDirectory(ctx, repo, sn.Tree, subfolder)
+ if err != nil {
+ return err
+ }
+
+ buf, err := repo.LoadBlob(ctx, restic.TreeBlob, *sn.Tree, nil)
+ if err != nil {
+ return err
+ }
+ _, err = globalOptions.stdout.Write(buf)
+ return err
+
default:
return errors.Fatal("invalid type")
}
diff --git a/cmd/restic/cmd_check.go b/cmd/restic/cmd_check.go
index be9dd5130..3c4c9daa9 100644
--- a/cmd/restic/cmd_check.go
+++ b/cmd/restic/cmd_check.go
@@ -16,6 +16,7 @@ import (
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/fs"
"github.com/restic/restic/internal/restic"
+ "github.com/restic/restic/internal/ui"
)
var cmdCheck = &cobra.Command{
@@ -65,7 +66,7 @@ func init() {
// MarkDeprecated only returns an error when the flag is not found
panic(err)
}
- f.BoolVar(&checkOptions.WithCache, "with-cache", false, "use the cache")
+ f.BoolVar(&checkOptions.WithCache, "with-cache", false, "use existing cache, only read uncached data from repository")
}
func checkFlags(opts CheckOptions) error {
@@ -97,7 +98,7 @@ func checkFlags(opts CheckOptions) error {
}
} else {
- fileSize, err := parseSizeStr(opts.ReadDataSubset)
+ fileSize, err := ui.ParseBytes(opts.ReadDataSubset)
if err != nil {
return argumentError
}
@@ -211,7 +212,7 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args
if !gopts.NoLock {
Verbosef("create exclusive lock for repository\n")
var lock *restic.Lock
- lock, ctx, err = lockRepoExclusive(ctx, repo)
+ lock, ctx, err = lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON)
defer unlockRepo(lock)
if err != nil {
return err
@@ -245,7 +246,7 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args
}
if suggestIndexRebuild {
- Printf("Duplicate packs/old indexes are non-critical, you can run `restic rebuild-index' to correct this.\n")
+ Printf("Duplicate packs/old indexes are non-critical, you can run `restic repair index' to correct this.\n")
}
if mixedFound {
Printf("Mixed packs with tree and data blobs are non-critical, you can run `restic prune` to correct this.\n")
@@ -363,7 +364,7 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args
if repoSize == 0 {
return errors.Fatal("Cannot read from a repository having size 0")
}
- subsetSize, _ := parseSizeStr(opts.ReadDataSubset)
+ subsetSize, _ := ui.ParseBytes(opts.ReadDataSubset)
if subsetSize > repoSize {
subsetSize = repoSize
}
diff --git a/cmd/restic/cmd_check_integration_test.go b/cmd/restic/cmd_check_integration_test.go
new file mode 100644
index 000000000..9eb4fec62
--- /dev/null
+++ b/cmd/restic/cmd_check_integration_test.go
@@ -0,0 +1,34 @@
+package main
+
+import (
+ "context"
+ "testing"
+
+ rtest "github.com/restic/restic/internal/test"
+)
+
+func testRunCheck(t testing.TB, gopts GlobalOptions) {
+ t.Helper()
+ output, err := testRunCheckOutput(gopts, true)
+ if err != nil {
+ t.Error(output)
+ t.Fatalf("unexpected error: %+v", err)
+ }
+}
+
+func testRunCheckMustFail(t testing.TB, gopts GlobalOptions) {
+ t.Helper()
+ _, err := testRunCheckOutput(gopts, false)
+ rtest.Assert(t, err != nil, "expected non nil error after check of damaged repository")
+}
+
+func testRunCheckOutput(gopts GlobalOptions, checkUnused bool) (string, error) {
+ buf, err := withCaptureStdout(func() error {
+ opts := CheckOptions{
+ ReadData: true,
+ CheckUnused: checkUnused,
+ }
+ return runCheck(context.TODO(), opts, gopts, nil)
+ })
+ return buf.String(), err
+}
diff --git a/cmd/restic/cmd_copy.go b/cmd/restic/cmd_copy.go
index 2f095972a..eaa0ef81a 100644
--- a/cmd/restic/cmd_copy.go
+++ b/cmd/restic/cmd_copy.go
@@ -6,6 +6,7 @@ import (
"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/debug"
+ "github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/repository"
"github.com/restic/restic/internal/restic"
"golang.org/x/sync/errgroup"
@@ -74,14 +75,14 @@ func runCopy(ctx context.Context, opts CopyOptions, gopts GlobalOptions, args []
if !gopts.NoLock {
var srcLock *restic.Lock
- srcLock, ctx, err = lockRepo(ctx, srcRepo)
+ srcLock, ctx, err = lockRepo(ctx, srcRepo, gopts.RetryLock, gopts.JSON)
defer unlockRepo(srcLock)
if err != nil {
return err
}
}
- dstLock, ctx, err := lockRepo(ctx, dstRepo)
+ dstLock, ctx, err := lockRepo(ctx, dstRepo, gopts.RetryLock, gopts.JSON)
defer unlockRepo(dstLock)
if err != nil {
return err
@@ -236,5 +237,8 @@ func copyTree(ctx context.Context, srcRepo restic.Repository, dstRepo restic.Rep
bar := newProgressMax(!quiet, uint64(len(packList)), "packs copied")
_, err = repository.Repack(ctx, srcRepo, dstRepo, packList, copyBlobs, bar)
bar.Done()
- return err
+ if err != nil {
+ return errors.Fatal(err.Error())
+ }
+ return nil
}
diff --git a/cmd/restic/cmd_copy_integration_test.go b/cmd/restic/cmd_copy_integration_test.go
new file mode 100644
index 000000000..1c8837690
--- /dev/null
+++ b/cmd/restic/cmd_copy_integration_test.go
@@ -0,0 +1,136 @@
+package main
+
+import (
+ "context"
+ "fmt"
+ "path/filepath"
+ "testing"
+
+ rtest "github.com/restic/restic/internal/test"
+)
+
+func testRunCopy(t testing.TB, srcGopts GlobalOptions, dstGopts GlobalOptions) {
+ gopts := srcGopts
+ gopts.Repo = dstGopts.Repo
+ gopts.password = dstGopts.password
+ copyOpts := CopyOptions{
+ secondaryRepoOptions: secondaryRepoOptions{
+ Repo: srcGopts.Repo,
+ password: srcGopts.password,
+ },
+ }
+
+ rtest.OK(t, runCopy(context.TODO(), copyOpts, gopts, nil))
+}
+
+func TestCopy(t *testing.T) {
+ env, cleanup := withTestEnvironment(t)
+ defer cleanup()
+ env2, cleanup2 := withTestEnvironment(t)
+ defer cleanup2()
+
+ testSetupBackupData(t, env)
+ opts := BackupOptions{}
+ testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9")}, opts, env.gopts)
+ testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "2")}, opts, env.gopts)
+ testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "3")}, opts, env.gopts)
+ testRunCheck(t, env.gopts)
+
+ testRunInit(t, env2.gopts)
+ testRunCopy(t, env.gopts, env2.gopts)
+
+ snapshotIDs := testListSnapshots(t, env.gopts, 3)
+ copiedSnapshotIDs := testListSnapshots(t, env2.gopts, 3)
+
+ // Check that the size of the copy seems reasonable
+ stat := dirStats(env.repo)
+ stat2 := dirStats(env2.repo)
+ sizeDiff := int64(stat.size) - int64(stat2.size)
+ if sizeDiff < 0 {
+ sizeDiff = -sizeDiff
+ }
+ rtest.Assert(t, sizeDiff < int64(stat.size)/50, "expected less than 2%% size difference: %v vs. %v",
+ stat.size, stat2.size)
+
+ // Check integrity of the copy
+ testRunCheck(t, env2.gopts)
+
+ // Check that the copied snapshots have the same tree contents as the old ones (= identical tree hash)
+ origRestores := make(map[string]struct{})
+ for i, snapshotID := range snapshotIDs {
+ restoredir := filepath.Join(env.base, fmt.Sprintf("restore%d", i))
+ origRestores[restoredir] = struct{}{}
+ testRunRestore(t, env.gopts, restoredir, snapshotID)
+ }
+ for i, snapshotID := range copiedSnapshotIDs {
+ restoredir := filepath.Join(env2.base, fmt.Sprintf("restore%d", i))
+ testRunRestore(t, env2.gopts, restoredir, snapshotID)
+ foundMatch := false
+ for cmpdir := range origRestores {
+ diff := directoriesContentsDiff(restoredir, cmpdir)
+ if diff == "" {
+ delete(origRestores, cmpdir)
+ foundMatch = true
+ }
+ }
+
+ rtest.Assert(t, foundMatch, "found no counterpart for snapshot %v", snapshotID)
+ }
+
+ rtest.Assert(t, len(origRestores) == 0, "found not copied snapshots")
+}
+
+func TestCopyIncremental(t *testing.T) {
+ env, cleanup := withTestEnvironment(t)
+ defer cleanup()
+ env2, cleanup2 := withTestEnvironment(t)
+ defer cleanup2()
+
+ testSetupBackupData(t, env)
+ opts := BackupOptions{}
+ testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9")}, opts, env.gopts)
+ testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "2")}, opts, env.gopts)
+ testRunCheck(t, env.gopts)
+
+ testRunInit(t, env2.gopts)
+ testRunCopy(t, env.gopts, env2.gopts)
+
+ testListSnapshots(t, env.gopts, 2)
+ testListSnapshots(t, env2.gopts, 2)
+
+ // Check the integrity of the copy
+ testRunCheck(t, env2.gopts)
+
+ // check that no snapshots are copied, as there are no new ones
+ testRunCopy(t, env.gopts, env2.gopts)
+ testRunCheck(t, env2.gopts)
+ testListSnapshots(t, env2.gopts, 2)
+
+ // check that only new snapshots are copied
+ testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "3")}, opts, env.gopts)
+ testRunCopy(t, env.gopts, env2.gopts)
+ testRunCheck(t, env2.gopts)
+ testListSnapshots(t, env.gopts, 3)
+ testListSnapshots(t, env2.gopts, 3)
+
+ // also test the reverse direction
+ testRunCopy(t, env2.gopts, env.gopts)
+ testRunCheck(t, env.gopts)
+ testListSnapshots(t, env.gopts, 3)
+}
+
+func TestCopyUnstableJSON(t *testing.T) {
+ env, cleanup := withTestEnvironment(t)
+ defer cleanup()
+ env2, cleanup2 := withTestEnvironment(t)
+ defer cleanup2()
+
+ // contains a symlink created using `ln -s '../i/'$'\355\246\361''d/samba' broken-symlink`
+ datafile := filepath.Join("testdata", "copy-unstable-json.tar.gz")
+ rtest.SetupTarTestFixture(t, env.base, datafile)
+
+ testRunInit(t, env2.gopts)
+ testRunCopy(t, env.gopts, env2.gopts)
+ testRunCheck(t, env2.gopts)
+ testListSnapshots(t, env2.gopts, 1)
+}
diff --git a/cmd/restic/cmd_debug.go b/cmd/restic/cmd_debug.go
index c8626d46c..a54200c45 100644
--- a/cmd/restic/cmd_debug.go
+++ b/cmd/restic/cmd_debug.go
@@ -156,7 +156,7 @@ func runDebugDump(ctx context.Context, gopts GlobalOptions, args []string) error
if !gopts.NoLock {
var lock *restic.Lock
- lock, ctx, err = lockRepo(ctx, repo)
+ lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
defer unlockRepo(lock)
if err != nil {
return err
@@ -167,20 +167,20 @@ func runDebugDump(ctx context.Context, gopts GlobalOptions, args []string) error
switch tpe {
case "indexes":
- return dumpIndexes(ctx, repo, gopts.stdout)
+ return dumpIndexes(ctx, repo, globalOptions.stdout)
case "snapshots":
- return debugPrintSnapshots(ctx, repo, gopts.stdout)
+ return debugPrintSnapshots(ctx, repo, globalOptions.stdout)
case "packs":
- return printPacks(ctx, repo, gopts.stdout)
+ return printPacks(ctx, repo, globalOptions.stdout)
case "all":
Printf("snapshots:\n")
- err := debugPrintSnapshots(ctx, repo, gopts.stdout)
+ err := debugPrintSnapshots(ctx, repo, globalOptions.stdout)
if err != nil {
return err
}
Printf("\nindexes:\n")
- err = dumpIndexes(ctx, repo, gopts.stdout)
+ err = dumpIndexes(ctx, repo, globalOptions.stdout)
if err != nil {
return err
}
@@ -462,7 +462,7 @@ func runDebugExamine(ctx context.Context, gopts GlobalOptions, args []string) er
if !gopts.NoLock {
var lock *restic.Lock
- lock, ctx, err = lockRepo(ctx, repo)
+ lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
defer unlockRepo(lock)
if err != nil {
return err
diff --git a/cmd/restic/cmd_diff.go b/cmd/restic/cmd_diff.go
index 0000fd18a..28e60f464 100644
--- a/cmd/restic/cmd_diff.go
+++ b/cmd/restic/cmd_diff.go
@@ -54,12 +54,12 @@ func init() {
f.BoolVar(&diffOptions.ShowMetadata, "metadata", false, "print changes in metadata")
}
-func loadSnapshot(ctx context.Context, be restic.Lister, repo restic.Repository, desc string) (*restic.Snapshot, error) {
- sn, err := restic.FindSnapshot(ctx, be, repo, desc)
+func loadSnapshot(ctx context.Context, be restic.Lister, repo restic.Repository, desc string) (*restic.Snapshot, string, error) {
+ sn, subfolder, err := restic.FindSnapshot(ctx, be, repo, desc)
if err != nil {
- return nil, errors.Fatal(err.Error())
+ return nil, "", errors.Fatal(err.Error())
}
- return sn, err
+ return sn, subfolder, err
}
// Comparer collects all things needed to compare two snapshots.
@@ -334,7 +334,7 @@ func runDiff(ctx context.Context, opts DiffOptions, gopts GlobalOptions, args []
if !gopts.NoLock {
var lock *restic.Lock
- lock, ctx, err = lockRepo(ctx, repo)
+ lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
defer unlockRepo(lock)
if err != nil {
return err
@@ -346,12 +346,12 @@ func runDiff(ctx context.Context, opts DiffOptions, gopts GlobalOptions, args []
if err != nil {
return err
}
- sn1, err := loadSnapshot(ctx, be, repo, args[0])
+ sn1, subfolder1, err := loadSnapshot(ctx, be, repo, args[0])
if err != nil {
return err
}
- sn2, err := loadSnapshot(ctx, be, repo, args[1])
+ sn2, subfolder2, err := loadSnapshot(ctx, be, repo, args[1])
if err != nil {
return err
}
@@ -372,6 +372,16 @@ func runDiff(ctx context.Context, opts DiffOptions, gopts GlobalOptions, args []
return errors.Errorf("snapshot %v has nil tree", sn2.ID().Str())
}
+ sn1.Tree, err = restic.FindTreeDirectory(ctx, repo, sn1.Tree, subfolder1)
+ if err != nil {
+ return err
+ }
+
+ sn2.Tree, err = restic.FindTreeDirectory(ctx, repo, sn2.Tree, subfolder2)
+ if err != nil {
+ return err
+ }
+
c := &Comparer{
repo: repo,
opts: diffOptions,
@@ -381,7 +391,7 @@ func runDiff(ctx context.Context, opts DiffOptions, gopts GlobalOptions, args []
}
if gopts.JSON {
- enc := json.NewEncoder(gopts.stdout)
+ enc := json.NewEncoder(globalOptions.stdout)
c.printChange = func(change *Change) {
err := enc.Encode(change)
if err != nil {
@@ -415,7 +425,7 @@ func runDiff(ctx context.Context, opts DiffOptions, gopts GlobalOptions, args []
updateBlobs(repo, stats.BlobsAfter.Sub(both).Sub(stats.BlobsCommon), &stats.Added)
if gopts.JSON {
- err := json.NewEncoder(gopts.stdout).Encode(stats)
+ err := json.NewEncoder(globalOptions.stdout).Encode(stats)
if err != nil {
Warnf("JSON encode failed: %v\n", err)
}
diff --git a/cmd/restic/cmd_diff_integration_test.go b/cmd/restic/cmd_diff_integration_test.go
new file mode 100644
index 000000000..8782053ed
--- /dev/null
+++ b/cmd/restic/cmd_diff_integration_test.go
@@ -0,0 +1,193 @@
+package main
+
+import (
+ "bufio"
+ "context"
+ "encoding/json"
+ "io"
+ "os"
+ "path/filepath"
+ "regexp"
+ "strings"
+ "testing"
+
+ rtest "github.com/restic/restic/internal/test"
+)
+
+func testRunDiffOutput(gopts GlobalOptions, firstSnapshotID string, secondSnapshotID string) (string, error) {
+ buf, err := withCaptureStdout(func() error {
+ opts := DiffOptions{
+ ShowMetadata: false,
+ }
+ return runDiff(context.TODO(), opts, gopts, []string{firstSnapshotID, secondSnapshotID})
+ })
+ return buf.String(), err
+}
+
+func copyFile(dst string, src string) error {
+ srcFile, err := os.Open(src)
+ if err != nil {
+ return err
+ }
+
+ dstFile, err := os.Create(dst)
+ if err != nil {
+ // ignore subsequent errors
+ _ = srcFile.Close()
+ return err
+ }
+
+ _, err = io.Copy(dstFile, srcFile)
+ if err != nil {
+ // ignore subsequent errors
+ _ = srcFile.Close()
+ _ = dstFile.Close()
+ return err
+ }
+
+ err = srcFile.Close()
+ if err != nil {
+ // ignore subsequent errors
+ _ = dstFile.Close()
+ return err
+ }
+
+ err = dstFile.Close()
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+var diffOutputRegexPatterns = []string{
+ "-.+modfile",
+ "M.+modfile1",
+ "\\+.+modfile2",
+ "\\+.+modfile3",
+ "\\+.+modfile4",
+ "-.+submoddir",
+ "-.+submoddir.subsubmoddir",
+ "\\+.+submoddir2",
+ "\\+.+submoddir2.subsubmoddir",
+ "Files: +2 new, +1 removed, +1 changed",
+ "Dirs: +3 new, +2 removed",
+ "Data Blobs: +2 new, +1 removed",
+ "Added: +7[0-9]{2}\\.[0-9]{3} KiB",
+ "Removed: +2[0-9]{2}\\.[0-9]{3} KiB",
+}
+
+func setupDiffRepo(t *testing.T) (*testEnvironment, func(), string, string) {
+ env, cleanup := withTestEnvironment(t)
+ testRunInit(t, env.gopts)
+
+ datadir := filepath.Join(env.base, "testdata")
+ testdir := filepath.Join(datadir, "testdir")
+ subtestdir := filepath.Join(testdir, "subtestdir")
+ testfile := filepath.Join(testdir, "testfile")
+
+ rtest.OK(t, os.Mkdir(testdir, 0755))
+ rtest.OK(t, os.Mkdir(subtestdir, 0755))
+ rtest.OK(t, appendRandomData(testfile, 256*1024))
+
+ moddir := filepath.Join(datadir, "moddir")
+ submoddir := filepath.Join(moddir, "submoddir")
+ subsubmoddir := filepath.Join(submoddir, "subsubmoddir")
+ modfile := filepath.Join(moddir, "modfile")
+ rtest.OK(t, os.Mkdir(moddir, 0755))
+ rtest.OK(t, os.Mkdir(submoddir, 0755))
+ rtest.OK(t, os.Mkdir(subsubmoddir, 0755))
+ rtest.OK(t, copyFile(modfile, testfile))
+ rtest.OK(t, appendRandomData(modfile+"1", 256*1024))
+
+ snapshots := make(map[string]struct{})
+ opts := BackupOptions{}
+ testRunBackup(t, "", []string{datadir}, opts, env.gopts)
+ snapshots, firstSnapshotID := lastSnapshot(snapshots, loadSnapshotMap(t, env.gopts))
+
+ rtest.OK(t, os.Rename(modfile, modfile+"3"))
+ rtest.OK(t, os.Rename(submoddir, submoddir+"2"))
+ rtest.OK(t, appendRandomData(modfile+"1", 256*1024))
+ rtest.OK(t, appendRandomData(modfile+"2", 256*1024))
+ rtest.OK(t, os.Mkdir(modfile+"4", 0755))
+
+ testRunBackup(t, "", []string{datadir}, opts, env.gopts)
+ _, secondSnapshotID := lastSnapshot(snapshots, loadSnapshotMap(t, env.gopts))
+
+ return env, cleanup, firstSnapshotID, secondSnapshotID
+}
+
+func TestDiff(t *testing.T) {
+ env, cleanup, firstSnapshotID, secondSnapshotID := setupDiffRepo(t)
+ defer cleanup()
+
+ // quiet suppresses the diff output except for the summary
+ env.gopts.Quiet = false
+ _, err := testRunDiffOutput(env.gopts, "", secondSnapshotID)
+ rtest.Assert(t, err != nil, "expected error on invalid snapshot id")
+
+ out, err := testRunDiffOutput(env.gopts, firstSnapshotID, secondSnapshotID)
+ rtest.OK(t, err)
+
+ for _, pattern := range diffOutputRegexPatterns {
+ r, err := regexp.Compile(pattern)
+ rtest.Assert(t, err == nil, "failed to compile regexp %v", pattern)
+ rtest.Assert(t, r.MatchString(out), "expected pattern %v in output, got\n%v", pattern, out)
+ }
+
+ // check quiet output
+ env.gopts.Quiet = true
+ outQuiet, err := testRunDiffOutput(env.gopts, firstSnapshotID, secondSnapshotID)
+ rtest.OK(t, err)
+
+ rtest.Assert(t, len(outQuiet) < len(out), "expected shorter output on quiet mode %v vs. %v", len(outQuiet), len(out))
+}
+
+type typeSniffer struct {
+ MessageType string `json:"message_type"`
+}
+
+func TestDiffJSON(t *testing.T) {
+ env, cleanup, firstSnapshotID, secondSnapshotID := setupDiffRepo(t)
+ defer cleanup()
+
+ // quiet suppresses the diff output except for the summary
+ env.gopts.Quiet = false
+ env.gopts.JSON = true
+ out, err := testRunDiffOutput(env.gopts, firstSnapshotID, secondSnapshotID)
+ rtest.OK(t, err)
+
+ var stat DiffStatsContainer
+ var changes int
+
+ scanner := bufio.NewScanner(strings.NewReader(out))
+ for scanner.Scan() {
+ line := scanner.Text()
+ var sniffer typeSniffer
+ rtest.OK(t, json.Unmarshal([]byte(line), &sniffer))
+ switch sniffer.MessageType {
+ case "change":
+ changes++
+ case "statistics":
+ rtest.OK(t, json.Unmarshal([]byte(line), &stat))
+ default:
+ t.Fatalf("unexpected message type %v", sniffer.MessageType)
+ }
+ }
+ rtest.Equals(t, 9, changes)
+ rtest.Assert(t, stat.Added.Files == 2 && stat.Added.Dirs == 3 && stat.Added.DataBlobs == 2 &&
+ stat.Removed.Files == 1 && stat.Removed.Dirs == 2 && stat.Removed.DataBlobs == 1 &&
+ stat.ChangedFiles == 1, "unexpected statistics")
+
+ // check quiet output
+ env.gopts.Quiet = true
+ outQuiet, err := testRunDiffOutput(env.gopts, firstSnapshotID, secondSnapshotID)
+ rtest.OK(t, err)
+
+ stat = DiffStatsContainer{}
+ rtest.OK(t, json.Unmarshal([]byte(outQuiet), &stat))
+ rtest.Assert(t, stat.Added.Files == 2 && stat.Added.Dirs == 3 && stat.Added.DataBlobs == 2 &&
+ stat.Removed.Files == 1 && stat.Removed.Dirs == 2 && stat.Removed.DataBlobs == 1 &&
+ stat.ChangedFiles == 1, "unexpected statistics")
+ rtest.Assert(t, stat.SourceSnapshot == firstSnapshotID && stat.TargetSnapshot == secondSnapshotID, "unexpected snapshot ids")
+}
diff --git a/cmd/restic/cmd_dump.go b/cmd/restic/cmd_dump.go
index cda7b65b9..9acae7ca8 100644
--- a/cmd/restic/cmd_dump.go
+++ b/cmd/restic/cmd_dump.go
@@ -132,14 +132,14 @@ func runDump(ctx context.Context, opts DumpOptions, gopts GlobalOptions, args []
if !gopts.NoLock {
var lock *restic.Lock
- lock, ctx, err = lockRepo(ctx, repo)
+ lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
defer unlockRepo(lock)
if err != nil {
return err
}
}
- sn, err := (&restic.SnapshotFilter{
+ sn, subfolder, err := (&restic.SnapshotFilter{
Hosts: opts.Hosts,
Paths: opts.Paths,
Tags: opts.Tags,
@@ -153,6 +153,11 @@ func runDump(ctx context.Context, opts DumpOptions, gopts GlobalOptions, args []
return err
}
+ sn.Tree, err = restic.FindTreeDirectory(ctx, repo, sn.Tree, subfolder)
+ if err != nil {
+ return err
+ }
+
tree, err := restic.LoadTree(ctx, repo, *sn.Tree)
if err != nil {
return errors.Fatalf("loading tree for snapshot %q failed: %v", snapshotIDString, err)
diff --git a/cmd/restic/cmd_find.go b/cmd/restic/cmd_find.go
index e5457c3be..181d8595d 100644
--- a/cmd/restic/cmd_find.go
+++ b/cmd/restic/cmd_find.go
@@ -51,6 +51,7 @@ type FindOptions struct {
PackID, ShowPackID bool
CaseInsensitive bool
ListLong bool
+ HumanReadable bool
restic.SnapshotFilter
}
@@ -69,6 +70,7 @@ func init() {
f.BoolVar(&findOptions.ShowPackID, "show-pack-id", false, "display the pack-ID the blobs belong to (with --blob or --tree)")
f.BoolVarP(&findOptions.CaseInsensitive, "ignore-case", "i", false, "ignore case for pattern")
f.BoolVarP(&findOptions.ListLong, "long", "l", false, "use a long listing format showing size and mode")
+ f.BoolVar(&findOptions.HumanReadable, "human-readable", false, "print sizes in human readable format")
initMultiSnapshotFilter(f, &findOptions.SnapshotFilter, true)
}
@@ -104,12 +106,13 @@ func parseTime(str string) (time.Time, error) {
}
type statefulOutput struct {
- ListLong bool
- JSON bool
- inuse bool
- newsn *restic.Snapshot
- oldsn *restic.Snapshot
- hits int
+ ListLong bool
+ HumanReadable bool
+ JSON bool
+ inuse bool
+ newsn *restic.Snapshot
+ oldsn *restic.Snapshot
+ hits int
}
func (s *statefulOutput) PrintPatternJSON(path string, node *restic.Node) {
@@ -164,7 +167,7 @@ func (s *statefulOutput) PrintPatternNormal(path string, node *restic.Node) {
s.oldsn = s.newsn
Verbosef("Found matching entries in snapshot %s from %s\n", s.oldsn.ID().Str(), s.oldsn.Time.Local().Format(TimeFormat))
}
- Println(formatNode(path, node, s.ListLong))
+ Println(formatNode(path, node, s.ListLong, s.HumanReadable))
}
func (s *statefulOutput) PrintPattern(path string, node *restic.Node) {
@@ -501,7 +504,7 @@ func (f *Finder) indexPacksToBlobs(ctx context.Context, packIDs map[string]struc
return packIDs
}
-func (f *Finder) findObjectPack(ctx context.Context, id string, t restic.BlobType) {
+func (f *Finder) findObjectPack(id string, t restic.BlobType) {
idx := f.repo.Index()
rid, err := restic.ParseID(id)
@@ -524,13 +527,13 @@ func (f *Finder) findObjectPack(ctx context.Context, id string, t restic.BlobTyp
}
}
-func (f *Finder) findObjectsPacks(ctx context.Context) {
+func (f *Finder) findObjectsPacks() {
for i := range f.blobIDs {
- f.findObjectPack(ctx, i, restic.DataBlob)
+ f.findObjectPack(i, restic.DataBlob)
}
for i := range f.treeIDs {
- f.findObjectPack(ctx, i, restic.TreeBlob)
+ f.findObjectPack(i, restic.TreeBlob)
}
}
@@ -575,7 +578,7 @@ func runFind(ctx context.Context, opts FindOptions, gopts GlobalOptions, args []
if !gopts.NoLock {
var lock *restic.Lock
- lock, ctx, err = lockRepo(ctx, repo)
+ lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
defer unlockRepo(lock)
if err != nil {
return err
@@ -594,7 +597,7 @@ func runFind(ctx context.Context, opts FindOptions, gopts GlobalOptions, args []
f := &Finder{
repo: repo,
pat: pat,
- out: statefulOutput{ListLong: opts.ListLong, JSON: globalOptions.JSON},
+ out: statefulOutput{ListLong: opts.ListLong, HumanReadable: opts.HumanReadable, JSON: gopts.JSON},
ignoreTrees: restic.NewIDSet(),
}
@@ -618,7 +621,16 @@ func runFind(ctx context.Context, opts FindOptions, gopts GlobalOptions, args []
}
}
+ var filteredSnapshots []*restic.Snapshot
for sn := range FindFilteredSnapshots(ctx, snapshotLister, repo, &opts.SnapshotFilter, opts.Snapshots) {
+ filteredSnapshots = append(filteredSnapshots, sn)
+ }
+
+ sort.Slice(filteredSnapshots, func(i, j int) bool {
+ return filteredSnapshots[i].Time.Before(filteredSnapshots[j].Time)
+ })
+
+ for _, sn := range filteredSnapshots {
if f.blobIDs != nil || f.treeIDs != nil {
if err = f.findIDs(ctx, sn); err != nil && err.Error() != "OK" {
return err
@@ -632,7 +644,7 @@ func runFind(ctx context.Context, opts FindOptions, gopts GlobalOptions, args []
f.out.Finish()
if opts.ShowPackID && (f.blobIDs != nil || f.treeIDs != nil) {
- f.findObjectsPacks(ctx)
+ f.findObjectsPacks()
}
return nil
diff --git a/cmd/restic/cmd_find_integration_test.go b/cmd/restic/cmd_find_integration_test.go
new file mode 100644
index 000000000..dd8ab87fd
--- /dev/null
+++ b/cmd/restic/cmd_find_integration_test.go
@@ -0,0 +1,87 @@
+package main
+
+import (
+ "context"
+ "encoding/json"
+ "strings"
+ "testing"
+ "time"
+
+ rtest "github.com/restic/restic/internal/test"
+)
+
+func testRunFind(t testing.TB, wantJSON bool, gopts GlobalOptions, pattern string) []byte {
+ buf, err := withCaptureStdout(func() error {
+ gopts.JSON = wantJSON
+
+ opts := FindOptions{}
+ return runFind(context.TODO(), opts, gopts, []string{pattern})
+ })
+ rtest.OK(t, err)
+ return buf.Bytes()
+}
+
+func TestFind(t *testing.T) {
+ env, cleanup := withTestEnvironment(t)
+ defer cleanup()
+
+ datafile := testSetupBackupData(t, env)
+ opts := BackupOptions{}
+
+ testRunBackup(t, "", []string{env.testdata}, opts, env.gopts)
+ testRunCheck(t, env.gopts)
+
+ results := testRunFind(t, false, env.gopts, "unexistingfile")
+ rtest.Assert(t, len(results) == 0, "unexisting file found in repo (%v)", datafile)
+
+ results = testRunFind(t, false, env.gopts, "testfile")
+ lines := strings.Split(string(results), "\n")
+ rtest.Assert(t, len(lines) == 2, "expected one file found in repo (%v)", datafile)
+
+ results = testRunFind(t, false, env.gopts, "testfile*")
+ lines = strings.Split(string(results), "\n")
+ rtest.Assert(t, len(lines) == 4, "expected three files found in repo (%v)", datafile)
+}
+
+type testMatch struct {
+ Path string `json:"path,omitempty"`
+ Permissions string `json:"permissions,omitempty"`
+ Size uint64 `json:"size,omitempty"`
+ Date time.Time `json:"date,omitempty"`
+ UID uint32 `json:"uid,omitempty"`
+ GID uint32 `json:"gid,omitempty"`
+}
+
+type testMatches struct {
+ Hits int `json:"hits,omitempty"`
+ SnapshotID string `json:"snapshot,omitempty"`
+ Matches []testMatch `json:"matches,omitempty"`
+}
+
+func TestFindJSON(t *testing.T) {
+ env, cleanup := withTestEnvironment(t)
+ defer cleanup()
+
+ datafile := testSetupBackupData(t, env)
+ opts := BackupOptions{}
+
+ testRunBackup(t, "", []string{env.testdata}, opts, env.gopts)
+ testRunCheck(t, env.gopts)
+
+ results := testRunFind(t, true, env.gopts, "unexistingfile")
+ matches := []testMatches{}
+ rtest.OK(t, json.Unmarshal(results, &matches))
+ rtest.Assert(t, len(matches) == 0, "expected no match in repo (%v)", datafile)
+
+ results = testRunFind(t, true, env.gopts, "testfile")
+ rtest.OK(t, json.Unmarshal(results, &matches))
+ rtest.Assert(t, len(matches) == 1, "expected a single snapshot in repo (%v)", datafile)
+ rtest.Assert(t, len(matches[0].Matches) == 1, "expected a single file to match (%v)", datafile)
+ rtest.Assert(t, matches[0].Hits == 1, "expected hits to show 1 match (%v)", datafile)
+
+ results = testRunFind(t, true, env.gopts, "testfile*")
+ rtest.OK(t, json.Unmarshal(results, &matches))
+ rtest.Assert(t, len(matches) == 1, "expected a single snapshot in repo (%v)", datafile)
+ rtest.Assert(t, len(matches[0].Matches) == 3, "expected 3 files to match (%v)", datafile)
+ rtest.Assert(t, matches[0].Hits == 3, "expected hits to show 3 matches (%v)", datafile)
+}
diff --git a/cmd/restic/cmd_forget.go b/cmd/restic/cmd_forget.go
index e4e44a368..22398b806 100644
--- a/cmd/restic/cmd_forget.go
+++ b/cmd/restic/cmd_forget.go
@@ -4,6 +4,7 @@ import (
"context"
"encoding/json"
"io"
+ "strconv"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/restic"
@@ -36,14 +37,49 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
},
}
+type ForgetPolicyCount int
+
+var ErrNegativePolicyCount = errors.New("negative values not allowed, use 'unlimited' instead")
+
+func (c *ForgetPolicyCount) Set(s string) error {
+ switch s {
+ case "unlimited":
+ *c = -1
+ default:
+ val, err := strconv.ParseInt(s, 10, 0)
+ if err != nil {
+ return err
+ }
+ if val < 0 {
+ return ErrNegativePolicyCount
+ }
+ *c = ForgetPolicyCount(val)
+ }
+
+ return nil
+}
+
+func (c *ForgetPolicyCount) String() string {
+ switch *c {
+ case -1:
+ return "unlimited"
+ default:
+ return strconv.FormatInt(int64(*c), 10)
+ }
+}
+
+func (c *ForgetPolicyCount) Type() string {
+ return "n"
+}
+
// ForgetOptions collects all options for the forget command.
type ForgetOptions struct {
- Last int
- Hourly int
- Daily int
- Weekly int
- Monthly int
- Yearly int
+ Last ForgetPolicyCount
+ Hourly ForgetPolicyCount
+ Daily ForgetPolicyCount
+ Weekly ForgetPolicyCount
+ Monthly ForgetPolicyCount
+ Yearly ForgetPolicyCount
Within restic.Duration
WithinHourly restic.Duration
WithinDaily restic.Duration
@@ -56,7 +92,7 @@ type ForgetOptions struct {
Compact bool
// Grouping
- GroupBy string
+ GroupBy restic.SnapshotGroupByOptions
DryRun bool
Prune bool
}
@@ -67,12 +103,12 @@ func init() {
cmdRoot.AddCommand(cmdForget)
f := cmdForget.Flags()
- f.IntVarP(&forgetOptions.Last, "keep-last", "l", 0, "keep the last `n` snapshots")
- f.IntVarP(&forgetOptions.Hourly, "keep-hourly", "H", 0, "keep the last `n` hourly snapshots")
- f.IntVarP(&forgetOptions.Daily, "keep-daily", "d", 0, "keep the last `n` daily snapshots")
- f.IntVarP(&forgetOptions.Weekly, "keep-weekly", "w", 0, "keep the last `n` weekly snapshots")
- f.IntVarP(&forgetOptions.Monthly, "keep-monthly", "m", 0, "keep the last `n` monthly snapshots")
- f.IntVarP(&forgetOptions.Yearly, "keep-yearly", "y", 0, "keep the last `n` yearly snapshots")
+ f.VarP(&forgetOptions.Last, "keep-last", "l", "keep the last `n` snapshots (use 'unlimited' to keep all snapshots)")
+ f.VarP(&forgetOptions.Hourly, "keep-hourly", "H", "keep the last `n` hourly snapshots (use 'unlimited' to keep all hourly snapshots)")
+ f.VarP(&forgetOptions.Daily, "keep-daily", "d", "keep the last `n` daily snapshots (use 'unlimited' to keep all daily snapshots)")
+ f.VarP(&forgetOptions.Weekly, "keep-weekly", "w", "keep the last `n` weekly snapshots (use 'unlimited' to keep all weekly snapshots)")
+ f.VarP(&forgetOptions.Monthly, "keep-monthly", "m", "keep the last `n` monthly snapshots (use 'unlimited' to keep all monthly snapshots)")
+ f.VarP(&forgetOptions.Yearly, "keep-yearly", "y", "keep the last `n` yearly snapshots (use 'unlimited' to keep all yearly snapshots)")
f.VarP(&forgetOptions.Within, "keep-within", "", "keep snapshots that are newer than `duration` (eg. 1y5m7d2h) relative to the latest snapshot")
f.VarP(&forgetOptions.WithinHourly, "keep-within-hourly", "", "keep hourly snapshots that are newer than `duration` (eg. 1y5m7d2h) relative to the latest snapshot")
f.VarP(&forgetOptions.WithinDaily, "keep-within-daily", "", "keep daily snapshots that are newer than `duration` (eg. 1y5m7d2h) relative to the latest snapshot")
@@ -90,8 +126,8 @@ func init() {
}
f.BoolVarP(&forgetOptions.Compact, "compact", "c", false, "use compact output format")
-
- f.StringVarP(&forgetOptions.GroupBy, "group-by", "g", "host,paths", "`group` snapshots by host, paths and/or tags, separated by comma (disable grouping with '')")
+ forgetOptions.GroupBy = restic.SnapshotGroupByOptions{Host: true, Path: true}
+ f.VarP(&forgetOptions.GroupBy, "group-by", "g", "`group` snapshots by host, paths and/or tags, separated by comma (disable grouping with '')")
f.BoolVarP(&forgetOptions.DryRun, "dry-run", "n", false, "do not delete anything, just print what would be done")
f.BoolVar(&forgetOptions.Prune, "prune", false, "automatically run the 'prune' command if snapshots have been removed")
@@ -99,8 +135,29 @@ func init() {
addPruneOptions(cmdForget)
}
+func verifyForgetOptions(opts *ForgetOptions) error {
+ if opts.Last < -1 || opts.Hourly < -1 || opts.Daily < -1 || opts.Weekly < -1 ||
+ opts.Monthly < -1 || opts.Yearly < -1 {
+ return errors.Fatal("negative values other than -1 are not allowed for --keep-*")
+ }
+
+ for _, d := range []restic.Duration{opts.Within, opts.WithinHourly, opts.WithinDaily,
+ opts.WithinMonthly, opts.WithinWeekly, opts.WithinYearly} {
+ if d.Hours < 0 || d.Days < 0 || d.Months < 0 || d.Years < 0 {
+ return errors.Fatal("durations containing negative values are not allowed for --keep-within*")
+ }
+ }
+
+ return nil
+}
+
func runForget(ctx context.Context, opts ForgetOptions, gopts GlobalOptions, args []string) error {
- err := verifyPruneOptions(&pruneOptions)
+ err := verifyForgetOptions(&opts)
+ if err != nil {
+ return err
+ }
+
+ err = verifyPruneOptions(&pruneOptions)
if err != nil {
return err
}
@@ -116,7 +173,7 @@ func runForget(ctx context.Context, opts ForgetOptions, gopts GlobalOptions, arg
if !opts.DryRun || !gopts.NoLock {
var lock *restic.Lock
- lock, ctx, err = lockRepoExclusive(ctx, repo)
+ lock, ctx, err = lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON)
defer unlockRepo(lock)
if err != nil {
return err
@@ -144,12 +201,12 @@ func runForget(ctx context.Context, opts ForgetOptions, gopts GlobalOptions, arg
}
policy := restic.ExpirePolicy{
- Last: opts.Last,
- Hourly: opts.Hourly,
- Daily: opts.Daily,
- Weekly: opts.Weekly,
- Monthly: opts.Monthly,
- Yearly: opts.Yearly,
+ Last: int(opts.Last),
+ Hourly: int(opts.Hourly),
+ Daily: int(opts.Daily),
+ Weekly: int(opts.Weekly),
+ Monthly: int(opts.Monthly),
+ Yearly: int(opts.Yearly),
Within: opts.Within,
WithinHourly: opts.WithinHourly,
WithinDaily: opts.WithinDaily,
@@ -172,7 +229,7 @@ func runForget(ctx context.Context, opts ForgetOptions, gopts GlobalOptions, arg
for k, snapshotGroup := range snapshotGroups {
if gopts.Verbose >= 1 && !gopts.JSON {
- err = PrintSnapshotGroupHeader(gopts.stdout, k)
+ err = PrintSnapshotGroupHeader(globalOptions.stdout, k)
if err != nil {
return err
}
@@ -229,7 +286,7 @@ func runForget(ctx context.Context, opts ForgetOptions, gopts GlobalOptions, arg
}
if gopts.JSON && len(jsonGroups) > 0 {
- err = printJSONForget(gopts.stdout, jsonGroups)
+ err = printJSONForget(globalOptions.stdout, jsonGroups)
if err != nil {
return err
}
diff --git a/cmd/restic/cmd_forget_integration_test.go b/cmd/restic/cmd_forget_integration_test.go
new file mode 100644
index 000000000..8908d5a5f
--- /dev/null
+++ b/cmd/restic/cmd_forget_integration_test.go
@@ -0,0 +1,13 @@
+package main
+
+import (
+ "context"
+ "testing"
+
+ rtest "github.com/restic/restic/internal/test"
+)
+
+func testRunForget(t testing.TB, gopts GlobalOptions, args ...string) {
+ opts := ForgetOptions{}
+ rtest.OK(t, runForget(context.TODO(), opts, gopts, args))
+}
diff --git a/cmd/restic/cmd_forget_test.go b/cmd/restic/cmd_forget_test.go
new file mode 100644
index 000000000..ddeef028a
--- /dev/null
+++ b/cmd/restic/cmd_forget_test.go
@@ -0,0 +1,94 @@
+package main
+
+import (
+ "testing"
+
+ "github.com/restic/restic/internal/restic"
+ rtest "github.com/restic/restic/internal/test"
+)
+
+func TestForgetPolicyValues(t *testing.T) {
+ testCases := []struct {
+ input string
+ value ForgetPolicyCount
+ err string
+ }{
+ {"0", ForgetPolicyCount(0), ""},
+ {"1", ForgetPolicyCount(1), ""},
+ {"unlimited", ForgetPolicyCount(-1), ""},
+ {"", ForgetPolicyCount(0), "strconv.ParseInt: parsing \"\": invalid syntax"},
+ {"-1", ForgetPolicyCount(0), ErrNegativePolicyCount.Error()},
+ {"abc", ForgetPolicyCount(0), "strconv.ParseInt: parsing \"abc\": invalid syntax"},
+ }
+ for _, testCase := range testCases {
+ t.Run("", func(t *testing.T) {
+ var count ForgetPolicyCount
+ err := count.Set(testCase.input)
+
+ if testCase.err != "" {
+ rtest.Assert(t, err != nil, "should have returned error for input %+v", testCase.input)
+ rtest.Equals(t, testCase.err, err.Error())
+ } else {
+ rtest.Assert(t, err == nil, "expected no error for input %+v, got %v", testCase.input, err)
+ rtest.Equals(t, testCase.value, count)
+ rtest.Equals(t, testCase.input, count.String())
+ }
+ })
+ }
+}
+
+func TestForgetOptionValues(t *testing.T) {
+ const negValErrorMsg = "Fatal: negative values other than -1 are not allowed for --keep-*"
+ const negDurationValErrorMsg = "Fatal: durations containing negative values are not allowed for --keep-within*"
+ testCases := []struct {
+ input ForgetOptions
+ errorMsg string
+ }{
+ {ForgetOptions{Last: 1}, ""},
+ {ForgetOptions{Hourly: 1}, ""},
+ {ForgetOptions{Daily: 1}, ""},
+ {ForgetOptions{Weekly: 1}, ""},
+ {ForgetOptions{Monthly: 1}, ""},
+ {ForgetOptions{Yearly: 1}, ""},
+ {ForgetOptions{Last: 0}, ""},
+ {ForgetOptions{Hourly: 0}, ""},
+ {ForgetOptions{Daily: 0}, ""},
+ {ForgetOptions{Weekly: 0}, ""},
+ {ForgetOptions{Monthly: 0}, ""},
+ {ForgetOptions{Yearly: 0}, ""},
+ {ForgetOptions{Last: -1}, ""},
+ {ForgetOptions{Hourly: -1}, ""},
+ {ForgetOptions{Daily: -1}, ""},
+ {ForgetOptions{Weekly: -1}, ""},
+ {ForgetOptions{Monthly: -1}, ""},
+ {ForgetOptions{Yearly: -1}, ""},
+ {ForgetOptions{Last: -2}, negValErrorMsg},
+ {ForgetOptions{Hourly: -2}, negValErrorMsg},
+ {ForgetOptions{Daily: -2}, negValErrorMsg},
+ {ForgetOptions{Weekly: -2}, negValErrorMsg},
+ {ForgetOptions{Monthly: -2}, negValErrorMsg},
+ {ForgetOptions{Yearly: -2}, negValErrorMsg},
+ {ForgetOptions{Within: restic.ParseDurationOrPanic("1y2m3d3h")}, ""},
+ {ForgetOptions{WithinHourly: restic.ParseDurationOrPanic("1y2m3d3h")}, ""},
+ {ForgetOptions{WithinDaily: restic.ParseDurationOrPanic("1y2m3d3h")}, ""},
+ {ForgetOptions{WithinWeekly: restic.ParseDurationOrPanic("1y2m3d3h")}, ""},
+ {ForgetOptions{WithinMonthly: restic.ParseDurationOrPanic("2y4m6d8h")}, ""},
+ {ForgetOptions{WithinYearly: restic.ParseDurationOrPanic("2y4m6d8h")}, ""},
+ {ForgetOptions{Within: restic.ParseDurationOrPanic("-1y2m3d3h")}, negDurationValErrorMsg},
+ {ForgetOptions{WithinHourly: restic.ParseDurationOrPanic("1y-2m3d3h")}, negDurationValErrorMsg},
+ {ForgetOptions{WithinDaily: restic.ParseDurationOrPanic("1y2m-3d3h")}, negDurationValErrorMsg},
+ {ForgetOptions{WithinWeekly: restic.ParseDurationOrPanic("1y2m3d-3h")}, negDurationValErrorMsg},
+ {ForgetOptions{WithinMonthly: restic.ParseDurationOrPanic("-2y4m6d8h")}, negDurationValErrorMsg},
+ {ForgetOptions{WithinYearly: restic.ParseDurationOrPanic("2y-4m6d8h")}, negDurationValErrorMsg},
+ }
+
+ for _, testCase := range testCases {
+ err := verifyForgetOptions(&testCase.input)
+ if testCase.errorMsg != "" {
+ rtest.Assert(t, err != nil, "should have returned error for input %+v", testCase.input)
+ rtest.Equals(t, testCase.errorMsg, err.Error())
+ } else {
+ rtest.Assert(t, err == nil, "expected no error for input %+v", testCase.input)
+ }
+ }
+}
diff --git a/cmd/restic/cmd_generate.go b/cmd/restic/cmd_generate.go
index 959a9d518..b284767ca 100644
--- a/cmd/restic/cmd_generate.go
+++ b/cmd/restic/cmd_generate.go
@@ -63,26 +63,38 @@ func writeManpages(dir string) error {
}
func writeBashCompletion(file string) error {
- Verbosef("writing bash completion file to %v\n", file)
+ if stdoutIsTerminal() {
+ Verbosef("writing bash completion file to %v\n", file)
+ }
return cmdRoot.GenBashCompletionFile(file)
}
func writeFishCompletion(file string) error {
- Verbosef("writing fish completion file to %v\n", file)
+ if stdoutIsTerminal() {
+ Verbosef("writing fish completion file to %v\n", file)
+ }
return cmdRoot.GenFishCompletionFile(file, true)
}
func writeZSHCompletion(file string) error {
- Verbosef("writing zsh completion file to %v\n", file)
+ if stdoutIsTerminal() {
+ Verbosef("writing zsh completion file to %v\n", file)
+ }
return cmdRoot.GenZshCompletionFile(file)
}
func writePowerShellCompletion(file string) error {
- Verbosef("writing powershell completion file to %v\n", file)
+ if stdoutIsTerminal() {
+ Verbosef("writing powershell completion file to %v\n", file)
+ }
return cmdRoot.GenPowerShellCompletionFile(file)
}
-func runGenerate(cmd *cobra.Command, args []string) error {
+func runGenerate(_ *cobra.Command, args []string) error {
+ if len(args) > 0 {
+ return errors.Fatal("the generate command expects no arguments, only options - please see `restic help generate` for usage and flags")
+ }
+
if genOpts.ManDir != "" {
err := writeManpages(genOpts.ManDir)
if err != nil {
diff --git a/cmd/restic/cmd_init.go b/cmd/restic/cmd_init.go
index 2932870e8..b9dabdc2d 100644
--- a/cmd/restic/cmd_init.go
+++ b/cmd/restic/cmd_init.go
@@ -50,6 +50,10 @@ func init() {
}
func runInit(ctx context.Context, opts InitOptions, gopts GlobalOptions, args []string) error {
+ if len(args) > 0 {
+ return errors.Fatal("the init command expects no arguments, only options - please see `restic help init` for usage and flags")
+ }
+
var version uint
if opts.RepositoryVersion == "latest" || opts.RepositoryVersion == "" {
version = restic.MaxRepoVersion
@@ -83,9 +87,9 @@ func runInit(ctx context.Context, opts InitOptions, gopts GlobalOptions, args []
return err
}
- be, err := create(ctx, repo, gopts.extended)
+ be, err := create(ctx, repo, gopts, gopts.extended)
if err != nil {
- return errors.Fatalf("create repository at %s failed: %v\n", location.StripPassword(gopts.Repo), err)
+ return errors.Fatalf("create repository at %s failed: %v\n", location.StripPassword(gopts.backends, gopts.Repo), err)
}
s, err := repository.New(be, repository.Options{
@@ -93,16 +97,21 @@ func runInit(ctx context.Context, opts InitOptions, gopts GlobalOptions, args []
PackSize: gopts.PackSize * 1024 * 1024,
})
if err != nil {
- return err
+ return errors.Fatal(err.Error())
}
err = s.Init(ctx, version, gopts.password, chunkerPolynomial)
if err != nil {
- return errors.Fatalf("create key in repository at %s failed: %v\n", location.StripPassword(gopts.Repo), err)
+ return errors.Fatalf("create key in repository at %s failed: %v\n", location.StripPassword(gopts.backends, gopts.Repo), err)
}
if !gopts.JSON {
- Verbosef("created restic repository %v at %s\n", s.Config().ID[:10], location.StripPassword(gopts.Repo))
+ Verbosef("created restic repository %v at %s", s.Config().ID[:10], location.StripPassword(gopts.backends, gopts.Repo))
+ if opts.CopyChunkerParameters && chunkerPolynomial != nil {
+ Verbosef(" with chunker parameters copied from secondary repository\n")
+ } else {
+ Verbosef("\n")
+ }
Verbosef("\n")
Verbosef("Please note that knowledge of your password is required to access\n")
Verbosef("the repository. Losing your password means that your data is\n")
@@ -112,9 +121,9 @@ func runInit(ctx context.Context, opts InitOptions, gopts GlobalOptions, args []
status := initSuccess{
MessageType: "initialized",
ID: s.Config().ID,
- Repository: location.StripPassword(gopts.Repo),
+ Repository: location.StripPassword(gopts.backends, gopts.Repo),
}
- return json.NewEncoder(gopts.stdout).Encode(status)
+ return json.NewEncoder(globalOptions.stdout).Encode(status)
}
return nil
diff --git a/cmd/restic/cmd_init_integration_test.go b/cmd/restic/cmd_init_integration_test.go
new file mode 100644
index 000000000..9b5eed6e0
--- /dev/null
+++ b/cmd/restic/cmd_init_integration_test.go
@@ -0,0 +1,49 @@
+package main
+
+import (
+ "context"
+ "testing"
+
+ "github.com/restic/restic/internal/repository"
+ "github.com/restic/restic/internal/restic"
+ rtest "github.com/restic/restic/internal/test"
+)
+
+func testRunInit(t testing.TB, opts GlobalOptions) {
+ repository.TestUseLowSecurityKDFParameters(t)
+ restic.TestDisableCheckPolynomial(t)
+ restic.TestSetLockTimeout(t, 0)
+
+ rtest.OK(t, runInit(context.TODO(), InitOptions{}, opts, nil))
+ t.Logf("repository initialized at %v", opts.Repo)
+}
+
+func TestInitCopyChunkerParams(t *testing.T) {
+ env, cleanup := withTestEnvironment(t)
+ defer cleanup()
+ env2, cleanup2 := withTestEnvironment(t)
+ defer cleanup2()
+
+ testRunInit(t, env2.gopts)
+
+ initOpts := InitOptions{
+ secondaryRepoOptions: secondaryRepoOptions{
+ Repo: env2.gopts.Repo,
+ password: env2.gopts.password,
+ },
+ }
+ rtest.Assert(t, runInit(context.TODO(), initOpts, env.gopts, nil) != nil, "expected invalid init options to fail")
+
+ initOpts.CopyChunkerParameters = true
+ rtest.OK(t, runInit(context.TODO(), initOpts, env.gopts, nil))
+
+ repo, err := OpenRepository(context.TODO(), env.gopts)
+ rtest.OK(t, err)
+
+ otherRepo, err := OpenRepository(context.TODO(), env2.gopts)
+ rtest.OK(t, err)
+
+ rtest.Assert(t, repo.Config().ChunkerPolynomial == otherRepo.Config().ChunkerPolynomial,
+ "expected equal chunker polynomials, got %v expected %v", repo.Config().ChunkerPolynomial,
+ otherRepo.Config().ChunkerPolynomial)
+}
diff --git a/cmd/restic/cmd_key.go b/cmd/restic/cmd_key.go
index 88b6d5c0c..62521d762 100644
--- a/cmd/restic/cmd_key.go
+++ b/cmd/restic/cmd_key.go
@@ -212,7 +212,7 @@ func runKey(ctx context.Context, gopts GlobalOptions, args []string) error {
switch args[0] {
case "list":
- lock, ctx, err := lockRepo(ctx, repo)
+ lock, ctx, err := lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
defer unlockRepo(lock)
if err != nil {
return err
@@ -220,7 +220,7 @@ func runKey(ctx context.Context, gopts GlobalOptions, args []string) error {
return listKeys(ctx, repo, gopts)
case "add":
- lock, ctx, err := lockRepo(ctx, repo)
+ lock, ctx, err := lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
defer unlockRepo(lock)
if err != nil {
return err
@@ -228,7 +228,7 @@ func runKey(ctx context.Context, gopts GlobalOptions, args []string) error {
return addKey(ctx, repo, gopts)
case "remove":
- lock, ctx, err := lockRepoExclusive(ctx, repo)
+ lock, ctx, err := lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON)
defer unlockRepo(lock)
if err != nil {
return err
@@ -241,7 +241,7 @@ func runKey(ctx context.Context, gopts GlobalOptions, args []string) error {
return deleteKey(ctx, repo, id)
case "passwd":
- lock, ctx, err := lockRepoExclusive(ctx, repo)
+ lock, ctx, err := lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON)
defer unlockRepo(lock)
if err != nil {
return err
diff --git a/cmd/restic/cmd_key_integration_test.go b/cmd/restic/cmd_key_integration_test.go
new file mode 100644
index 000000000..9ea5795ba
--- /dev/null
+++ b/cmd/restic/cmd_key_integration_test.go
@@ -0,0 +1,145 @@
+package main
+
+import (
+ "bufio"
+ "context"
+ "regexp"
+ "testing"
+
+ "github.com/restic/restic/internal/repository"
+ "github.com/restic/restic/internal/restic"
+ rtest "github.com/restic/restic/internal/test"
+)
+
+func testRunKeyListOtherIDs(t testing.TB, gopts GlobalOptions) []string {
+ buf, err := withCaptureStdout(func() error {
+ return runKey(context.TODO(), gopts, []string{"list"})
+ })
+ rtest.OK(t, err)
+
+ scanner := bufio.NewScanner(buf)
+ exp := regexp.MustCompile(`^ ([a-f0-9]+) `)
+
+ IDs := []string{}
+ for scanner.Scan() {
+ if id := exp.FindStringSubmatch(scanner.Text()); id != nil {
+ IDs = append(IDs, id[1])
+ }
+ }
+
+ return IDs
+}
+
+func testRunKeyAddNewKey(t testing.TB, newPassword string, gopts GlobalOptions) {
+ testKeyNewPassword = newPassword
+ defer func() {
+ testKeyNewPassword = ""
+ }()
+
+ rtest.OK(t, runKey(context.TODO(), gopts, []string{"add"}))
+}
+
+func testRunKeyAddNewKeyUserHost(t testing.TB, gopts GlobalOptions) {
+ testKeyNewPassword = "john's geheimnis"
+ defer func() {
+ testKeyNewPassword = ""
+ keyUsername = ""
+ keyHostname = ""
+ }()
+
+ rtest.OK(t, cmdKey.Flags().Parse([]string{"--user=john", "--host=example.com"}))
+
+ t.Log("adding key for john@example.com")
+ rtest.OK(t, runKey(context.TODO(), gopts, []string{"add"}))
+
+ repo, err := OpenRepository(context.TODO(), gopts)
+ rtest.OK(t, err)
+ key, err := repository.SearchKey(context.TODO(), repo, testKeyNewPassword, 2, "")
+ rtest.OK(t, err)
+
+ rtest.Equals(t, "john", key.Username)
+ rtest.Equals(t, "example.com", key.Hostname)
+}
+
+func testRunKeyPasswd(t testing.TB, newPassword string, gopts GlobalOptions) {
+ testKeyNewPassword = newPassword
+ defer func() {
+ testKeyNewPassword = ""
+ }()
+
+ rtest.OK(t, runKey(context.TODO(), gopts, []string{"passwd"}))
+}
+
+func testRunKeyRemove(t testing.TB, gopts GlobalOptions, IDs []string) {
+ t.Logf("remove %d keys: %q\n", len(IDs), IDs)
+ for _, id := range IDs {
+ rtest.OK(t, runKey(context.TODO(), gopts, []string{"remove", id}))
+ }
+}
+
+func TestKeyAddRemove(t *testing.T) {
+ passwordList := []string{
+ "OnnyiasyatvodsEvVodyawit",
+ "raicneirvOjEfEigonOmLasOd",
+ }
+
+ env, cleanup := withTestEnvironment(t)
+ // must list keys more than once
+ env.gopts.backendTestHook = nil
+ defer cleanup()
+
+ testRunInit(t, env.gopts)
+
+ testRunKeyPasswd(t, "geheim2", env.gopts)
+ env.gopts.password = "geheim2"
+ t.Logf("changed password to %q", env.gopts.password)
+
+ for _, newPassword := range passwordList {
+ testRunKeyAddNewKey(t, newPassword, env.gopts)
+ t.Logf("added new password %q", newPassword)
+ env.gopts.password = newPassword
+ testRunKeyRemove(t, env.gopts, testRunKeyListOtherIDs(t, env.gopts))
+ }
+
+ env.gopts.password = passwordList[len(passwordList)-1]
+ t.Logf("testing access with last password %q\n", env.gopts.password)
+ rtest.OK(t, runKey(context.TODO(), env.gopts, []string{"list"}))
+ testRunCheck(t, env.gopts)
+
+ testRunKeyAddNewKeyUserHost(t, env.gopts)
+}
+
+type emptySaveBackend struct {
+ restic.Backend
+}
+
+func (b *emptySaveBackend) Save(ctx context.Context, h restic.Handle, _ restic.RewindReader) error {
+ return b.Backend.Save(ctx, h, restic.NewByteReader([]byte{}, nil))
+}
+
+func TestKeyProblems(t *testing.T) {
+ env, cleanup := withTestEnvironment(t)
+ defer cleanup()
+
+ testRunInit(t, env.gopts)
+ env.gopts.backendTestHook = func(r restic.Backend) (restic.Backend, error) {
+ return &emptySaveBackend{r}, nil
+ }
+
+ testKeyNewPassword = "geheim2"
+ defer func() {
+ testKeyNewPassword = ""
+ }()
+
+ err := runKey(context.TODO(), env.gopts, []string{"passwd"})
+ t.Log(err)
+ rtest.Assert(t, err != nil, "expected passwd change to fail")
+
+ err = runKey(context.TODO(), env.gopts, []string{"add"})
+ t.Log(err)
+ rtest.Assert(t, err != nil, "expected key adding to fail")
+
+ t.Logf("testing access with initial password %q\n", env.gopts.password)
+ rtest.OK(t, runKey(context.TODO(), env.gopts, []string{"list"}))
+ testRunCheck(t, env.gopts)
+}
diff --git a/cmd/restic/cmd_list.go b/cmd/restic/cmd_list.go
index 4809092c0..bd02cedc7 100644
--- a/cmd/restic/cmd_list.go
+++ b/cmd/restic/cmd_list.go
@@ -31,19 +31,19 @@ func init() {
cmdRoot.AddCommand(cmdList)
}
-func runList(ctx context.Context, cmd *cobra.Command, opts GlobalOptions, args []string) error {
+func runList(ctx context.Context, cmd *cobra.Command, gopts GlobalOptions, args []string) error {
if len(args) != 1 {
return errors.Fatal("type not specified, usage: " + cmd.Use)
}
- repo, err := OpenRepository(ctx, opts)
+ repo, err := OpenRepository(ctx, gopts)
if err != nil {
return err
}
- if !opts.NoLock && args[0] != "locks" {
+ if !gopts.NoLock && args[0] != "locks" {
var lock *restic.Lock
- lock, ctx, err = lockRepo(ctx, repo)
+ lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
defer unlockRepo(lock)
if err != nil {
return err
diff --git a/cmd/restic/cmd_list_integration_test.go b/cmd/restic/cmd_list_integration_test.go
new file mode 100644
index 000000000..4140a3ea8
--- /dev/null
+++ b/cmd/restic/cmd_list_integration_test.go
@@ -0,0 +1,44 @@
+package main
+
+import (
+ "bufio"
+ "context"
+ "io"
+ "testing"
+
+ "github.com/restic/restic/internal/restic"
+ rtest "github.com/restic/restic/internal/test"
+)
+
+func testRunList(t testing.TB, tpe string, opts GlobalOptions) restic.IDs {
+ buf, err := withCaptureStdout(func() error {
+ return runList(context.TODO(), cmdList, opts, []string{tpe})
+ })
+ rtest.OK(t, err)
+ return parseIDsFromReader(t, buf)
+}
+
+func parseIDsFromReader(t testing.TB, rd io.Reader) restic.IDs {
+ t.Helper()
+ IDs := restic.IDs{}
+ sc := bufio.NewScanner(rd)
+
+ for sc.Scan() {
+ id, err := restic.ParseID(sc.Text())
+ if err != nil {
+ t.Logf("parse id %v: %v", sc.Text(), err)
+ continue
+ }
+
+ IDs = append(IDs, id)
+ }
+
+ return IDs
+}
+
+func testListSnapshots(t testing.TB, opts GlobalOptions, expected int) restic.IDs {
+ t.Helper()
+ snapshotIDs := testRunList(t, "snapshots", opts)
+ rtest.Assert(t, len(snapshotIDs) == expected, "expected %v snapshot, got %v", expected, snapshotIDs)
+ return snapshotIDs
+}
diff --git a/cmd/restic/cmd_ls.go b/cmd/restic/cmd_ls.go
index aeaa750eb..256c9e002 100644
--- a/cmd/restic/cmd_ls.go
+++ b/cmd/restic/cmd_ls.go
@@ -50,7 +50,8 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
type LsOptions struct {
ListLong bool
restic.SnapshotFilter
- Recursive bool
+ Recursive bool
+ HumanReadable bool
}
var lsOptions LsOptions
@@ -62,6 +63,7 @@ func init() {
initSingleSnapshotFilter(flags, &lsOptions.SnapshotFilter)
flags.BoolVarP(&lsOptions.ListLong, "long", "l", false, "use a long listing format showing size and mode")
flags.BoolVar(&lsOptions.Recursive, "recursive", false, "include files in subfolders of the listed directories")
+ flags.BoolVar(&lsOptions.HumanReadable, "human-readable", false, "print sizes in human readable format")
}
type lsSnapshot struct {
@@ -181,7 +183,7 @@ func runLs(ctx context.Context, opts LsOptions, gopts GlobalOptions, args []stri
)
if gopts.JSON {
- enc := json.NewEncoder(gopts.stdout)
+ enc := json.NewEncoder(globalOptions.stdout)
printSnapshot = func(sn *restic.Snapshot) {
err := enc.Encode(lsSnapshot{
@@ -206,11 +208,11 @@ func runLs(ctx context.Context, opts LsOptions, gopts GlobalOptions, args []stri
Verbosef("snapshot %s of %v filtered by %v at %s):\n", sn.ID().Str(), sn.Paths, dirs, sn.Time)
}
printNode = func(path string, node *restic.Node) {
- Printf("%s\n", formatNode(path, node, lsOptions.ListLong))
+ Printf("%s\n", formatNode(path, node, lsOptions.ListLong, lsOptions.HumanReadable))
}
}
- sn, err := (&restic.SnapshotFilter{
+ sn, subfolder, err := (&restic.SnapshotFilter{
Hosts: opts.Hosts,
Paths: opts.Paths,
Tags: opts.Tags,
@@ -219,6 +221,11 @@ func runLs(ctx context.Context, opts LsOptions, gopts GlobalOptions, args []stri
return err
}
+ sn.Tree, err = restic.FindTreeDirectory(ctx, repo, sn.Tree, subfolder)
+ if err != nil {
+ return err
+ }
+
printSnapshot(sn)
err = walker.Walk(ctx, repo, *sn.Tree, nil, func(_ restic.ID, nodepath string, node *restic.Node, err error) (bool, error) {
diff --git a/cmd/restic/cmd_ls_integration_test.go b/cmd/restic/cmd_ls_integration_test.go
new file mode 100644
index 000000000..39bf9c3b0
--- /dev/null
+++ b/cmd/restic/cmd_ls_integration_test.go
@@ -0,0 +1,19 @@
+package main
+
+import (
+ "context"
+ "strings"
+ "testing"
+
+ rtest "github.com/restic/restic/internal/test"
+)
+
+func testRunLs(t testing.TB, gopts GlobalOptions, snapshotID string) []string {
+ buf, err := withCaptureStdout(func() error {
+ gopts.Quiet = true
+ opts := LsOptions{}
+ return runLs(context.TODO(), opts, gopts, []string{snapshotID})
+ })
+ rtest.OK(t, err)
+ return strings.Split(buf.String(), "\n")
+}
diff --git a/cmd/restic/cmd_migrate.go b/cmd/restic/cmd_migrate.go
index 6d614be39..fd2e762c0 100644
--- a/cmd/restic/cmd_migrate.go
+++ b/cmd/restic/cmd_migrate.go
@@ -122,7 +122,7 @@ func runMigrate(ctx context.Context, opts MigrateOptions, gopts GlobalOptions, a
return err
}
- lock, ctx, err := lockRepoExclusive(ctx, repo)
+ lock, ctx, err := lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON)
defer unlockRepo(lock)
if err != nil {
return err
diff --git a/cmd/restic/cmd_mount.go b/cmd/restic/cmd_mount.go
index 0501bfe89..ec3662d5c 100644
--- a/cmd/restic/cmd_mount.go
+++ b/cmd/restic/cmd_mount.go
@@ -123,7 +123,7 @@ func runMount(ctx context.Context, opts MountOptions, gopts GlobalOptions, args
if !gopts.NoLock {
var lock *restic.Lock
- lock, ctx, err = lockRepo(ctx, repo)
+ lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
defer unlockRepo(lock)
if err != nil {
return err
diff --git a/cmd/restic/integration_fuse_test.go b/cmd/restic/cmd_mount_integration_test.go
index a99064b8f..1b069d582 100644
--- a/cmd/restic/integration_fuse_test.go
+++ b/cmd/restic/cmd_mount_integration_test.go
@@ -12,6 +12,7 @@ import (
"testing"
"time"
+ "github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/repository"
"github.com/restic/restic/internal/restic"
rtest "github.com/restic/restic/internal/test"
@@ -63,7 +64,7 @@ func testRunMount(t testing.TB, gopts GlobalOptions, dir string, wg *sync.WaitGr
rtest.OK(t, runMount(context.TODO(), opts, gopts, []string{dir}))
}
-func testRunUmount(t testing.TB, gopts GlobalOptions, dir string) {
+func testRunUmount(t testing.TB, dir string) {
var err error
for i := 0; i < mountWait; i++ {
if err = umount(dir); err == nil {
@@ -94,7 +95,7 @@ func checkSnapshots(t testing.TB, global GlobalOptions, repo *repository.Reposit
go testRunMount(t, global, mountpoint, &wg)
waitForMount(t, mountpoint)
defer wg.Wait()
- defer testRunUmount(t, global, mountpoint)
+ defer testRunUmount(t, mountpoint)
if !snapshotsDirExists(t, mountpoint) {
t.Fatal(`virtual directory "snapshots" doesn't exist`)
@@ -159,6 +160,11 @@ func TestMount(t *testing.T) {
t.Skip("Skipping fuse tests")
}
+ debugEnabled := debug.TestLogToStderr(t)
+ if debugEnabled {
+ defer debug.TestDisableLog(t)
+ }
+
env, cleanup := withTestEnvironment(t)
// must list snapshots more than once
env.gopts.backendTestHook = nil
diff --git a/cmd/restic/cmd_prune.go b/cmd/restic/cmd_prune.go
index f59be2967..e4c2c7b29 100644
--- a/cmd/restic/cmd_prune.go
+++ b/cmd/restic/cmd_prune.go
@@ -3,6 +3,7 @@ package main
import (
"context"
"math"
+ "runtime"
"sort"
"strconv"
"strings"
@@ -80,7 +81,7 @@ func addPruneOptions(c *cobra.Command) {
func verifyPruneOptions(opts *PruneOptions) error {
opts.MaxRepackBytes = math.MaxUint64
if len(opts.MaxRepackSize) > 0 {
- size, err := parseSizeStr(opts.MaxRepackSize)
+ size, err := ui.ParseBytes(opts.MaxRepackSize)
if err != nil {
return err
}
@@ -123,7 +124,7 @@ func verifyPruneOptions(opts *PruneOptions) error {
}
default:
- size, err := parseSizeStr(maxUnused)
+ size, err := ui.ParseBytes(maxUnused)
if err != nil {
return errors.Fatalf("invalid number of bytes %q for --max-unused: %v", opts.MaxUnused, err)
}
@@ -167,7 +168,7 @@ func runPrune(ctx context.Context, opts PruneOptions, gopts GlobalOptions) error
opts.unsafeRecovery = true
}
- lock, ctx, err := lockRepoExclusive(ctx, repo)
+ lock, ctx, err := lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON)
defer unlockRepo(lock)
if err != nil {
return err
@@ -205,6 +206,9 @@ func runPruneWithRepo(ctx context.Context, opts PruneOptions, gopts GlobalOption
return err
}
+ // Trigger GC to reset garbage collection threshold
+ runtime.GC()
+
return doPrune(ctx, opts, gopts, repo, plan)
}
@@ -488,7 +492,7 @@ func decidePackAction(ctx context.Context, opts PruneOptions, repo restic.Reposi
// Pack size does not fit and pack is needed => error
// If the pack is not needed, this is no error, the pack can
// and will be simply removed, see below.
- Warnf("pack %s: calculated size %d does not match real size %d\nRun 'restic rebuild-index'.\n",
+ Warnf("pack %s: calculated size %d does not match real size %d\nRun 'restic repair index'.\n",
id.Str(), p.unusedSize+p.usedSize, packSize)
return errorSizeNotMatching
}
@@ -729,7 +733,7 @@ func doPrune(ctx context.Context, opts PruneOptions, gopts GlobalOptions, repo r
_, err := repository.Repack(ctx, repo, repo, plan.repackPacks, plan.keepBlobs, bar)
bar.Done()
if err != nil {
- return errors.Fatalf("%s", err)
+ return errors.Fatal(err.Error())
}
// Also remove repacked packs
diff --git a/cmd/restic/cmd_prune_integration_test.go b/cmd/restic/cmd_prune_integration_test.go
new file mode 100644
index 000000000..2cd86d895
--- /dev/null
+++ b/cmd/restic/cmd_prune_integration_test.go
@@ -0,0 +1,221 @@
+package main
+
+import (
+ "context"
+ "encoding/json"
+ "path/filepath"
+ "testing"
+
+ "github.com/restic/restic/internal/restic"
+ rtest "github.com/restic/restic/internal/test"
+)
+
+func testRunPrune(t testing.TB, gopts GlobalOptions, opts PruneOptions) {
+ oldHook := gopts.backendTestHook
+ gopts.backendTestHook = func(r restic.Backend) (restic.Backend, error) { return newListOnceBackend(r), nil }
+ defer func() {
+ gopts.backendTestHook = oldHook
+ }()
+ rtest.OK(t, runPrune(context.TODO(), opts, gopts))
+}
+
+func TestPrune(t *testing.T) {
+ testPruneVariants(t, false)
+ testPruneVariants(t, true)
+}
+
+func testPruneVariants(t *testing.T, unsafeNoSpaceRecovery bool) {
+ suffix := ""
+ if unsafeNoSpaceRecovery {
+ suffix = "-recovery"
+ }
+ t.Run("0"+suffix, func(t *testing.T) {
+ opts := PruneOptions{MaxUnused: "0%", unsafeRecovery: unsafeNoSpaceRecovery}
+ checkOpts := CheckOptions{ReadData: true, CheckUnused: true}
+ testPrune(t, opts, checkOpts)
+ })
+
+ t.Run("50"+suffix, func(t *testing.T) {
+ opts := PruneOptions{MaxUnused: "50%", unsafeRecovery: unsafeNoSpaceRecovery}
+ checkOpts := CheckOptions{ReadData: true}
+ testPrune(t, opts, checkOpts)
+ })
+
+ t.Run("unlimited"+suffix, func(t *testing.T) {
+ opts := PruneOptions{MaxUnused: "unlimited", unsafeRecovery: unsafeNoSpaceRecovery}
+ checkOpts := CheckOptions{ReadData: true}
+ testPrune(t, opts, checkOpts)
+ })
+
+ t.Run("CachableOnly"+suffix, func(t *testing.T) {
+ opts := PruneOptions{MaxUnused: "5%", RepackCachableOnly: true, unsafeRecovery: unsafeNoSpaceRecovery}
+ checkOpts := CheckOptions{ReadData: true}
+ testPrune(t, opts, checkOpts)
+ })
+ t.Run("Small", func(t *testing.T) {
+ opts := PruneOptions{MaxUnused: "unlimited", RepackSmall: true}
+ checkOpts := CheckOptions{ReadData: true, CheckUnused: true}
+ testPrune(t, opts, checkOpts)
+ })
+}
+
+func createPrunableRepo(t *testing.T, env *testEnvironment) {
+ testSetupBackupData(t, env)
+ opts := BackupOptions{}
+
+ testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9")}, opts, env.gopts)
+ firstSnapshot := testListSnapshots(t, env.gopts, 1)[0]
+
+ testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "2")}, opts, env.gopts)
+ testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "3")}, opts, env.gopts)
+ testListSnapshots(t, env.gopts, 3)
+
+ testRunForgetJSON(t, env.gopts)
+ testRunForget(t, env.gopts, firstSnapshot.String())
+}
+
+func testRunForgetJSON(t testing.TB, gopts GlobalOptions, args ...string) {
+ buf, err := withCaptureStdout(func() error {
+ gopts.JSON = true
+ opts := ForgetOptions{
+ DryRun: true,
+ Last: 1,
+ }
+ return runForget(context.TODO(), opts, gopts, args)
+ })
+ rtest.OK(t, err)
+
+ var forgets []*ForgetGroup
+ rtest.OK(t, json.Unmarshal(buf.Bytes(), &forgets))
+
+ rtest.Assert(t, len(forgets) == 1,
+ "Expected 1 snapshot group, got %v", len(forgets))
+ rtest.Assert(t, len(forgets[0].Keep) == 1,
+ "Expected 1 snapshot to be kept, got %v", len(forgets[0].Keep))
+ rtest.Assert(t, len(forgets[0].Remove) == 2,
+ "Expected 2 snapshots to be removed, got %v", len(forgets[0].Remove))
+}
+
+func testPrune(t *testing.T, pruneOpts PruneOptions, checkOpts CheckOptions) {
+ env, cleanup := withTestEnvironment(t)
+ defer cleanup()
+
+ createPrunableRepo(t, env)
+ testRunPrune(t, env.gopts, pruneOpts)
+ rtest.OK(t, runCheck(context.TODO(), checkOpts, env.gopts, nil))
+}
+
+var pruneDefaultOptions = PruneOptions{MaxUnused: "5%"}
+
+func TestPruneWithDamagedRepository(t *testing.T) {
+ env, cleanup := withTestEnvironment(t)
+ defer cleanup()
+
+ datafile := filepath.Join("testdata", "backup-data.tar.gz")
+ testRunInit(t, env.gopts)
+
+ rtest.SetupTarTestFixture(t, env.testdata, datafile)
+ opts := BackupOptions{}
+
+ // create and delete snapshot to create unused blobs
+ testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "2")}, opts, env.gopts)
+ firstSnapshot := testListSnapshots(t, env.gopts, 1)[0]
+ testRunForget(t, env.gopts, firstSnapshot.String())
+
+ oldPacks := listPacks(env.gopts, t)
+
+ // create new snapshot, but lose all data
+ testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "3")}, opts, env.gopts)
+ testListSnapshots(t, env.gopts, 1)
+ removePacksExcept(env.gopts, t, oldPacks, false)
+
+ oldHook := env.gopts.backendTestHook
+ env.gopts.backendTestHook = func(r restic.Backend) (restic.Backend, error) { return newListOnceBackend(r), nil }
+ defer func() {
+ env.gopts.backendTestHook = oldHook
+ }()
+ // prune should fail
+ rtest.Assert(t, runPrune(context.TODO(), pruneDefaultOptions, env.gopts) == errorPacksMissing,
+ "prune should have reported index not complete error")
+}
+
+// Test repos for edge cases
+func TestEdgeCaseRepos(t *testing.T) {
+ opts := CheckOptions{}
+
+ // repo where index is completely missing
+ // => check and prune should fail
+ t.Run("no-index", func(t *testing.T) {
+ testEdgeCaseRepo(t, "repo-index-missing.tar.gz", opts, pruneDefaultOptions, false, false)
+ })
+
+ // repo where an existing and used blob is missing from the index
+ // => check and prune should fail
+ t.Run("index-missing-blob", func(t *testing.T) {
+ testEdgeCaseRepo(t, "repo-index-missing-blob.tar.gz", opts, pruneDefaultOptions, false, false)
+ })
+
+ // repo where a blob is missing
+ // => check and prune should fail
+ t.Run("missing-data", func(t *testing.T) {
+ testEdgeCaseRepo(t, "repo-data-missing.tar.gz", opts, pruneDefaultOptions, false, false)
+ })
+
+ // repo where blobs which are not needed are missing or in invalid pack files
+ // => check should fail and prune should repair this
+ t.Run("missing-unused-data", func(t *testing.T) {
+ testEdgeCaseRepo(t, "repo-unused-data-missing.tar.gz", opts, pruneDefaultOptions, false, true)
+ })
+
+ // repo where data exists that is not referenced
+ // => check and prune should fully work
+ t.Run("unreferenced-data", func(t *testing.T) {
+ testEdgeCaseRepo(t, "repo-unreferenced-data.tar.gz", opts, pruneDefaultOptions, true, true)
+ })
+
+ // repo where an obsolete index still exists
+ // => check and prune should fully work
+ t.Run("obsolete-index", func(t *testing.T) {
+ testEdgeCaseRepo(t, "repo-obsolete-index.tar.gz", opts, pruneDefaultOptions, true, true)
+ })
+
+ // repo which contains mixed (data/tree) packs
+ // => check and prune should fully work
+ t.Run("mixed-packs", func(t *testing.T) {
+ testEdgeCaseRepo(t, "repo-mixed.tar.gz", opts, pruneDefaultOptions, true, true)
+ })
+
+ // repo which contains duplicate blobs
+ // => checking for unused data should report an error and prune resolves the
+ // situation
+ opts = CheckOptions{
+ ReadData: true,
+ CheckUnused: true,
+ }
+ t.Run("duplicates", func(t *testing.T) {
+ testEdgeCaseRepo(t, "repo-duplicates.tar.gz", opts, pruneDefaultOptions, false, true)
+ })
+}
+
+func testEdgeCaseRepo(t *testing.T, tarfile string, optionsCheck CheckOptions, optionsPrune PruneOptions, checkOK, pruneOK bool) {
+ env, cleanup := withTestEnvironment(t)
+ defer cleanup()
+
+ datafile := filepath.Join("testdata", tarfile)
+ rtest.SetupTarTestFixture(t, env.base, datafile)
+
+ if checkOK {
+ testRunCheck(t, env.gopts)
+ } else {
+ rtest.Assert(t, runCheck(context.TODO(), optionsCheck, env.gopts, nil) != nil,
+ "check should have reported an error")
+ }
+
+ if pruneOK {
+ testRunPrune(t, env.gopts, optionsPrune)
+ testRunCheck(t, env.gopts)
+ } else {
+ rtest.Assert(t, runPrune(context.TODO(), optionsPrune, env.gopts) != nil,
+ "prune should have reported an error")
+ }
+}
diff --git a/cmd/restic/cmd_recover.go b/cmd/restic/cmd_recover.go
index 65f4c8750..85dcc23d7 100644
--- a/cmd/restic/cmd_recover.go
+++ b/cmd/restic/cmd_recover.go
@@ -46,7 +46,7 @@ func runRecover(ctx context.Context, gopts GlobalOptions) error {
return err
}
- lock, ctx, err := lockRepo(ctx, repo)
+ lock, ctx, err := lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
defer unlockRepo(lock)
if err != nil {
return err
diff --git a/cmd/restic/cmd_repair.go b/cmd/restic/cmd_repair.go
new file mode 100644
index 000000000..aefe02f3c
--- /dev/null
+++ b/cmd/restic/cmd_repair.go
@@ -0,0 +1,14 @@
+package main
+
+import (
+ "github.com/spf13/cobra"
+)
+
+var cmdRepair = &cobra.Command{
+ Use: "repair",
+ Short: "Repair the repository",
+}
+
+func init() {
+ cmdRoot.AddCommand(cmdRepair)
+}
diff --git a/cmd/restic/cmd_rebuild_index.go b/cmd/restic/cmd_repair_index.go
index 6d49cb917..b1905836a 100644
--- a/cmd/restic/cmd_rebuild_index.go
+++ b/cmd/restic/cmd_repair_index.go
@@ -7,15 +7,15 @@ import (
"github.com/restic/restic/internal/pack"
"github.com/restic/restic/internal/repository"
"github.com/restic/restic/internal/restic"
-
"github.com/spf13/cobra"
+ "github.com/spf13/pflag"
)
-var cmdRebuildIndex = &cobra.Command{
- Use: "rebuild-index [flags]",
+var cmdRepairIndex = &cobra.Command{
+ Use: "index [flags]",
Short: "Build a new index",
Long: `
-The "rebuild-index" command creates a new index based on the pack files in the
+The "repair index" command creates a new index based on the pack files in the
repository.
EXIT STATUS
@@ -25,40 +25,52 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
`,
DisableAutoGenTag: true,
RunE: func(cmd *cobra.Command, args []string) error {
- return runRebuildIndex(cmd.Context(), rebuildIndexOptions, globalOptions)
+ return runRebuildIndex(cmd.Context(), repairIndexOptions, globalOptions)
},
}
-// RebuildIndexOptions collects all options for the rebuild-index command.
-type RebuildIndexOptions struct {
+var cmdRebuildIndex = &cobra.Command{
+ Use: "rebuild-index [flags]",
+ Short: cmdRepairIndex.Short,
+ Long: cmdRepairIndex.Long,
+ Deprecated: `Use "repair index" instead`,
+ DisableAutoGenTag: true,
+ RunE: cmdRepairIndex.RunE,
+}
+
+// RepairIndexOptions collects all options for the repair index command.
+type RepairIndexOptions struct {
ReadAllPacks bool
}
-var rebuildIndexOptions RebuildIndexOptions
+var repairIndexOptions RepairIndexOptions
func init() {
+ cmdRepair.AddCommand(cmdRepairIndex)
+ // add alias for old name
cmdRoot.AddCommand(cmdRebuildIndex)
- f := cmdRebuildIndex.Flags()
- f.BoolVar(&rebuildIndexOptions.ReadAllPacks, "read-all-packs", false, "read all pack files to generate new index from scratch")
+ for _, f := range []*pflag.FlagSet{cmdRepairIndex.Flags(), cmdRebuildIndex.Flags()} {
+ f.BoolVar(&repairIndexOptions.ReadAllPacks, "read-all-packs", false, "read all pack files to generate new index from scratch")
+ }
}
-func runRebuildIndex(ctx context.Context, opts RebuildIndexOptions, gopts GlobalOptions) error {
+func runRebuildIndex(ctx context.Context, opts RepairIndexOptions, gopts GlobalOptions) error {
repo, err := OpenRepository(ctx, gopts)
if err != nil {
return err
}
- lock, ctx, err := lockRepoExclusive(ctx, repo)
+ lock, ctx, err := lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON)
defer unlockRepo(lock)
if err != nil {
return err
}
- return rebuildIndex(ctx, opts, gopts, repo, restic.NewIDSet())
+ return rebuildIndex(ctx, opts, gopts, repo)
}
-func rebuildIndex(ctx context.Context, opts RebuildIndexOptions, gopts GlobalOptions, repo *repository.Repository, ignorePacks restic.IDSet) error {
+func rebuildIndex(ctx context.Context, opts RepairIndexOptions, gopts GlobalOptions, repo *repository.Repository) error {
var obsoleteIndexes restic.IDs
packSizeFromList := make(map[restic.ID]int64)
packSizeFromIndex := make(map[restic.ID]int64)
@@ -130,7 +142,7 @@ func rebuildIndex(ctx context.Context, opts RebuildIndexOptions, gopts GlobalOpt
if len(packSizeFromList) > 0 {
Verbosef("reading pack files\n")
- bar := newProgressMax(!globalOptions.Quiet, uint64(len(packSizeFromList)), "packs")
+ bar := newProgressMax(!gopts.Quiet, uint64(len(packSizeFromList)), "packs")
invalidFiles, err := repo.CreateIndexFromPacks(ctx, packSizeFromList, bar)
bar.Done()
if err != nil {
diff --git a/cmd/restic/cmd_repair_index_integration_test.go b/cmd/restic/cmd_repair_index_integration_test.go
new file mode 100644
index 000000000..f451173a3
--- /dev/null
+++ b/cmd/restic/cmd_repair_index_integration_test.go
@@ -0,0 +1,140 @@
+package main
+
+import (
+ "context"
+ "io"
+ "path/filepath"
+ "strings"
+ "sync"
+ "testing"
+
+ "github.com/restic/restic/internal/errors"
+ "github.com/restic/restic/internal/index"
+ "github.com/restic/restic/internal/restic"
+ rtest "github.com/restic/restic/internal/test"
+)
+
+func testRunRebuildIndex(t testing.TB, gopts GlobalOptions) {
+ rtest.OK(t, withRestoreGlobalOptions(func() error {
+ globalOptions.stdout = io.Discard
+ return runRebuildIndex(context.TODO(), RepairIndexOptions{}, gopts)
+ }))
+}
+
+func testRebuildIndex(t *testing.T, backendTestHook backendWrapper) {
+ env, cleanup := withTestEnvironment(t)
+ defer cleanup()
+
+ datafile := filepath.Join("..", "..", "internal", "checker", "testdata", "duplicate-packs-in-index-test-repo.tar.gz")
+ rtest.SetupTarTestFixture(t, env.base, datafile)
+
+ out, err := testRunCheckOutput(env.gopts, false)
+ if !strings.Contains(out, "contained in several indexes") {
+ t.Fatalf("did not find checker hint for packs in several indexes")
+ }
+
+ if err != nil {
+ t.Fatalf("expected no error from checker for test repository, got %v", err)
+ }
+
+ if !strings.Contains(out, "restic repair index") {
+ t.Fatalf("did not find hint for repair index command")
+ }
+
+ env.gopts.backendTestHook = backendTestHook
+ testRunRebuildIndex(t, env.gopts)
+
+ env.gopts.backendTestHook = nil
+ out, err = testRunCheckOutput(env.gopts, false)
+ if len(out) != 0 {
+ t.Fatalf("expected no output from the checker, got: %v", out)
+ }
+
+ if err != nil {
+ t.Fatalf("expected no error from checker after repair index, got: %v", err)
+ }
+}
+
+func TestRebuildIndex(t *testing.T) {
+ testRebuildIndex(t, nil)
+}
+
+func TestRebuildIndexAlwaysFull(t *testing.T) {
+ indexFull := index.IndexFull
+ defer func() {
+ index.IndexFull = indexFull
+ }()
+ index.IndexFull = func(*index.Index, bool) bool { return true }
+ testRebuildIndex(t, nil)
+}
+
+// indexErrorBackend modifies the first index after reading.
+type indexErrorBackend struct {
+ restic.Backend
+ lock sync.Mutex
+ hasErred bool
+}
+
+func (b *indexErrorBackend) Load(ctx context.Context, h restic.Handle, length int, offset int64, consumer func(rd io.Reader) error) error {
+ return b.Backend.Load(ctx, h, length, offset, func(rd io.Reader) error {
+ // protect hasErred
+ b.lock.Lock()
+ defer b.lock.Unlock()
+ if !b.hasErred && h.Type == restic.IndexFile {
+ b.hasErred = true
+ return consumer(errorReadCloser{rd})
+ }
+ return consumer(rd)
+ })
+}
+
+type errorReadCloser struct {
+ io.Reader
+}
+
+func (erd errorReadCloser) Read(p []byte) (int, error) {
+ n, err := erd.Reader.Read(p)
+ if n > 0 {
+ p[0] ^= 1
+ }
+ return n, err
+}
+
+func TestRebuildIndexDamage(t *testing.T) {
+ testRebuildIndex(t, func(r restic.Backend) (restic.Backend, error) {
+ return &indexErrorBackend{
+ Backend: r,
+ }, nil
+ })
+}
+
+type appendOnlyBackend struct {
+ restic.Backend
+}
+
+// called via repo.Backend().Remove()
+func (b *appendOnlyBackend) Remove(_ context.Context, h restic.Handle) error {
+ return errors.Errorf("Failed to remove %v", h)
+}
+
+func TestRebuildIndexFailsOnAppendOnly(t *testing.T) {
+ env, cleanup := withTestEnvironment(t)
+ defer cleanup()
+
+ datafile := filepath.Join("..", "..", "internal", "checker", "testdata", "duplicate-packs-in-index-test-repo.tar.gz")
+ rtest.SetupTarTestFixture(t, env.base, datafile)
+
+ err := withRestoreGlobalOptions(func() error {
+ globalOptions.stdout = io.Discard
+
+ env.gopts.backendTestHook = func(r restic.Backend) (restic.Backend, error) {
+ return &appendOnlyBackend{r}, nil
+ }
+ return runRebuildIndex(context.TODO(), RepairIndexOptions{}, env.gopts)
+ })
+
+ if err == nil {
+ t.Error("expected rebuildIndex to fail")
+ }
+ t.Log(err)
+}
diff --git a/cmd/restic/cmd_repair_snapshots.go b/cmd/restic/cmd_repair_snapshots.go
new file mode 100644
index 000000000..03736795c
--- /dev/null
+++ b/cmd/restic/cmd_repair_snapshots.go
@@ -0,0 +1,176 @@
+package main
+
+import (
+ "context"
+
+ "github.com/restic/restic/internal/backend"
+ "github.com/restic/restic/internal/errors"
+ "github.com/restic/restic/internal/restic"
+ "github.com/restic/restic/internal/walker"
+
+ "github.com/spf13/cobra"
+)
+
+var cmdRepairSnapshots = &cobra.Command{
+ Use: "snapshots [flags] [snapshot ID] [...]",
+ Short: "Repair snapshots",
+ Long: `
+The "repair snapshots" command repairs broken snapshots. It scans the given
+snapshots and generates new ones with damaged directories and file contents
+removed. If the broken snapshots are deleted, a prune run will be able to
+clean up the repository.
+
+The command depends on a correct index, thus make sure to run "repair index"
+first!
+
+
+WARNING
+=======
+
+Repairing and deleting broken snapshots causes data loss! It will remove broken
+directories and modify broken files in the modified snapshots.
+
+If the contents of directories and files are still available, the better option
+is to run "backup" which in that case is able to heal existing snapshots. Only
+use the "repair snapshots" command if you need to recover an old and broken
+snapshot!
+
+EXIT STATUS
+===========
+
+Exit status is 0 if the command was successful, and non-zero if there was any error.
+`,
+ DisableAutoGenTag: true,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ return runRepairSnapshots(cmd.Context(), globalOptions, repairSnapshotOptions, args)
+ },
+}
+
+// RepairOptions collects all options for the repair command.
+type RepairOptions struct {
+ DryRun bool
+ Forget bool
+
+ restic.SnapshotFilter
+}
+
+var repairSnapshotOptions RepairOptions
+
+func init() {
+ cmdRepair.AddCommand(cmdRepairSnapshots)
+ flags := cmdRepairSnapshots.Flags()
+
+ flags.BoolVarP(&repairSnapshotOptions.DryRun, "dry-run", "n", false, "do not do anything, just print what would be done")
+ flags.BoolVarP(&repairSnapshotOptions.Forget, "forget", "", false, "remove original snapshots after creating new ones")
+
+ initMultiSnapshotFilter(flags, &repairSnapshotOptions.SnapshotFilter, true)
+}
+
+func runRepairSnapshots(ctx context.Context, gopts GlobalOptions, opts RepairOptions, args []string) error {
+ repo, err := OpenRepository(ctx, gopts)
+ if err != nil {
+ return err
+ }
+
+ if !opts.DryRun {
+ var lock *restic.Lock
+ var err error
+ lock, ctx, err = lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON)
+ defer unlockRepo(lock)
+ if err != nil {
+ return err
+ }
+ } else {
+ repo.SetDryRun()
+ }
+
+ snapshotLister, err := backend.MemorizeList(ctx, repo.Backend(), restic.SnapshotFile)
+ if err != nil {
+ return err
+ }
+
+ if err := repo.LoadIndex(ctx); err != nil {
+ return err
+ }
+
+ // Three error cases are checked:
+ // - tree is a nil tree (-> will be replaced by an empty tree)
+ // - trees which cannot be loaded (-> the tree contents will be removed)
+ // - files whose contents are not fully available (-> file will be modified)
+ rewriter := walker.NewTreeRewriter(walker.RewriteOpts{
+ RewriteNode: func(node *restic.Node, path string) *restic.Node {
+ if node.Type != "file" {
+ return node
+ }
+
+ ok := true
+ var newContent restic.IDs = restic.IDs{}
+ var newSize uint64
+ // check all contents and remove if not available
+ for _, id := range node.Content {
+ if size, found := repo.LookupBlobSize(id, restic.DataBlob); !found {
+ ok = false
+ } else {
+ newContent = append(newContent, id)
+ newSize += uint64(size)
+ }
+ }
+ if !ok {
+ Verbosef(" file %q: removed missing content\n", path)
+ } else if newSize != node.Size {
+ Verbosef(" file %q: fixed incorrect size\n", path)
+ }
+ // no-ops if already correct
+ node.Content = newContent
+ node.Size = newSize
+ return node
+ },
+ RewriteFailedTree: func(nodeID restic.ID, path string, _ error) (restic.ID, error) {
+ if path == "/" {
+ Verbosef(" dir %q: not readable\n", path)
+ // remove snapshots with invalid root node
+ return restic.ID{}, nil
+ }
+ // If a subtree fails to load, remove it
+ Verbosef(" dir %q: replaced with empty directory\n", path)
+ emptyID, err := restic.SaveTree(ctx, repo, &restic.Tree{})
+ if err != nil {
+ return restic.ID{}, err
+ }
+ return emptyID, nil
+ },
+ AllowUnstableSerialization: true,
+ })
+
+ changedCount := 0
+ for sn := range FindFilteredSnapshots(ctx, snapshotLister, repo, &opts.SnapshotFilter, args) {
+ Verbosef("\nsnapshot %s of %v at %s)\n", sn.ID().Str(), sn.Paths, sn.Time)
+ changed, err := filterAndReplaceSnapshot(ctx, repo, sn,
+ func(ctx context.Context, sn *restic.Snapshot) (restic.ID, error) {
+ return rewriter.RewriteTree(ctx, repo, "/", *sn.Tree)
+ }, opts.DryRun, opts.Forget, "repaired")
+ if err != nil {
+ return errors.Fatalf("unable to rewrite snapshot ID %q: %v", sn.ID().Str(), err)
+ }
+ if changed {
+ changedCount++
+ }
+ }
+
+ Verbosef("\n")
+ if changedCount == 0 {
+ if !opts.DryRun {
+ Verbosef("no snapshots were modified\n")
+ } else {
+ Verbosef("no snapshots would be modified\n")
+ }
+ } else {
+ if !opts.DryRun {
+ Verbosef("modified %v snapshots\n", changedCount)
+ } else {
+ Verbosef("would modify %v snapshots\n", changedCount)
+ }
+ }
+
+ return nil
+}
diff --git a/cmd/restic/cmd_repair_snapshots_integration_test.go b/cmd/restic/cmd_repair_snapshots_integration_test.go
new file mode 100644
index 000000000..34cd186d3
--- /dev/null
+++ b/cmd/restic/cmd_repair_snapshots_integration_test.go
@@ -0,0 +1,135 @@
+package main
+
+import (
+ "context"
+ "hash/fnv"
+ "io"
+ "math/rand"
+ "os"
+ "path/filepath"
+ "reflect"
+ "testing"
+
+ "github.com/restic/restic/internal/restic"
+ rtest "github.com/restic/restic/internal/test"
+)
+
+func testRunRepairSnapshot(t testing.TB, gopts GlobalOptions, forget bool) {
+ opts := RepairOptions{
+ Forget: forget,
+ }
+
+ rtest.OK(t, runRepairSnapshots(context.TODO(), gopts, opts, nil))
+}
+
+func createRandomFile(t testing.TB, env *testEnvironment, path string, size int) {
+ fn := filepath.Join(env.testdata, path)
+ rtest.OK(t, os.MkdirAll(filepath.Dir(fn), 0o755))
+
+ h := fnv.New64()
+ _, err := h.Write([]byte(path))
+ rtest.OK(t, err)
+ r := rand.New(rand.NewSource(int64(h.Sum64())))
+
+ f, err := os.OpenFile(fn, os.O_CREATE|os.O_RDWR, 0o644)
+ rtest.OK(t, err)
+ _, err = io.Copy(f, io.LimitReader(r, int64(size)))
+ rtest.OK(t, err)
+ rtest.OK(t, f.Close())
+}
+
+func TestRepairSnapshotsWithLostData(t *testing.T) {
+ env, cleanup := withTestEnvironment(t)
+ defer cleanup()
+
+ testRunInit(t, env.gopts)
+
+ createRandomFile(t, env, "foo/bar/file", 512*1024)
+ testRunBackup(t, "", []string{env.testdata}, BackupOptions{}, env.gopts)
+ testListSnapshots(t, env.gopts, 1)
+ // damage repository
+ removePacksExcept(env.gopts, t, restic.NewIDSet(), false)
+
+ createRandomFile(t, env, "foo/bar/file2", 256*1024)
+ testRunBackup(t, "", []string{env.testdata}, BackupOptions{}, env.gopts)
+ snapshotIDs := testListSnapshots(t, env.gopts, 2)
+ testRunCheckMustFail(t, env.gopts)
+
+ // repair but keep broken snapshots
+ testRunRebuildIndex(t, env.gopts)
+ testRunRepairSnapshot(t, env.gopts, false)
+ testListSnapshots(t, env.gopts, 4)
+ testRunCheckMustFail(t, env.gopts)
+
+ // repository must be ok after removing the broken snapshots
+ testRunForget(t, env.gopts, snapshotIDs[0].String(), snapshotIDs[1].String())
+ testListSnapshots(t, env.gopts, 2)
+ _, err := testRunCheckOutput(env.gopts, false)
+ rtest.OK(t, err)
+}
+
+func TestRepairSnapshotsWithLostTree(t *testing.T) {
+ env, cleanup := withTestEnvironment(t)
+ defer cleanup()
+
+ testRunInit(t, env.gopts)
+
+ createRandomFile(t, env, "foo/bar/file", 12345)
+ testRunBackup(t, "", []string{env.testdata}, BackupOptions{}, env.gopts)
+ oldSnapshot := testListSnapshots(t, env.gopts, 1)
+ oldPacks := testRunList(t, "packs", env.gopts)
+
+ // keep foo/bar unchanged
+ createRandomFile(t, env, "foo/bar2", 1024)
+ testRunBackup(t, "", []string{env.testdata}, BackupOptions{}, env.gopts)
+ testListSnapshots(t, env.gopts, 2)
+
+ // remove tree for foo/bar and the now completely broken first snapshot
+ removePacks(env.gopts, t, restic.NewIDSet(oldPacks...))
+ testRunForget(t, env.gopts, oldSnapshot[0].String())
+ testRunCheckMustFail(t, env.gopts)
+
+ // repair
+ testRunRebuildIndex(t, env.gopts)
+ testRunRepairSnapshot(t, env.gopts, true)
+ testListSnapshots(t, env.gopts, 1)
+ _, err := testRunCheckOutput(env.gopts, false)
+ rtest.OK(t, err)
+}
+
+func TestRepairSnapshotsWithLostRootTree(t *testing.T) {
+ env, cleanup := withTestEnvironment(t)
+ defer cleanup()
+
+ testRunInit(t, env.gopts)
+
+ createRandomFile(t, env, "foo/bar/file", 12345)
+ testRunBackup(t, "", []string{env.testdata}, BackupOptions{}, env.gopts)
+ testListSnapshots(t, env.gopts, 1)
+ oldPacks := testRunList(t, "packs", env.gopts)
+
+ // remove all trees
+ removePacks(env.gopts, t, restic.NewIDSet(oldPacks...))
+ testRunCheckMustFail(t, env.gopts)
+
+ // repair
+ testRunRebuildIndex(t, env.gopts)
+ testRunRepairSnapshot(t, env.gopts, true)
+ testListSnapshots(t, env.gopts, 0)
+ _, err := testRunCheckOutput(env.gopts, false)
+ rtest.OK(t, err)
+}
+
+func TestRepairSnapshotsIntact(t *testing.T) {
+ env, cleanup := withTestEnvironment(t)
+ defer cleanup()
+ testSetupBackupData(t, env)
+ testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, BackupOptions{}, env.gopts)
+ oldSnapshotIDs := testListSnapshots(t, env.gopts, 1)
+
+ // use an exclude that will not exclude anything
+ testRunRepairSnapshot(t, env.gopts, false)
+ snapshotIDs := testListSnapshots(t, env.gopts, 1)
+ rtest.Assert(t, reflect.DeepEqual(oldSnapshotIDs, snapshotIDs), "unexpected snapshot id mismatch %v vs. %v", oldSnapshotIDs, snapshotIDs)
+ testRunCheck(t, env.gopts)
+}
diff --git a/cmd/restic/cmd_restore.go b/cmd/restic/cmd_restore.go
index 579711662..6ef8c99db 100644
--- a/cmd/restic/cmd_restore.go
+++ b/cmd/restic/cmd_restore.go
@@ -3,6 +3,7 @@ package main
import (
"context"
"strings"
+ "sync"
"time"
"github.com/restic/restic/internal/debug"
@@ -10,6 +11,9 @@ import (
"github.com/restic/restic/internal/filter"
"github.com/restic/restic/internal/restic"
"github.com/restic/restic/internal/restorer"
+ "github.com/restic/restic/internal/ui"
+ restoreui "github.com/restic/restic/internal/ui/restore"
+ "github.com/restic/restic/internal/ui/termstatus"
"github.com/spf13/cobra"
)
@@ -31,7 +35,31 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
`,
DisableAutoGenTag: true,
RunE: func(cmd *cobra.Command, args []string) error {
- return runRestore(cmd.Context(), restoreOptions, globalOptions, args)
+ ctx := cmd.Context()
+ var wg sync.WaitGroup
+ cancelCtx, cancel := context.WithCancel(ctx)
+ defer func() {
+ // shutdown termstatus
+ cancel()
+ wg.Wait()
+ }()
+
+ term := termstatus.New(globalOptions.stdout, globalOptions.stderr, globalOptions.Quiet)
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ term.Run(cancelCtx)
+ }()
+
+ // allow usage of warnf / verbosef
+ prevStdout, prevStderr := globalOptions.stdout, globalOptions.stderr
+ defer func() {
+ globalOptions.stdout, globalOptions.stderr = prevStdout, prevStderr
+ }()
+ stdioWrapper := ui.NewStdioWrapper(term)
+ globalOptions.stdout, globalOptions.stderr = stdioWrapper.Stdout(), stdioWrapper.Stderr()
+
+ return runRestore(ctx, restoreOptions, globalOptions, term, args)
},
}
@@ -64,7 +92,9 @@ func init() {
flags.BoolVar(&restoreOptions.Verify, "verify", false, "verify restored files content")
}
-func runRestore(ctx context.Context, opts RestoreOptions, gopts GlobalOptions, args []string) error {
+func runRestore(ctx context.Context, opts RestoreOptions, gopts GlobalOptions,
+ term *termstatus.Terminal, args []string) error {
+
hasExcludes := len(opts.Exclude) > 0 || len(opts.InsensitiveExclude) > 0
hasIncludes := len(opts.Include) > 0 || len(opts.InsensitiveInclude) > 0
@@ -124,14 +154,14 @@ func runRestore(ctx context.Context, opts RestoreOptions, gopts GlobalOptions, a
if !gopts.NoLock {
var lock *restic.Lock
- lock, ctx, err = lockRepo(ctx, repo)
+ lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
defer unlockRepo(lock)
if err != nil {
return err
}
}
- sn, err := (&restic.SnapshotFilter{
+ sn, subfolder, err := (&restic.SnapshotFilter{
Hosts: opts.Hosts,
Paths: opts.Paths,
Tags: opts.Tags,
@@ -145,11 +175,25 @@ func runRestore(ctx context.Context, opts RestoreOptions, gopts GlobalOptions, a
return err
}
- res := restorer.NewRestorer(ctx, repo, sn, opts.Sparse)
+ sn.Tree, err = restic.FindTreeDirectory(ctx, repo, sn.Tree, subfolder)
+ if err != nil {
+ return err
+ }
+
+ msg := ui.NewMessage(term, gopts.verbosity)
+ var printer restoreui.ProgressPrinter
+ if gopts.JSON {
+ printer = restoreui.NewJSONProgress(term)
+ } else {
+ printer = restoreui.NewTextProgress(term)
+ }
+
+ progress := restoreui.NewProgress(printer, calculateProgressInterval(!gopts.Quiet, gopts.JSON))
+ res := restorer.NewRestorer(repo, sn, opts.Sparse, progress)
totalErrors := 0
res.Error = func(location string, err error) error {
- Warnf("ignoring error for %s: %s\n", location, err)
+ msg.E("ignoring error for %s: %s\n", location, err)
totalErrors++
return nil
}
@@ -159,12 +203,12 @@ func runRestore(ctx context.Context, opts RestoreOptions, gopts GlobalOptions, a
selectExcludeFilter := func(item string, dstpath string, node *restic.Node) (selectedForRestore bool, childMayBeSelected bool) {
matched, err := filter.List(excludePatterns, item)
if err != nil {
- Warnf("error for exclude pattern: %v", err)
+ msg.E("error for exclude pattern: %v", err)
}
matchedInsensitive, err := filter.List(insensitiveExcludePatterns, strings.ToLower(item))
if err != nil {
- Warnf("error for iexclude pattern: %v", err)
+ msg.E("error for iexclude pattern: %v", err)
}
// An exclude filter is basically a 'wildcard but foo',
@@ -182,12 +226,12 @@ func runRestore(ctx context.Context, opts RestoreOptions, gopts GlobalOptions, a
selectIncludeFilter := func(item string, dstpath string, node *restic.Node) (selectedForRestore bool, childMayBeSelected bool) {
matched, childMayMatch, err := filter.ListWithChild(includePatterns, item)
if err != nil {
- Warnf("error for include pattern: %v", err)
+ msg.E("error for include pattern: %v", err)
}
matchedInsensitive, childMayMatchInsensitive, err := filter.ListWithChild(insensitiveIncludePatterns, strings.ToLower(item))
if err != nil {
- Warnf("error for iexclude pattern: %v", err)
+ msg.E("error for iexclude pattern: %v", err)
}
selectedForRestore = matched || matchedInsensitive
@@ -202,19 +246,25 @@ func runRestore(ctx context.Context, opts RestoreOptions, gopts GlobalOptions, a
res.SelectFilter = selectIncludeFilter
}
- Verbosef("restoring %s to %s\n", res.Snapshot(), opts.Target)
+ if !gopts.JSON {
+ msg.P("restoring %s to %s\n", res.Snapshot(), opts.Target)
+ }
err = res.RestoreTo(ctx, opts.Target)
if err != nil {
return err
}
+ progress.Finish()
+
if totalErrors > 0 {
return errors.Fatalf("There were %d errors\n", totalErrors)
}
if opts.Verify {
- Verbosef("verifying files in %s\n", opts.Target)
+ if !gopts.JSON {
+ msg.P("verifying files in %s\n", opts.Target)
+ }
var count int
t0 := time.Now()
count, err = res.VerifyFiles(ctx, opts.Target)
@@ -224,8 +274,11 @@ func runRestore(ctx context.Context, opts RestoreOptions, gopts GlobalOptions, a
if totalErrors > 0 {
return errors.Fatalf("There were %d errors\n", totalErrors)
}
- Verbosef("finished verifying %d files in %s (took %s)\n", count, opts.Target,
- time.Since(t0).Round(time.Millisecond))
+
+ if !gopts.JSON {
+ msg.P("finished verifying %d files in %s (took %s)\n", count, opts.Target,
+ time.Since(t0).Round(time.Millisecond))
+ }
}
return nil
diff --git a/cmd/restic/cmd_restore_integration_test.go b/cmd/restic/cmd_restore_integration_test.go
new file mode 100644
index 000000000..2c7cbe1fb
--- /dev/null
+++ b/cmd/restic/cmd_restore_integration_test.go
@@ -0,0 +1,307 @@
+package main
+
+import (
+ "context"
+ "fmt"
+ "io"
+ mrand "math/rand"
+ "os"
+ "path/filepath"
+ "syscall"
+ "testing"
+ "time"
+
+ "github.com/restic/restic/internal/filter"
+ "github.com/restic/restic/internal/restic"
+ rtest "github.com/restic/restic/internal/test"
+ "github.com/restic/restic/internal/ui/termstatus"
+)
+
+func testRunRestore(t testing.TB, opts GlobalOptions, dir string, snapshotID restic.ID) {
+ testRunRestoreExcludes(t, opts, dir, snapshotID, nil)
+}
+
+func testRunRestoreExcludes(t testing.TB, gopts GlobalOptions, dir string, snapshotID restic.ID, excludes []string) {
+ opts := RestoreOptions{
+ Target: dir,
+ Exclude: excludes,
+ }
+
+ rtest.OK(t, testRunRestoreAssumeFailure(snapshotID.String(), opts, gopts))
+}
+
+func testRunRestoreAssumeFailure(snapshotID string, opts RestoreOptions, gopts GlobalOptions) error {
+ return withTermStatus(gopts, func(ctx context.Context, term *termstatus.Terminal) error {
+ return runRestore(ctx, opts, gopts, term, []string{snapshotID})
+ })
+}
+
+func testRunRestoreLatest(t testing.TB, gopts GlobalOptions, dir string, paths []string, hosts []string) {
+ opts := RestoreOptions{
+ Target: dir,
+ SnapshotFilter: restic.SnapshotFilter{
+ Hosts: hosts,
+ Paths: paths,
+ },
+ }
+
+ rtest.OK(t, testRunRestoreAssumeFailure("latest", opts, gopts))
+}
+
+func testRunRestoreIncludes(t testing.TB, gopts GlobalOptions, dir string, snapshotID restic.ID, includes []string) {
+ opts := RestoreOptions{
+ Target: dir,
+ Include: includes,
+ }
+
+ rtest.OK(t, testRunRestoreAssumeFailure(snapshotID.String(), opts, gopts))
+}
+
+func TestRestoreFilter(t *testing.T) {
+ testfiles := []struct {
+ name string
+ size uint
+ }{
+ {"testfile1.c", 100},
+ {"testfile2.exe", 101},
+ {"subdir1/subdir2/testfile3.docx", 102},
+ {"subdir1/subdir2/testfile4.c", 102},
+ }
+
+ env, cleanup := withTestEnvironment(t)
+ defer cleanup()
+
+ testRunInit(t, env.gopts)
+
+ for _, testFile := range testfiles {
+ p := filepath.Join(env.testdata, testFile.name)
+ rtest.OK(t, os.MkdirAll(filepath.Dir(p), 0755))
+ rtest.OK(t, appendRandomData(p, testFile.size))
+ }
+
+ opts := BackupOptions{}
+
+ testRunBackup(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts)
+ testRunCheck(t, env.gopts)
+
+ snapshotID := testListSnapshots(t, env.gopts, 1)[0]
+
+ // no restore filter should restore all files
+ testRunRestore(t, env.gopts, filepath.Join(env.base, "restore0"), snapshotID)
+ for _, testFile := range testfiles {
+ rtest.OK(t, testFileSize(filepath.Join(env.base, "restore0", "testdata", testFile.name), int64(testFile.size)))
+ }
+
+ for i, pat := range []string{"*.c", "*.exe", "*", "*file3*"} {
+ base := filepath.Join(env.base, fmt.Sprintf("restore%d", i+1))
+ testRunRestoreExcludes(t, env.gopts, base, snapshotID, []string{pat})
+ for _, testFile := range testfiles {
+ err := testFileSize(filepath.Join(base, "testdata", testFile.name), int64(testFile.size))
+ if ok, _ := filter.Match(pat, filepath.Base(testFile.name)); !ok {
+ rtest.OK(t, err)
+ } else {
+ rtest.Assert(t, os.IsNotExist(err),
+ "expected %v to not exist in restore step %v, but it exists, err %v", testFile.name, i+1, err)
+ }
+ }
+ }
+}
+
+func TestRestore(t *testing.T) {
+ env, cleanup := withTestEnvironment(t)
+ defer cleanup()
+
+ testRunInit(t, env.gopts)
+
+ for i := 0; i < 10; i++ {
+ p := filepath.Join(env.testdata, fmt.Sprintf("foo/bar/testfile%v", i))
+ rtest.OK(t, os.MkdirAll(filepath.Dir(p), 0755))
+ rtest.OK(t, appendRandomData(p, uint(mrand.Intn(2<<21))))
+ }
+
+ opts := BackupOptions{}
+
+ testRunBackup(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts)
+ testRunCheck(t, env.gopts)
+
+ // Restore latest without any filters
+ restoredir := filepath.Join(env.base, "restore")
+ testRunRestoreLatest(t, env.gopts, restoredir, nil, nil)
+
+ diff := directoriesContentsDiff(env.testdata, filepath.Join(restoredir, filepath.Base(env.testdata)))
+ rtest.Assert(t, diff == "", "directories are not equal %v", diff)
+}
+
+func TestRestoreLatest(t *testing.T) {
+ env, cleanup := withTestEnvironment(t)
+ defer cleanup()
+
+ testRunInit(t, env.gopts)
+
+ p := filepath.Join(env.testdata, "testfile.c")
+ rtest.OK(t, os.MkdirAll(filepath.Dir(p), 0755))
+ rtest.OK(t, appendRandomData(p, 100))
+
+ opts := BackupOptions{}
+
+ // chdir manually here so we can get the current directory. This is not the
+ // same as the temp dir returned by os.MkdirTemp() on darwin.
+ back := rtest.Chdir(t, filepath.Dir(env.testdata))
+ defer back()
+
+ curdir, err := os.Getwd()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ testRunBackup(t, "", []string{filepath.Base(env.testdata)}, opts, env.gopts)
+ testRunCheck(t, env.gopts)
+
+ rtest.OK(t, os.Remove(p))
+ rtest.OK(t, appendRandomData(p, 101))
+ testRunBackup(t, "", []string{filepath.Base(env.testdata)}, opts, env.gopts)
+ testRunCheck(t, env.gopts)
+
+ // Restore latest without any filters
+ testRunRestoreLatest(t, env.gopts, filepath.Join(env.base, "restore0"), nil, nil)
+ rtest.OK(t, testFileSize(filepath.Join(env.base, "restore0", "testdata", "testfile.c"), int64(101)))
+
+ // Setup test files in different directories backed up in different snapshots
+ p1 := filepath.Join(curdir, filepath.FromSlash("p1/testfile.c"))
+
+ rtest.OK(t, os.MkdirAll(filepath.Dir(p1), 0755))
+ rtest.OK(t, appendRandomData(p1, 102))
+ testRunBackup(t, "", []string{"p1"}, opts, env.gopts)
+ testRunCheck(t, env.gopts)
+
+ p2 := filepath.Join(curdir, filepath.FromSlash("p2/testfile.c"))
+
+ rtest.OK(t, os.MkdirAll(filepath.Dir(p2), 0755))
+ rtest.OK(t, appendRandomData(p2, 103))
+ testRunBackup(t, "", []string{"p2"}, opts, env.gopts)
+ testRunCheck(t, env.gopts)
+
+ p1rAbs := filepath.Join(env.base, "restore1", "p1/testfile.c")
+ p2rAbs := filepath.Join(env.base, "restore2", "p2/testfile.c")
+
+ testRunRestoreLatest(t, env.gopts, filepath.Join(env.base, "restore1"), []string{filepath.Dir(p1)}, nil)
+ rtest.OK(t, testFileSize(p1rAbs, int64(102)))
+ if _, err := os.Stat(p2rAbs); os.IsNotExist(err) {
+ rtest.Assert(t, os.IsNotExist(err),
+ "expected %v to not exist in restore, but it exists, err %v", p2rAbs, err)
+ }
+
+ testRunRestoreLatest(t, env.gopts, filepath.Join(env.base, "restore2"), []string{filepath.Dir(p2)}, nil)
+ rtest.OK(t, testFileSize(p2rAbs, int64(103)))
+ if _, err := os.Stat(p1rAbs); os.IsNotExist(err) {
+ rtest.Assert(t, os.IsNotExist(err),
+ "expected %v to not exist in restore, but it exists, err %v", p1rAbs, err)
+ }
+}
+
+func TestRestoreWithPermissionFailure(t *testing.T) {
+ env, cleanup := withTestEnvironment(t)
+ defer cleanup()
+
+ datafile := filepath.Join("testdata", "repo-restore-permissions-test.tar.gz")
+ rtest.SetupTarTestFixture(t, env.base, datafile)
+
+ snapshots := testListSnapshots(t, env.gopts, 1)
+
+ _ = withRestoreGlobalOptions(func() error {
+ globalOptions.stderr = io.Discard
+ testRunRestore(t, env.gopts, filepath.Join(env.base, "restore"), snapshots[0])
+ return nil
+ })
+
+ // make sure that all files have been restored, regardless of any
+ // permission errors
+ files := testRunLs(t, env.gopts, snapshots[0].String())
+ for _, filename := range files {
+ fi, err := os.Lstat(filepath.Join(env.base, "restore", filename))
+ rtest.OK(t, err)
+
+ rtest.Assert(t, !isFile(fi) || fi.Size() > 0,
+ "file %v restored, but filesize is 0", filename)
+ }
+}
+
+func setZeroModTime(filename string) error {
+ var utimes = []syscall.Timespec{
+ syscall.NsecToTimespec(0),
+ syscall.NsecToTimespec(0),
+ }
+
+ return syscall.UtimesNano(filename, utimes)
+}
+
+func TestRestoreNoMetadataOnIgnoredIntermediateDirs(t *testing.T) {
+ env, cleanup := withTestEnvironment(t)
+ defer cleanup()
+
+ testRunInit(t, env.gopts)
+
+ p := filepath.Join(env.testdata, "subdir1", "subdir2", "subdir3", "file.ext")
+ rtest.OK(t, os.MkdirAll(filepath.Dir(p), 0755))
+ rtest.OK(t, appendRandomData(p, 200))
+ rtest.OK(t, setZeroModTime(filepath.Join(env.testdata, "subdir1", "subdir2")))
+
+ opts := BackupOptions{}
+
+ testRunBackup(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts)
+ testRunCheck(t, env.gopts)
+
+ snapshotID := testListSnapshots(t, env.gopts, 1)[0]
+
+ // restore with filter "*.ext", this should restore "file.ext", but
+ // since the directories are ignored and only created because of
+ // "file.ext", no meta data should be restored for them.
+ testRunRestoreIncludes(t, env.gopts, filepath.Join(env.base, "restore0"), snapshotID, []string{"*.ext"})
+
+ f1 := filepath.Join(env.base, "restore0", "testdata", "subdir1", "subdir2")
+ _, err := os.Stat(f1)
+ rtest.OK(t, err)
+
+ // restore with filter "*", this should restore meta data on everything.
+ testRunRestoreIncludes(t, env.gopts, filepath.Join(env.base, "restore1"), snapshotID, []string{"*"})
+
+ f2 := filepath.Join(env.base, "restore1", "testdata", "subdir1", "subdir2")
+ fi, err := os.Stat(f2)
+ rtest.OK(t, err)
+
+ rtest.Assert(t, fi.ModTime() == time.Unix(0, 0),
+ "meta data of intermediate directory hasn't been restore")
+}
+
+func TestRestoreLocalLayout(t *testing.T) {
+ env, cleanup := withTestEnvironment(t)
+ defer cleanup()
+
+ var tests = []struct {
+ filename string
+ layout string
+ }{
+ {"repo-layout-default.tar.gz", ""},
+ {"repo-layout-s3legacy.tar.gz", ""},
+ {"repo-layout-default.tar.gz", "default"},
+ {"repo-layout-s3legacy.tar.gz", "s3legacy"},
+ }
+
+ for _, test := range tests {
+ datafile := filepath.Join("..", "..", "internal", "backend", "testdata", test.filename)
+
+ rtest.SetupTarTestFixture(t, env.base, datafile)
+
+ env.gopts.extended["local.layout"] = test.layout
+
+ // check the repo
+ testRunCheck(t, env.gopts)
+
+ // restore latest snapshot
+ target := filepath.Join(env.base, "restore")
+ testRunRestoreLatest(t, env.gopts, target, nil, nil)
+
+ rtest.RemoveAll(t, filepath.Join(env.base, "repo"))
+ rtest.RemoveAll(t, target)
+ }
+}
diff --git a/cmd/restic/cmd_rewrite.go b/cmd/restic/cmd_rewrite.go
index 0d9aa1c8c..c08797c48 100644
--- a/cmd/restic/cmd_rewrite.go
+++ b/cmd/restic/cmd_rewrite.go
@@ -87,36 +87,67 @@ func rewriteSnapshot(ctx context.Context, repo *repository.Repository, sn *resti
return true
}
+ rewriter := walker.NewTreeRewriter(walker.RewriteOpts{
+ RewriteNode: func(node *restic.Node, path string) *restic.Node {
+ if selectByName(path) {
+ return node
+ }
+ Verbosef(fmt.Sprintf("excluding %s\n", path))
+ return nil
+ },
+ DisableNodeCache: true,
+ })
+
+ return filterAndReplaceSnapshot(ctx, repo, sn,
+ func(ctx context.Context, sn *restic.Snapshot) (restic.ID, error) {
+ return rewriter.RewriteTree(ctx, repo, "/", *sn.Tree)
+ }, opts.DryRun, opts.Forget, "rewrite")
+}
+
+func filterAndReplaceSnapshot(ctx context.Context, repo restic.Repository, sn *restic.Snapshot, filter func(ctx context.Context, sn *restic.Snapshot) (restic.ID, error), dryRun bool, forget bool, addTag string) (bool, error) {
+
wg, wgCtx := errgroup.WithContext(ctx)
repo.StartPackUploader(wgCtx, wg)
var filteredTree restic.ID
wg.Go(func() error {
- filteredTree, err = walker.FilterTree(wgCtx, repo, "/", *sn.Tree, &walker.TreeFilterVisitor{
- SelectByName: selectByName,
- PrintExclude: func(path string) { Verbosef(fmt.Sprintf("excluding %s\n", path)) },
- })
+ var err error
+ filteredTree, err = filter(ctx, sn)
if err != nil {
return err
}
return repo.Flush(wgCtx)
})
- err = wg.Wait()
+ err := wg.Wait()
if err != nil {
return false, err
}
+ if filteredTree.IsNull() {
+ if dryRun {
+ Verbosef("would delete empty snapshot\n")
+ } else {
+ h := restic.Handle{Type: restic.SnapshotFile, Name: sn.ID().String()}
+ if err = repo.Backend().Remove(ctx, h); err != nil {
+ return false, err
+ }
+ debug.Log("removed empty snapshot %v", sn.ID())
+ Verbosef("removed empty snapshot %v\n", sn.ID().Str())
+ }
+ return true, nil
+ }
+
if filteredTree == *sn.Tree {
debug.Log("Snapshot %v not modified", sn)
return false, nil
}
debug.Log("Snapshot %v modified", sn)
- if opts.DryRun {
+ if dryRun {
Verbosef("would save new snapshot\n")
- if opts.Forget {
+ if forget {
Verbosef("would remove old snapshot\n")
}
@@ -125,10 +156,10 @@ func rewriteSnapshot(ctx context.Context, repo *repository.Repository, sn *resti
// Always set the original snapshot id as this essentially a new snapshot.
sn.Original = sn.ID()
- *sn.Tree = filteredTree
+ sn.Tree = &filteredTree
- if !opts.Forget {
- sn.AddTags([]string{"rewrite"})
+ if !forget {
+ sn.AddTags([]string{addTag})
}
// Save the new snapshot.
@@ -136,8 +167,9 @@ func rewriteSnapshot(ctx context.Context, repo *repository.Repository, sn *resti
if err != nil {
return false, err
}
+ Verbosef("saved new snapshot %v\n", id.Str())
- if opts.Forget {
+ if forget {
h := restic.Handle{Type: restic.SnapshotFile, Name: sn.ID().String()}
if err = repo.Backend().Remove(ctx, h); err != nil {
return false, err
@@ -145,7 +177,6 @@ func rewriteSnapshot(ctx context.Context, repo *repository.Repository, sn *resti
debug.Log("removed old snapshot %v", sn.ID())
Verbosef("removed old snapshot %v\n", sn.ID().Str())
}
- Verbosef("saved new snapshot %v\n", id.Str())
return true, nil
}
@@ -164,9 +195,9 @@ func runRewrite(ctx context.Context, opts RewriteOptions, gopts GlobalOptions, a
var err error
if opts.Forget {
Verbosef("create exclusive lock for repository\n")
- lock, ctx, err = lockRepoExclusive(ctx, repo)
+ lock, ctx, err = lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON)
} else {
- lock, ctx, err = lockRepo(ctx, repo)
+ lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
}
defer unlockRepo(lock)
if err != nil {
diff --git a/cmd/restic/integration_rewrite_test.go b/cmd/restic/cmd_rewrite_integration_test.go
index e6007973b..e6007973b 100644
--- a/cmd/restic/integration_rewrite_test.go
+++ b/cmd/restic/cmd_rewrite_integration_test.go
diff --git a/cmd/restic/cmd_snapshots.go b/cmd/restic/cmd_snapshots.go
index c5faa044a..889ac5e20 100644
--- a/cmd/restic/cmd_snapshots.go
+++ b/cmd/restic/cmd_snapshots.go
@@ -36,7 +36,7 @@ type SnapshotOptions struct {
Compact bool
Last bool // This option should be removed in favour of Latest.
Latest int
- GroupBy string
+ GroupBy restic.SnapshotGroupByOptions
}
var snapshotOptions SnapshotOptions
@@ -54,7 +54,7 @@ func init() {
panic(err)
}
f.IntVar(&snapshotOptions.Latest, "latest", 0, "only show the last `n` snapshots for each host and path")
- f.StringVarP(&snapshotOptions.GroupBy, "group-by", "g", "", "`group` snapshots by host, paths and/or tags, separated by comma")
+ f.VarP(&snapshotOptions.GroupBy, "group-by", "g", "`group` snapshots by host, paths and/or tags, separated by comma")
}
func runSnapshots(ctx context.Context, opts SnapshotOptions, gopts GlobalOptions, args []string) error {
@@ -65,7 +65,7 @@ func runSnapshots(ctx context.Context, opts SnapshotOptions, gopts GlobalOptions
if !gopts.NoLock {
var lock *restic.Lock
- lock, ctx, err = lockRepo(ctx, repo)
+ lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
defer unlockRepo(lock)
if err != nil {
return err
@@ -94,7 +94,7 @@ func runSnapshots(ctx context.Context, opts SnapshotOptions, gopts GlobalOptions
}
if gopts.JSON {
- err := printSnapshotGroupJSON(gopts.stdout, snapshotGroups, grouped)
+ err := printSnapshotGroupJSON(globalOptions.stdout, snapshotGroups, grouped)
if err != nil {
Warnf("error printing snapshots: %v\n", err)
}
@@ -103,13 +103,13 @@ func runSnapshots(ctx context.Context, opts SnapshotOptions, gopts GlobalOptions
for k, list := range snapshotGroups {
if grouped {
- err := PrintSnapshotGroupHeader(gopts.stdout, k)
+ err := PrintSnapshotGroupHeader(globalOptions.stdout, k)
if err != nil {
Warnf("error printing snapshots: %v\n", err)
return nil
}
}
- PrintSnapshots(gopts.stdout, list, nil, opts.Compact)
+ PrintSnapshots(globalOptions.stdout, list, nil, opts.Compact)
}
return nil
diff --git a/cmd/restic/cmd_snapshots_integration_test.go b/cmd/restic/cmd_snapshots_integration_test.go
new file mode 100644
index 000000000..6eaa8faa4
--- /dev/null
+++ b/cmd/restic/cmd_snapshots_integration_test.go
@@ -0,0 +1,32 @@
+package main
+
+import (
+ "context"
+ "encoding/json"
+ "testing"
+
+ "github.com/restic/restic/internal/restic"
+ rtest "github.com/restic/restic/internal/test"
+)
+
+func testRunSnapshots(t testing.TB, gopts GlobalOptions) (newest *Snapshot, snapmap map[restic.ID]Snapshot) {
+ buf, err := withCaptureStdout(func() error {
+ gopts.JSON = true
+
+ opts := SnapshotOptions{}
+ return runSnapshots(context.TODO(), opts, gopts, []string{})
+ })
+ rtest.OK(t, err)
+
+ snapshots := []Snapshot{}
+ rtest.OK(t, json.Unmarshal(buf.Bytes(), &snapshots))
+
+ snapmap = make(map[restic.ID]Snapshot, len(snapshots))
+ for _, sn := range snapshots {
+ snapmap[*sn.ID] = sn
+ if newest == nil || sn.Time.After(newest.Time) {
+ newest = &sn
+ }
+ }
+ return
+}
diff --git a/cmd/restic/cmd_stats.go b/cmd/restic/cmd_stats.go
index 55ba6f254..a7ecd438f 100644
--- a/cmd/restic/cmd_stats.go
+++ b/cmd/restic/cmd_stats.go
@@ -5,11 +5,15 @@ import (
"encoding/json"
"fmt"
"path/filepath"
+ "strings"
+ "github.com/restic/chunker"
"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/crypto"
+ "github.com/restic/restic/internal/repository"
"github.com/restic/restic/internal/restic"
"github.com/restic/restic/internal/ui"
+ "github.com/restic/restic/internal/ui/table"
"github.com/restic/restic/internal/walker"
"github.com/minio/sha256-simd"
@@ -49,7 +53,7 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
`,
DisableAutoGenTag: true,
RunE: func(cmd *cobra.Command, args []string) error {
- return runStats(cmd.Context(), globalOptions, args)
+ return runStats(cmd.Context(), statsOptions, globalOptions, args)
},
}
@@ -70,8 +74,8 @@ func init() {
initMultiSnapshotFilter(f, &statsOptions.SnapshotFilter, true)
}
-func runStats(ctx context.Context, gopts GlobalOptions, args []string) error {
- err := verifyStatsInput(gopts, args)
+func runStats(ctx context.Context, opts StatsOptions, gopts GlobalOptions, args []string) error {
+ err := verifyStatsInput(opts)
if err != nil {
return err
}
@@ -83,7 +87,7 @@ func runStats(ctx context.Context, gopts GlobalOptions, args []string) error {
if !gopts.NoLock {
var lock *restic.Lock
- lock, ctx, err = lockRepo(ctx, repo)
+ lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
defer unlockRepo(lock)
if err != nil {
return err
@@ -99,6 +103,10 @@ func runStats(ctx context.Context, gopts GlobalOptions, args []string) error {
return err
}
+ if opts.countMode == countModeDebug {
+ return statsDebug(ctx, repo)
+ }
+
if !gopts.JSON {
Printf("scanning...\n")
}
@@ -111,8 +119,8 @@ func runStats(ctx context.Context, gopts GlobalOptions, args []string) error {
SnapshotsCount: 0,
}
- for sn := range FindFilteredSnapshots(ctx, snapshotLister, repo, &statsOptions.SnapshotFilter, args) {
- err = statsWalkSnapshot(ctx, sn, repo, stats)
+ for sn := range FindFilteredSnapshots(ctx, snapshotLister, repo, &opts.SnapshotFilter, args) {
+ err = statsWalkSnapshot(ctx, sn, repo, opts, stats)
if err != nil {
return fmt.Errorf("error walking snapshot: %v", err)
}
@@ -122,7 +130,7 @@ func runStats(ctx context.Context, gopts GlobalOptions, args []string) error {
return err
}
- if statsOptions.countMode == countModeRawData {
+ if opts.countMode == countModeRawData {
// the blob handles have been collected, but not yet counted
for blobHandle := range stats.blobs {
pbs := repo.Index().Lookup(blobHandle)
@@ -156,7 +164,7 @@ func runStats(ctx context.Context, gopts GlobalOptions, args []string) error {
return nil
}
- Printf("Stats in %s mode:\n", statsOptions.countMode)
+ Printf("Stats in %s mode:\n", opts.countMode)
Printf(" Snapshots processed: %d\n", stats.SnapshotsCount)
if stats.TotalBlobCount > 0 {
Printf(" Total Blob Count: %d\n", stats.TotalBlobCount)
@@ -181,21 +189,21 @@ func runStats(ctx context.Context, gopts GlobalOptions, args []string) error {
return nil
}
-func statsWalkSnapshot(ctx context.Context, snapshot *restic.Snapshot, repo restic.Repository, stats *statsContainer) error {
+func statsWalkSnapshot(ctx context.Context, snapshot *restic.Snapshot, repo restic.Repository, opts StatsOptions, stats *statsContainer) error {
if snapshot.Tree == nil {
return fmt.Errorf("snapshot %s has nil tree", snapshot.ID().Str())
}
stats.SnapshotsCount++
- if statsOptions.countMode == countModeRawData {
+ if opts.countMode == countModeRawData {
// count just the sizes of unique blobs; we don't need to walk the tree
// ourselves in this case, since a nifty function does it for us
return restic.FindUsedBlobs(ctx, repo, restic.IDs{*snapshot.Tree}, stats.blobs, nil)
}
uniqueInodes := make(map[uint64]struct{})
- err := walker.Walk(ctx, repo, *snapshot.Tree, restic.NewIDSet(), statsWalkTree(repo, stats, uniqueInodes))
+ err := walker.Walk(ctx, repo, *snapshot.Tree, restic.NewIDSet(), statsWalkTree(repo, opts, stats, uniqueInodes))
if err != nil {
return fmt.Errorf("walking tree %s: %v", *snapshot.Tree, err)
}
@@ -203,7 +211,7 @@ func statsWalkSnapshot(ctx context.Context, snapshot *restic.Snapshot, repo rest
return nil
}
-func statsWalkTree(repo restic.Repository, stats *statsContainer, uniqueInodes map[uint64]struct{}) walker.WalkFunc {
+func statsWalkTree(repo restic.Repository, opts StatsOptions, stats *statsContainer, uniqueInodes map[uint64]struct{}) walker.WalkFunc {
return func(parentTreeID restic.ID, npath string, node *restic.Node, nodeErr error) (bool, error) {
if nodeErr != nil {
return true, nodeErr
@@ -212,19 +220,19 @@ func statsWalkTree(repo restic.Repository, stats *statsContainer, uniqueInodes m
return true, nil
}
- if statsOptions.countMode == countModeUniqueFilesByContents || statsOptions.countMode == countModeBlobsPerFile {
+ if opts.countMode == countModeUniqueFilesByContents || opts.countMode == countModeBlobsPerFile {
// only count this file if we haven't visited it before
fid := makeFileIDByContents(node)
if _, ok := stats.uniqueFiles[fid]; !ok {
// mark the file as visited
stats.uniqueFiles[fid] = struct{}{}
- if statsOptions.countMode == countModeUniqueFilesByContents {
+ if opts.countMode == countModeUniqueFilesByContents {
// simply count the size of each unique file (unique by contents only)
stats.TotalSize += node.Size
stats.TotalFileCount++
}
- if statsOptions.countMode == countModeBlobsPerFile {
+ if opts.countMode == countModeBlobsPerFile {
// count the size of each unique blob reference, which is
// by unique file (unique by contents and file path)
for _, blobID := range node.Content {
@@ -254,7 +262,7 @@ func statsWalkTree(repo restic.Repository, stats *statsContainer, uniqueInodes m
}
}
- if statsOptions.countMode == countModeRestoreSize {
+ if opts.countMode == countModeRestoreSize {
// as this is a file in the snapshot, we can simply count its
// size without worrying about uniqueness, since duplicate files
// will still be restored
@@ -284,15 +292,16 @@ func makeFileIDByContents(node *restic.Node) fileID {
return sha256.Sum256(bb)
}
-func verifyStatsInput(gopts GlobalOptions, args []string) error {
+func verifyStatsInput(opts StatsOptions) error {
// require a recognized counting mode
- switch statsOptions.countMode {
+ switch opts.countMode {
case countModeRestoreSize:
case countModeUniqueFilesByContents:
case countModeBlobsPerFile:
case countModeRawData:
+ case countModeDebug:
default:
- return fmt.Errorf("unknown counting mode: %s (use the -h flag to get a list of supported modes)", statsOptions.countMode)
+ return fmt.Errorf("unknown counting mode: %s (use the -h flag to get a list of supported modes)", opts.countMode)
}
return nil
@@ -335,4 +344,149 @@ const (
countModeUniqueFilesByContents = "files-by-contents"
countModeBlobsPerFile = "blobs-per-file"
countModeRawData = "raw-data"
+ countModeDebug = "debug"
)
+
+func statsDebug(ctx context.Context, repo restic.Repository) error {
+ Warnf("Collecting size statistics\n\n")
+ for _, t := range []restic.FileType{restic.KeyFile, restic.LockFile, restic.IndexFile, restic.PackFile} {
+ hist, err := statsDebugFileType(ctx, repo, t)
+ if err != nil {
+ return err
+ }
+ Warnf("File Type: %v\n%v\n", t, hist)
+ }
+
+ hist := statsDebugBlobs(ctx, repo)
+ for _, t := range []restic.BlobType{restic.DataBlob, restic.TreeBlob} {
+ Warnf("Blob Type: %v\n%v\n\n", t, hist[t])
+ }
+
+ return nil
+}
+
+func statsDebugFileType(ctx context.Context, repo restic.Repository, tpe restic.FileType) (*sizeHistogram, error) {
+ hist := newSizeHistogram(2 * repository.MaxPackSize)
+ err := repo.List(ctx, tpe, func(id restic.ID, size int64) error {
+ hist.Add(uint64(size))
+ return nil
+ })
+
+ return hist, err
+}
+
+func statsDebugBlobs(ctx context.Context, repo restic.Repository) [restic.NumBlobTypes]*sizeHistogram {
+ var hist [restic.NumBlobTypes]*sizeHistogram
+ for i := 0; i < len(hist); i++ {
+ hist[i] = newSizeHistogram(2 * chunker.MaxSize)
+ }
+
+ repo.Index().Each(ctx, func(pb restic.PackedBlob) {
+ hist[pb.Type].Add(uint64(pb.Length))
+ })
+
+ return hist
+}
+
+type sizeClass struct {
+ lower, upper uint64
+ count int64
+}
+
+type sizeHistogram struct {
+ count int64
+ totalSize uint64
+ buckets []sizeClass
+ oversized []uint64
+}
+
+func newSizeHistogram(sizeLimit uint64) *sizeHistogram {
+ h := &sizeHistogram{}
+ h.buckets = append(h.buckets, sizeClass{0, 0, 0})
+
+ lowerBound := uint64(1)
+ growthFactor := uint64(10)
+
+ for lowerBound < sizeLimit {
+ upperBound := lowerBound*growthFactor - 1
+ if upperBound > sizeLimit {
+ upperBound = sizeLimit
+ }
+ h.buckets = append(h.buckets, sizeClass{lowerBound, upperBound, 0})
+ lowerBound *= growthFactor
+ }
+
+ return h
+}
+
+func (s *sizeHistogram) Add(size uint64) {
+ s.count++
+ s.totalSize += size
+
+ for i, bucket := range s.buckets {
+ if size >= bucket.lower && size <= bucket.upper {
+ s.buckets[i].count++
+ return
+ }
+ }
+
+ s.oversized = append(s.oversized, size)
+}
+
+func (s sizeHistogram) String() string {
+ var out strings.Builder
+
+ out.WriteString(fmt.Sprintf("Count: %d\n", s.count))
+ out.WriteString(fmt.Sprintf("Total Size: %s\n", ui.FormatBytes(s.totalSize)))
+
+ t := table.New()
+ t.AddColumn("Size", "{{.SizeRange}}")
+ t.AddColumn("Count", "{{.Count}}")
+ type line struct {
+ SizeRange string
+ Count int64
+ }
+
+ // only print up to the highest used bucket size
+ lastFilledIdx := 0
+ for i := 0; i < len(s.buckets); i++ {
+ if s.buckets[i].count != 0 {
+ lastFilledIdx = i
+ }
+ }
+
+ var lines []line
+ hasStarted := false
+ for i, b := range s.buckets {
+ if i > lastFilledIdx {
+ break
+ }
+
+ if b.count > 0 {
+ hasStarted = true
+ }
+ if hasStarted {
+ lines = append(lines, line{
+ SizeRange: fmt.Sprintf("%d - %d Byte", b.lower, b.upper),
+ Count: b.count,
+ })
+ }
+ }
+ longestRange := 0
+ for _, l := range lines {
+ if longestRange < len(l.SizeRange) {
+ longestRange = len(l.SizeRange)
+ }
+ }
+ for i := range lines {
+ lines[i].SizeRange = strings.Repeat(" ", longestRange-len(lines[i].SizeRange)) + lines[i].SizeRange
+ t.AddRow(lines[i])
+ }
+
+ _ = t.Write(&out)
+
+ if len(s.oversized) > 0 {
+ out.WriteString(fmt.Sprintf("Oversized: %v\n", s.oversized))
+ }
+ return out.String()
+}
diff --git a/cmd/restic/cmd_stats_test.go b/cmd/restic/cmd_stats_test.go
new file mode 100644
index 000000000..02d37acd9
--- /dev/null
+++ b/cmd/restic/cmd_stats_test.go
@@ -0,0 +1,62 @@
+package main
+
+import (
+ "testing"
+
+ rtest "github.com/restic/restic/internal/test"
+)
+
+func TestSizeHistogramNew(t *testing.T) {
+ h := newSizeHistogram(42)
+
+ exp := &sizeHistogram{
+ count: 0,
+ totalSize: 0,
+ buckets: []sizeClass{
+ {0, 0, 0},
+ {1, 9, 0},
+ {10, 42, 0},
+ },
+ }
+
+ rtest.Equals(t, exp, h)
+}
+
+func TestSizeHistogramAdd(t *testing.T) {
+ h := newSizeHistogram(42)
+ for i := uint64(0); i < 45; i++ {
+ h.Add(i)
+ }
+
+ exp := &sizeHistogram{
+ count: 45,
+ totalSize: 990,
+ buckets: []sizeClass{
+ {0, 0, 1},
+ {1, 9, 9},
+ {10, 42, 33},
+ },
+ oversized: []uint64{43, 44},
+ }
+
+ rtest.Equals(t, exp, h)
+}
+
+func TestSizeHistogramString(t *testing.T) {
+ t.Run("overflow", func(t *testing.T) {
+ h := newSizeHistogram(42)
+ h.Add(8)
+ h.Add(50)
+
+ rtest.Equals(t, "Count: 2\nTotal Size: 58 B\nSize Count\n-----------------\n1 - 9 Byte 1\n-----------------\nOversized: [50]\n", h.String())
+ })
+
+ t.Run("withZero", func(t *testing.T) {
+ h := newSizeHistogram(42)
+ h.Add(0)
+ h.Add(1)
+ h.Add(10)
+
+ rtest.Equals(t, "Count: 3\nTotal Size: 11 B\nSize Count\n-------------------\n 0 - 0 Byte 1\n 1 - 9 Byte 1\n10 - 42 Byte 1\n-------------------\n", h.String())
+ })
+}
diff --git a/cmd/restic/cmd_tag.go b/cmd/restic/cmd_tag.go
index e5948ea02..fe4638547 100644
--- a/cmd/restic/cmd_tag.go
+++ b/cmd/restic/cmd_tag.go
@@ -111,7 +111,7 @@ func runTag(ctx context.Context, opts TagOptions, gopts GlobalOptions, args []st
if !gopts.NoLock {
Verbosef("create exclusive lock for repository\n")
var lock *restic.Lock
- lock, ctx, err = lockRepoExclusive(ctx, repo)
+ lock, ctx, err = lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON)
defer unlockRepo(lock)
if err != nil {
return err
diff --git a/cmd/restic/cmd_tag_integration_test.go b/cmd/restic/cmd_tag_integration_test.go
new file mode 100644
index 000000000..3b902c51e
--- /dev/null
+++ b/cmd/restic/cmd_tag_integration_test.go
@@ -0,0 +1,94 @@
+package main
+
+import (
+ "context"
+ "testing"
+
+ "github.com/restic/restic/internal/restic"
+ rtest "github.com/restic/restic/internal/test"
+)
+
+func testRunTag(t testing.TB, opts TagOptions, gopts GlobalOptions) {
+ rtest.OK(t, runTag(context.TODO(), opts, gopts, []string{}))
+}
+
+func TestTag(t *testing.T) {
+ env, cleanup := withTestEnvironment(t)
+ defer cleanup()
+
+ testSetupBackupData(t, env)
+ testRunBackup(t, "", []string{env.testdata}, BackupOptions{}, env.gopts)
+ testRunCheck(t, env.gopts)
+ newest, _ := testRunSnapshots(t, env.gopts)
+ if newest == nil {
+ t.Fatal("expected a new backup, got nil")
+ }
+
+ rtest.Assert(t, len(newest.Tags) == 0,
+ "expected no tags, got %v", newest.Tags)
+ rtest.Assert(t, newest.Original == nil,
+ "expected original ID to be nil, got %v", newest.Original)
+ originalID := *newest.ID
+
+ testRunTag(t, TagOptions{SetTags: restic.TagLists{[]string{"NL"}}}, env.gopts)
+ testRunCheck(t, env.gopts)
+ newest, _ = testRunSnapshots(t, env.gopts)
+ if newest == nil {
+ t.Fatal("expected a backup, got nil")
+ }
+ rtest.Assert(t, len(newest.Tags) == 1 && newest.Tags[0] == "NL",
+ "set failed, expected one NL tag, got %v", newest.Tags)
+ rtest.Assert(t, newest.Original != nil, "expected original snapshot id, got nil")
+ rtest.Assert(t, *newest.Original == originalID,
+ "expected original ID to be set to the first snapshot id")
+
+ testRunTag(t, TagOptions{AddTags: restic.TagLists{[]string{"CH"}}}, env.gopts)
+ testRunCheck(t, env.gopts)
+ newest, _ = testRunSnapshots(t, env.gopts)
+ if newest == nil {
+ t.Fatal("expected a backup, got nil")
+ }
+ rtest.Assert(t, len(newest.Tags) == 2 && newest.Tags[0] == "NL" && newest.Tags[1] == "CH",
+ "add failed, expected CH,NL tags, got %v", newest.Tags)
+ rtest.Assert(t, newest.Original != nil, "expected original snapshot id, got nil")
+ rtest.Assert(t, *newest.Original == originalID,
+ "expected original ID to be set to the first snapshot id")
+
+ testRunTag(t, TagOptions{RemoveTags: restic.TagLists{[]string{"NL"}}}, env.gopts)
+ testRunCheck(t, env.gopts)
+ newest, _ = testRunSnapshots(t, env.gopts)
+ if newest == nil {
+ t.Fatal("expected a backup, got nil")
+ }
+ rtest.Assert(t, len(newest.Tags) == 1 && newest.Tags[0] == "CH",
+ "remove failed, expected one CH tag, got %v", newest.Tags)
+ rtest.Assert(t, newest.Original != nil, "expected original snapshot id, got nil")
+ rtest.Assert(t, *newest.Original == originalID,
+ "expected original ID to be set to the first snapshot id")
+
+ testRunTag(t, TagOptions{AddTags: restic.TagLists{[]string{"US", "RU"}}}, env.gopts)
+ testRunTag(t, TagOptions{RemoveTags: restic.TagLists{[]string{"CH", "US", "RU"}}}, env.gopts)
+ testRunCheck(t, env.gopts)
+ newest, _ = testRunSnapshots(t, env.gopts)
+ if newest == nil {
+ t.Fatal("expected a backup, got nil")
+ }
+ rtest.Assert(t, len(newest.Tags) == 0,
+ "expected no tags, got %v", newest.Tags)
+ rtest.Assert(t, newest.Original != nil, "expected original snapshot id, got nil")
+ rtest.Assert(t, *newest.Original == originalID,
+ "expected original ID to be set to the first snapshot id")
+
+ // Check special case of removing all tags.
+ testRunTag(t, TagOptions{SetTags: restic.TagLists{[]string{""}}}, env.gopts)
+ testRunCheck(t, env.gopts)
+ newest, _ = testRunSnapshots(t, env.gopts)
+ if newest == nil {
+ t.Fatal("expected a backup, got nil")
+ }
+ rtest.Assert(t, len(newest.Tags) == 0,
+ "expected no tags, got %v", newest.Tags)
+ rtest.Assert(t, newest.Original != nil, "expected original snapshot id, got nil")
+ rtest.Assert(t, *newest.Original == originalID,
+ "expected original ID to be set to the first snapshot id")
+}
diff --git a/cmd/restic/exclude.go b/cmd/restic/exclude.go
index efe6f41e4..095944610 100644
--- a/cmd/restic/exclude.go
+++ b/cmd/restic/exclude.go
@@ -7,7 +7,6 @@ import (
"io"
"os"
"path/filepath"
- "strconv"
"strings"
"sync"
@@ -17,6 +16,7 @@ import (
"github.com/restic/restic/internal/fs"
"github.com/restic/restic/internal/repository"
"github.com/restic/restic/internal/textfile"
+ "github.com/restic/restic/internal/ui"
"github.com/spf13/pflag"
)
@@ -364,7 +364,7 @@ func rejectResticCache(repo *repository.Repository) (RejectByNameFunc, error) {
}
func rejectBySize(maxSizeStr string) (RejectFunc, error) {
- maxSize, err := parseSizeStr(maxSizeStr)
+ maxSize, err := ui.ParseBytes(maxSizeStr)
if err != nil {
return nil, err
}
@@ -385,35 +385,6 @@ func rejectBySize(maxSizeStr string) (RejectFunc, error) {
}, nil
}
-func parseSizeStr(sizeStr string) (int64, error) {
- if sizeStr == "" {
- return 0, errors.New("expected size, got empty string")
- }
-
- numStr := sizeStr[:len(sizeStr)-1]
- var unit int64 = 1
-
- switch sizeStr[len(sizeStr)-1] {
- case 'b', 'B':
- // use initialized values, do nothing here
- case 'k', 'K':
- unit = 1024
- case 'm', 'M':
- unit = 1024 * 1024
- case 'g', 'G':
- unit = 1024 * 1024 * 1024
- case 't', 'T':
- unit = 1024 * 1024 * 1024 * 1024
- default:
- numStr = sizeStr
- }
- value, err := strconv.ParseInt(numStr, 10, 64)
- if err != nil {
- return 0, err
- }
- return value * unit, nil
-}
-
// readExcludePatternsFromFiles reads all exclude files and returns the list of
// exclude patterns. For each line, leading and trailing white space is removed
// and comment lines are ignored. For each remaining pattern, environment
diff --git a/cmd/restic/exclude_test.go b/cmd/restic/exclude_test.go
index 050a083e4..9a24418ae 100644
--- a/cmd/restic/exclude_test.go
+++ b/cmd/restic/exclude_test.go
@@ -187,54 +187,6 @@ func TestMultipleIsExcludedByFile(t *testing.T) {
}
}
-func TestParseSizeStr(t *testing.T) {
- sizeStrTests := []struct {
- in string
- expected int64
- }{
- {"1024", 1024},
- {"1024b", 1024},
- {"1024B", 1024},
- {"1k", 1024},
- {"100k", 102400},
- {"100K", 102400},
- {"10M", 10485760},
- {"100m", 104857600},
- {"20G", 21474836480},
- {"10g", 10737418240},
- {"2T", 2199023255552},
- {"2t", 2199023255552},
- }
-
- for _, tt := range sizeStrTests {
- actual, err := parseSizeStr(tt.in)
- test.OK(t, err)
-
- if actual != tt.expected {
- t.Errorf("parseSizeStr(%s) = %d; expected %d", tt.in, actual, tt.expected)
- }
- }
-}
-
-func TestParseInvalidSizeStr(t *testing.T) {
- invalidSizes := []string{
- "",
- " ",
- "foobar",
- "zzz",
- }
-
- for _, s := range invalidSizes {
- v, err := parseSizeStr(s)
- if err == nil {
- t.Errorf("wanted error for invalid value %q, got nil", s)
- }
- if v != 0 {
- t.Errorf("wanted zero for invalid value %q, got: %v", s, v)
- }
- }
-}
-
// TestIsExcludedByFileSize is for testing the instance of
// --exclude-larger-than parameters
func TestIsExcludedByFileSize(t *testing.T) {
diff --git a/cmd/restic/format.go b/cmd/restic/format.go
index 2f14a4575..063cd4e71 100644
--- a/cmd/restic/format.go
+++ b/cmd/restic/format.go
@@ -5,9 +5,10 @@ import (
"os"
"github.com/restic/restic/internal/restic"
+ "github.com/restic/restic/internal/ui"
)
-func formatNode(path string, n *restic.Node, long bool) string {
+func formatNode(path string, n *restic.Node, long bool, human bool) string {
if !long {
return path
}
@@ -15,6 +16,13 @@ func formatNode(path string, n *restic.Node, long bool) string {
var mode os.FileMode
var target string
+ var size string
+ if human {
+ size = ui.FormatBytes(n.Size)
+ } else {
+ size = fmt.Sprintf("%6d", n.Size)
+ }
+
switch n.Type {
case "file":
mode = 0
@@ -33,8 +41,8 @@ func formatNode(path string, n *restic.Node, long bool) string {
mode = os.ModeSocket
}
- return fmt.Sprintf("%s %5d %5d %6d %s %s%s",
- mode|n.Mode, n.UID, n.GID, n.Size,
+ return fmt.Sprintf("%s %5d %5d %s %s %s%s",
+ mode|n.Mode, n.UID, n.GID, size,
n.ModTime.Local().Format(TimeFormat), path,
target)
}
diff --git a/cmd/restic/format_test.go b/cmd/restic/format_test.go
new file mode 100644
index 000000000..689bd27a5
--- /dev/null
+++ b/cmd/restic/format_test.go
@@ -0,0 +1,61 @@
+package main
+
+import (
+ "testing"
+ "time"
+
+ "github.com/restic/restic/internal/restic"
+ rtest "github.com/restic/restic/internal/test"
+)
+
+func TestFormatNode(t *testing.T) {
+ // overwrite time zone to ensure the data is formatted reproducibly
+ tz := time.Local
+ time.Local = time.UTC
+ defer func() {
+ time.Local = tz
+ }()
+
+ testPath := "/test/path"
+ node := restic.Node{
+ Name: "baz",
+ Type: "file",
+ Size: 14680064,
+ UID: 1000,
+ GID: 2000,
+ ModTime: time.Date(2020, 1, 2, 3, 4, 5, 0, time.UTC),
+ }
+
+ for _, c := range []struct {
+ path string
+ restic.Node
+ long bool
+ human bool
+ expect string
+ }{
+ {
+ path: testPath,
+ Node: node,
+ long: false,
+ human: false,
+ expect: testPath,
+ },
+ {
+ path: testPath,
+ Node: node,
+ long: true,
+ human: false,
+ expect: "---------- 1000 2000 14680064 2020-01-02 03:04:05 " + testPath,
+ },
+ {
+ path: testPath,
+ Node: node,
+ long: true,
+ human: true,
+ expect: "---------- 1000 2000 14.000 MiB 2020-01-02 03:04:05 " + testPath,
+ },
+ } {
+ r := formatNode(c.path, &c.Node, c.long, c.human)
+ rtest.Equals(t, c.expect, r)
+ }
+}
diff --git a/cmd/restic/global.go b/cmd/restic/global.go
index 206229d94..63e13c3ae 100644
--- a/cmd/restic/global.go
+++ b/cmd/restic/global.go
@@ -10,7 +10,6 @@ import (
"runtime"
"strconv"
"strings"
- "syscall"
"time"
"github.com/restic/restic/internal/backend"
@@ -20,10 +19,12 @@ import (
"github.com/restic/restic/internal/backend/limiter"
"github.com/restic/restic/internal/backend/local"
"github.com/restic/restic/internal/backend/location"
+ "github.com/restic/restic/internal/backend/logger"
"github.com/restic/restic/internal/backend/rclone"
"github.com/restic/restic/internal/backend/rest"
"github.com/restic/restic/internal/backend/retry"
"github.com/restic/restic/internal/backend/s3"
+ "github.com/restic/restic/internal/backend/sema"
"github.com/restic/restic/internal/backend/sftp"
"github.com/restic/restic/internal/backend/swift"
"github.com/restic/restic/internal/cache"
@@ -42,7 +43,7 @@ import (
"golang.org/x/term"
)
-var version = "0.15.2"
+var version = "0.16.0"
// TimeFormat is the format used for all timestamps printed by restic.
const TimeFormat = "2006-01-02 15:04:05"
@@ -59,6 +60,7 @@ type GlobalOptions struct {
Quiet bool
Verbose int
NoLock bool
+ RetryLock time.Duration
JSON bool
CacheDir string
NoCache bool
@@ -73,6 +75,7 @@ type GlobalOptions struct {
stdout io.Writer
stderr io.Writer
+ backends *location.Registry
backendTestHook, backendInnerTestHook backendWrapper
// verbosity is set as follows:
@@ -96,6 +99,18 @@ var isReadingPassword bool
var internalGlobalCtx context.Context
func init() {
+ backends := location.NewRegistry()
+ backends.Register(azure.NewFactory())
+ backends.Register(b2.NewFactory())
+ backends.Register(gs.NewFactory())
+ backends.Register(local.NewFactory())
+ backends.Register(rclone.NewFactory())
+ backends.Register(rest.NewFactory())
+ backends.Register(s3.NewFactory())
+ backends.Register(sftp.NewFactory())
+ backends.Register(swift.NewFactory())
+ globalOptions.backends = backends
+
var cancel context.CancelFunc
internalGlobalCtx, cancel = context.WithCancel(context.Background())
AddCleanupHandler(func(code int) (int, error) {
@@ -115,14 +130,15 @@ func init() {
// use empty paremeter name as `-v, --verbose n` instead of the correct `--verbose=n` is confusing
f.CountVarP(&globalOptions.Verbose, "verbose", "v", "be verbose (specify multiple times or a level using --verbose=n``, max level/times is 2)")
f.BoolVar(&globalOptions.NoLock, "no-lock", false, "do not lock the repository, this allows some operations on read-only repositories")
+ f.DurationVar(&globalOptions.RetryLock, "retry-lock", 0, "retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries)")
f.BoolVarP(&globalOptions.JSON, "json", "", false, "set output mode to JSON for commands that support it")
f.StringVar(&globalOptions.CacheDir, "cache-dir", "", "set the cache `directory`. (default: use system default cache directory)")
f.BoolVar(&globalOptions.NoCache, "no-cache", false, "do not use a local cache")
- f.StringSliceVar(&globalOptions.RootCertFilenames, "cacert", nil, "`file` to load root certificates from (default: use system certificates)")
- f.StringVar(&globalOptions.TLSClientCertKeyFilename, "tls-client-cert", "", "path to a `file` containing PEM encoded TLS client certificate and private key")
+ f.StringSliceVar(&globalOptions.RootCertFilenames, "cacert", nil, "`file` to load root certificates from (default: use system certificates or $RESTIC_CACERT)")
+ f.StringVar(&globalOptions.TLSClientCertKeyFilename, "tls-client-cert", "", "path to a `file` containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT)")
f.BoolVar(&globalOptions.InsecureTLS, "insecure-tls", false, "skip TLS certificate verification when connecting to the repository (insecure)")
f.BoolVar(&globalOptions.CleanupCache, "cleanup-cache", false, "auto remove old cache directories")
- f.Var(&globalOptions.Compression, "compression", "compression mode (only available for repository format version 2), one of (auto|off|max)")
+ f.Var(&globalOptions.Compression, "compression", "compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION)")
f.IntVar(&globalOptions.Limits.UploadKb, "limit-upload", 0, "limits uploads to a maximum `rate` in KiB/s. (default: unlimited)")
f.IntVar(&globalOptions.Limits.DownloadKb, "limit-download", 0, "limits downloads to a maximum `rate` in KiB/s. (default: unlimited)")
f.UintVar(&globalOptions.PackSize, "pack-size", 0, "set target pack `size` in MiB, created pack files may be larger (default: $RESTIC_PACK_SIZE)")
@@ -135,6 +151,10 @@ func init() {
globalOptions.PasswordFile = os.Getenv("RESTIC_PASSWORD_FILE")
globalOptions.KeyHint = os.Getenv("RESTIC_KEY_HINT")
globalOptions.PasswordCommand = os.Getenv("RESTIC_PASSWORD_COMMAND")
+ if os.Getenv("RESTIC_CACERT") != "" {
+ globalOptions.RootCertFilenames = strings.Split(os.Getenv("RESTIC_CACERT"), ",")
+ }
+ globalOptions.TLSClientCertKeyFilename = os.Getenv("RESTIC_TLS_CLIENT_CERT")
comp := os.Getenv("RESTIC_COMPRESSION")
if comp != "" {
// ignore error as there's no good way to handle it
@@ -147,21 +167,6 @@ func init() {
restoreTerminal()
}
-// checkErrno returns nil when err is set to syscall.Errno(0), since this is no
-// error condition.
-func checkErrno(err error) error {
- e, ok := err.(syscall.Errno)
- if !ok {
- return err
- }
-
- if e == 0 {
- return nil
- }
-
- return err
-}
-
func stdinIsTerminal() bool {
return term.IsTerminal(int(os.Stdin.Fd()))
}
@@ -210,7 +215,7 @@ func restoreTerminal() {
if !isReadingPassword {
return code, nil
}
- err := checkErrno(term.Restore(fd, state))
+ err := term.Restore(fd, state)
if err != nil {
fmt.Fprintf(os.Stderr, "unable to restore terminal state: %v\n", err)
}
@@ -280,6 +285,7 @@ func Warnf(format string, args ...interface{}) {
if err != nil {
fmt.Fprintf(os.Stderr, "unable to write to stderr: %v\n", err)
}
+ debug.Log(format, args...)
}
// resolvePassword determines the password to be used for opening the repository.
@@ -451,7 +457,7 @@ func OpenRepository(ctx context.Context, opts GlobalOptions) (*repository.Reposi
PackSize: opts.PackSize * 1024 * 1024,
})
if err != nil {
- return nil, err
+ return nil, errors.Fatal(err.Error())
}
passwordTriesLeft := 1
@@ -546,153 +552,25 @@ func OpenRepository(ctx context.Context, opts GlobalOptions) (*repository.Reposi
}
func parseConfig(loc location.Location, opts options.Options) (interface{}, error) {
+ cfg := loc.Config
+ if cfg, ok := cfg.(restic.ApplyEnvironmenter); ok {
+ cfg.ApplyEnvironment("")
+ }
+
// only apply options for a particular backend here
opts = opts.Extract(loc.Scheme)
-
- switch loc.Scheme {
- case "local":
- cfg := loc.Config.(local.Config)
- if err := opts.Apply(loc.Scheme, &cfg); err != nil {
- return nil, err
- }
-
- debug.Log("opening local repository at %#v", cfg)
- return cfg, nil
-
- case "sftp":
- cfg := loc.Config.(sftp.Config)
- if err := opts.Apply(loc.Scheme, &cfg); err != nil {
- return nil, err
- }
-
- debug.Log("opening sftp repository at %#v", cfg)
- return cfg, nil
-
- case "s3":
- cfg := loc.Config.(s3.Config)
- if cfg.KeyID == "" {
- cfg.KeyID = os.Getenv("AWS_ACCESS_KEY_ID")
- }
-
- if cfg.Secret.String() == "" {
- cfg.Secret = options.NewSecretString(os.Getenv("AWS_SECRET_ACCESS_KEY"))
- }
-
- if cfg.KeyID == "" && cfg.Secret.String() != "" {
- return nil, errors.Fatalf("unable to open S3 backend: Key ID ($AWS_ACCESS_KEY_ID) is empty")
- } else if cfg.KeyID != "" && cfg.Secret.String() == "" {
- return nil, errors.Fatalf("unable to open S3 backend: Secret ($AWS_SECRET_ACCESS_KEY) is empty")
- }
-
- if cfg.Region == "" {
- cfg.Region = os.Getenv("AWS_DEFAULT_REGION")
- }
-
- if err := opts.Apply(loc.Scheme, &cfg); err != nil {
- return nil, err
- }
-
- debug.Log("opening s3 repository at %#v", cfg)
- return cfg, nil
-
- case "gs":
- cfg := loc.Config.(gs.Config)
- if cfg.ProjectID == "" {
- cfg.ProjectID = os.Getenv("GOOGLE_PROJECT_ID")
- }
-
- if err := opts.Apply(loc.Scheme, &cfg); err != nil {
- return nil, err
- }
-
- debug.Log("opening gs repository at %#v", cfg)
- return cfg, nil
-
- case "azure":
- cfg := loc.Config.(azure.Config)
- if cfg.AccountName == "" {
- cfg.AccountName = os.Getenv("AZURE_ACCOUNT_NAME")
- }
-
- if cfg.AccountKey.String() == "" {
- cfg.AccountKey = options.NewSecretString(os.Getenv("AZURE_ACCOUNT_KEY"))
- }
-
- if cfg.AccountSAS.String() == "" {
- cfg.AccountSAS = options.NewSecretString(os.Getenv("AZURE_ACCOUNT_SAS"))
- }
-
- if err := opts.Apply(loc.Scheme, &cfg); err != nil {
- return nil, err
- }
-
- debug.Log("opening gs repository at %#v", cfg)
- return cfg, nil
-
- case "swift":
- cfg := loc.Config.(swift.Config)
-
- if err := swift.ApplyEnvironment("", &cfg); err != nil {
- return nil, err
- }
-
- if err := opts.Apply(loc.Scheme, &cfg); err != nil {
- return nil, err
- }
-
- debug.Log("opening swift repository at %#v", cfg)
- return cfg, nil
-
- case "b2":
- cfg := loc.Config.(b2.Config)
-
- if cfg.AccountID == "" {
- cfg.AccountID = os.Getenv("B2_ACCOUNT_ID")
- }
-
- if cfg.AccountID == "" {
- return nil, errors.Fatalf("unable to open B2 backend: Account ID ($B2_ACCOUNT_ID) is empty")
- }
-
- if cfg.Key.String() == "" {
- cfg.Key = options.NewSecretString(os.Getenv("B2_ACCOUNT_KEY"))
- }
-
- if cfg.Key.String() == "" {
- return nil, errors.Fatalf("unable to open B2 backend: Key ($B2_ACCOUNT_KEY) is empty")
- }
-
- if err := opts.Apply(loc.Scheme, &cfg); err != nil {
- return nil, err
- }
-
- debug.Log("opening b2 repository at %#v", cfg)
- return cfg, nil
- case "rest":
- cfg := loc.Config.(rest.Config)
- if err := opts.Apply(loc.Scheme, &cfg); err != nil {
- return nil, err
- }
-
- debug.Log("opening rest repository at %#v", cfg)
- return cfg, nil
- case "rclone":
- cfg := loc.Config.(rclone.Config)
- if err := opts.Apply(loc.Scheme, &cfg); err != nil {
- return nil, err
- }
-
- debug.Log("opening rest repository at %#v", cfg)
- return cfg, nil
+ if err := opts.Apply(loc.Scheme, cfg); err != nil {
+ return nil, err
}
- return nil, errors.Fatalf("invalid backend: %q", loc.Scheme)
+ debug.Log("opening %v repository at %#v", loc.Scheme, cfg)
+ return cfg, nil
}
// Open the backend specified by a location config.
func open(ctx context.Context, s string, gopts GlobalOptions, opts options.Options) (restic.Backend, error) {
- debug.Log("parsing location %v", location.StripPassword(s))
- loc, err := location.Parse(s)
+ debug.Log("parsing location %v", location.StripPassword(gopts.backends, s))
+ loc, err := location.Parse(gopts.backends, s)
if err != nil {
return nil, errors.Fatalf("parsing repository location failed: %v", err)
}
@@ -706,41 +584,26 @@ func open(ctx context.Context, s string, gopts GlobalOptions, opts options.Optio
rt, err := backend.Transport(globalOptions.TransportOptions)
if err != nil {
- return nil, err
+ return nil, errors.Fatal(err.Error())
}
// wrap the transport so that the throughput via HTTP is limited
lim := limiter.NewStaticLimiter(gopts.Limits)
rt = lim.Transport(rt)
- switch loc.Scheme {
- case "local":
- be, err = local.Open(ctx, cfg.(local.Config))
- case "sftp":
- be, err = sftp.Open(ctx, cfg.(sftp.Config))
- case "s3":
- be, err = s3.Open(ctx, cfg.(s3.Config), rt)
- case "gs":
- be, err = gs.Open(cfg.(gs.Config), rt)
- case "azure":
- be, err = azure.Open(ctx, cfg.(azure.Config), rt)
- case "swift":
- be, err = swift.Open(ctx, cfg.(swift.Config), rt)
- case "b2":
- be, err = b2.Open(ctx, cfg.(b2.Config), rt)
- case "rest":
- be, err = rest.Open(cfg.(rest.Config), rt)
- case "rclone":
- be, err = rclone.Open(cfg.(rclone.Config), lim)
-
- default:
+ factory := gopts.backends.Lookup(loc.Scheme)
+ if factory == nil {
return nil, errors.Fatalf("invalid backend: %q", loc.Scheme)
}
+ be, err = factory.Open(ctx, cfg, rt, lim)
if err != nil {
- return nil, errors.Fatalf("unable to open repository at %v: %v", location.StripPassword(s), err)
+ return nil, errors.Fatalf("unable to open repository at %v: %v", location.StripPassword(gopts.backends, s), err)
}
+ // wrap with debug logging and connection limiting
+ be = logger.New(sema.NewBackend(be))
+
// wrap backend if a test specified an inner hook
if gopts.backendInnerTestHook != nil {
be, err = gopts.backendInnerTestHook(be)
@@ -749,15 +612,10 @@ func open(ctx context.Context, s string, gopts GlobalOptions, opts options.Optio
}
}
- if loc.Scheme == "local" || loc.Scheme == "sftp" {
- // wrap the backend in a LimitBackend so that the throughput is limited
- be = limiter.LimitBackend(be, lim)
- }
-
// check if config is there
fi, err := be.Stat(ctx, restic.Handle{Type: restic.ConfigFile})
if err != nil {
- return nil, errors.Fatalf("unable to open config file: %v\nIs there a repository at the following location?\n%v", err, location.StripPassword(s))
+ return nil, errors.Fatalf("unable to open config file: %v\nIs there a repository at the following location?\n%v", err, location.StripPassword(gopts.backends, s))
}
if fi.Size == 0 {
@@ -768,9 +626,9 @@ func open(ctx context.Context, s string, gopts GlobalOptions, opts options.Optio
}
// Create the backend specified by URI.
-func create(ctx context.Context, s string, opts options.Options) (restic.Backend, error) {
- debug.Log("parsing location %v", s)
- loc, err := location.Parse(s)
+func create(ctx context.Context, s string, gopts GlobalOptions, opts options.Options) (restic.Backend, error) {
+ debug.Log("parsing location %v", location.StripPassword(gopts.backends, s))
+ loc, err := location.Parse(gopts.backends, s)
if err != nil {
return nil, err
}
@@ -782,30 +640,18 @@ func create(ctx context.Context, s string, opts options.Options) (restic.Backend
rt, err := backend.Transport(globalOptions.TransportOptions)
if err != nil {
+ return nil, errors.Fatal(err.Error())
+ }
+
+ factory := gopts.backends.Lookup(loc.Scheme)
+ if factory == nil {
+ return nil, errors.Fatalf("invalid backend: %q", loc.Scheme)
+ }
+
+ be, err := factory.Create(ctx, cfg, rt, nil)
+ if err != nil {
return nil, err
}
- switch loc.Scheme {
- case "local":
- return local.Create(ctx, cfg.(local.Config))
- case "sftp":
- return sftp.Create(ctx, cfg.(sftp.Config))
- case "s3":
- return s3.Create(ctx, cfg.(s3.Config), rt)
- case "gs":
- return gs.Create(cfg.(gs.Config), rt)
- case "azure":
- return azure.Create(ctx, cfg.(azure.Config), rt)
- case "swift":
- return swift.Open(ctx, cfg.(swift.Config), rt)
- case "b2":
- return b2.Create(ctx, cfg.(b2.Config), rt)
- case "rest":
- return rest.Create(ctx, cfg.(rest.Config), rt)
- case "rclone":
- return rclone.Create(ctx, cfg.(rclone.Config))
- }
-
- debug.Log("invalid repository scheme: %v", s)
- return nil, errors.Fatalf("invalid scheme %q", loc.Scheme)
+ return logger.New(sema.NewBackend(be)), nil
}
diff --git a/cmd/restic/global_test.go b/cmd/restic/global_test.go
index 85a9514b9..4f5c29e9a 100644
--- a/cmd/restic/global_test.go
+++ b/cmd/restic/global_test.go
@@ -1,37 +1,29 @@
package main
import (
- "bytes"
"os"
"path/filepath"
"testing"
- "github.com/restic/restic/internal/test"
rtest "github.com/restic/restic/internal/test"
)
func Test_PrintFunctionsRespectsGlobalStdout(t *testing.T) {
- gopts := globalOptions
- defer func() {
- globalOptions = gopts
- }()
-
- buf := bytes.NewBuffer(nil)
- globalOptions.stdout = buf
-
for _, p := range []func(){
func() { Println("message") },
func() { Print("message\n") },
func() { Printf("mes%s\n", "sage") },
} {
- p()
+ buf, _ := withCaptureStdout(func() error {
+ p()
+ return nil
+ })
rtest.Equals(t, "message\n", buf.String())
- buf.Reset()
}
}
func TestReadRepo(t *testing.T) {
- tempDir := test.TempDir(t)
+ tempDir := rtest.TempDir(t)
// test --repo option
var opts GlobalOptions
diff --git a/cmd/restic/integration_filter_pattern_test.go b/cmd/restic/integration_filter_pattern_test.go
index ea5753d20..2eacdeea9 100644
--- a/cmd/restic/integration_filter_pattern_test.go
+++ b/cmd/restic/integration_filter_pattern_test.go
@@ -70,28 +70,28 @@ func TestRestoreFailsWhenUsingInvalidPatterns(t *testing.T) {
var err error
// Test --exclude
- err = testRunRestoreAssumeFailure(t, "latest", RestoreOptions{Exclude: []string{"*[._]log[.-][0-9]", "!*[._]log[.-][0-9]"}}, env.gopts)
+ err = testRunRestoreAssumeFailure("latest", RestoreOptions{Exclude: []string{"*[._]log[.-][0-9]", "!*[._]log[.-][0-9]"}}, env.gopts)
rtest.Equals(t, `Fatal: --exclude: invalid pattern(s) provided:
*[._]log[.-][0-9]
!*[._]log[.-][0-9]`, err.Error())
// Test --iexclude
- err = testRunRestoreAssumeFailure(t, "latest", RestoreOptions{InsensitiveExclude: []string{"*[._]log[.-][0-9]", "!*[._]log[.-][0-9]"}}, env.gopts)
+ err = testRunRestoreAssumeFailure("latest", RestoreOptions{InsensitiveExclude: []string{"*[._]log[.-][0-9]", "!*[._]log[.-][0-9]"}}, env.gopts)
rtest.Equals(t, `Fatal: --iexclude: invalid pattern(s) provided:
*[._]log[.-][0-9]
!*[._]log[.-][0-9]`, err.Error())
// Test --include
- err = testRunRestoreAssumeFailure(t, "latest", RestoreOptions{Include: []string{"*[._]log[.-][0-9]", "!*[._]log[.-][0-9]"}}, env.gopts)
+ err = testRunRestoreAssumeFailure("latest", RestoreOptions{Include: []string{"*[._]log[.-][0-9]", "!*[._]log[.-][0-9]"}}, env.gopts)
rtest.Equals(t, `Fatal: --include: invalid pattern(s) provided:
*[._]log[.-][0-9]
!*[._]log[.-][0-9]`, err.Error())
// Test --iinclude
- err = testRunRestoreAssumeFailure(t, "latest", RestoreOptions{InsensitiveInclude: []string{"*[._]log[.-][0-9]", "!*[._]log[.-][0-9]"}}, env.gopts)
+ err = testRunRestoreAssumeFailure("latest", RestoreOptions{InsensitiveInclude: []string{"*[._]log[.-][0-9]", "!*[._]log[.-][0-9]"}}, env.gopts)
rtest.Equals(t, `Fatal: --iinclude: invalid pattern(s) provided:
*[._]log[.-][0-9]
diff --git a/cmd/restic/integration_helpers_test.go b/cmd/restic/integration_helpers_test.go
index 655aa9335..b7cb5b333 100644
--- a/cmd/restic/integration_helpers_test.go
+++ b/cmd/restic/integration_helpers_test.go
@@ -2,17 +2,23 @@ package main
import (
"bytes"
+ "context"
+ "crypto/rand"
"fmt"
+ "io"
"os"
"path/filepath"
"runtime"
+ "sync"
"testing"
"github.com/restic/restic/internal/backend/retry"
+ "github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/options"
"github.com/restic/restic/internal/repository"
"github.com/restic/restic/internal/restic"
rtest "github.com/restic/restic/internal/test"
+ "github.com/restic/restic/internal/ui/termstatus"
)
type dirEntry struct {
@@ -200,6 +206,8 @@ func withTestEnvironment(t testing.TB) (env *testEnvironment, cleanup func()) {
// replace this hook with "nil" if listing a filetype more than once is necessary
backendTestHook: func(r restic.Backend) (restic.Backend, error) { return newOrderedListOnceBackend(r), nil },
+ // start with default set of backends
+ backends: globalOptions.backends,
}
// always overwrite global options
@@ -215,3 +223,157 @@ func withTestEnvironment(t testing.TB) (env *testEnvironment, cleanup func()) {
return env, cleanup
}
+
+func testSetupBackupData(t testing.TB, env *testEnvironment) string {
+ datafile := filepath.Join("testdata", "backup-data.tar.gz")
+ testRunInit(t, env.gopts)
+ rtest.SetupTarTestFixture(t, env.testdata, datafile)
+ return datafile
+}
+
+func listPacks(gopts GlobalOptions, t *testing.T) restic.IDSet {
+ r, err := OpenRepository(context.TODO(), gopts)
+ rtest.OK(t, err)
+
+ packs := restic.NewIDSet()
+
+ rtest.OK(t, r.List(context.TODO(), restic.PackFile, func(id restic.ID, size int64) error {
+ packs.Insert(id)
+ return nil
+ }))
+ return packs
+}
+
+func removePacks(gopts GlobalOptions, t testing.TB, remove restic.IDSet) {
+ r, err := OpenRepository(context.TODO(), gopts)
+ rtest.OK(t, err)
+
+ for id := range remove {
+ rtest.OK(t, r.Backend().Remove(context.TODO(), restic.Handle{Type: restic.PackFile, Name: id.String()}))
+ }
+}
+
+func removePacksExcept(gopts GlobalOptions, t testing.TB, keep restic.IDSet, removeTreePacks bool) {
+ r, err := OpenRepository(context.TODO(), gopts)
+ rtest.OK(t, err)
+
+ // Get all tree packs
+ rtest.OK(t, r.LoadIndex(context.TODO()))
+
+ treePacks := restic.NewIDSet()
+ r.Index().Each(context.TODO(), func(pb restic.PackedBlob) {
+ if pb.Type == restic.TreeBlob {
+ treePacks.Insert(pb.PackID)
+ }
+ })
+
+ // remove all packs containing data blobs
+ rtest.OK(t, r.List(context.TODO(), restic.PackFile, func(id restic.ID, size int64) error {
+ if treePacks.Has(id) != removeTreePacks || keep.Has(id) {
+ return nil
+ }
+ return r.Backend().Remove(context.TODO(), restic.Handle{Type: restic.PackFile, Name: id.String()})
+ }))
+}
+
+func includes(haystack []string, needle string) bool {
+ for _, s := range haystack {
+ if s == needle {
+ return true
+ }
+ }
+
+ return false
+}
+
+func loadSnapshotMap(t testing.TB, gopts GlobalOptions) map[string]struct{} {
+ snapshotIDs := testRunList(t, "snapshots", gopts)
+
+ m := make(map[string]struct{})
+ for _, id := range snapshotIDs {
+ m[id.String()] = struct{}{}
+ }
+
+ return m
+}
+
+func lastSnapshot(old, new map[string]struct{}) (map[string]struct{}, string) {
+ for k := range new {
+ if _, ok := old[k]; !ok {
+ old[k] = struct{}{}
+ return old, k
+ }
+ }
+
+ return old, ""
+}
+
+func appendRandomData(filename string, bytes uint) error {
+ f, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE, 0666)
+ if err != nil {
+ fmt.Fprint(os.Stderr, err)
+ return err
+ }
+
+ _, err = f.Seek(0, 2)
+ if err != nil {
+ fmt.Fprint(os.Stderr, err)
+ return err
+ }
+
+ _, err = io.Copy(f, io.LimitReader(rand.Reader, int64(bytes)))
+ if err != nil {
+ fmt.Fprint(os.Stderr, err)
+ return err
+ }
+
+ return f.Close()
+}
+
+func testFileSize(filename string, size int64) error {
+ fi, err := os.Stat(filename)
+ if err != nil {
+ return err
+ }
+
+ if fi.Size() != size {
+ return errors.Fatalf("wrong file size for %v: expected %v, got %v", filename, size, fi.Size())
+ }
+
+ return nil
+}
+
+func withRestoreGlobalOptions(inner func() error) error {
+ gopts := globalOptions
+ defer func() {
+ globalOptions = gopts
+ }()
+ return inner()
+}
+
+func withCaptureStdout(inner func() error) (*bytes.Buffer, error) {
+ buf := bytes.NewBuffer(nil)
+ err := withRestoreGlobalOptions(func() error {
+ globalOptions.stdout = buf
+ return inner()
+ })
+
+ return buf, err
+}
+
+func withTermStatus(gopts GlobalOptions, callback func(ctx context.Context, term *termstatus.Terminal) error) error {
+ ctx, cancel := context.WithCancel(context.TODO())
+ var wg sync.WaitGroup
+
+ term := termstatus.New(gopts.stdout, gopts.stderr, gopts.Quiet)
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ term.Run(ctx)
+ }()
+
+ defer wg.Wait()
+ defer cancel()
+
+ return callback(ctx, term)
+}
diff --git a/cmd/restic/integration_test.go b/cmd/restic/integration_test.go
index c87722f02..8ea4d17d9 100644
--- a/cmd/restic/integration_test.go
+++ b/cmd/restic/integration_test.go
@@ -1,1561 +1,18 @@
package main
import (
- "bufio"
- "bytes"
"context"
- "crypto/rand"
- "encoding/json"
"fmt"
"io"
- mrand "math/rand"
"os"
"path/filepath"
- "regexp"
- "runtime"
- "strings"
- "sync"
- "syscall"
"testing"
- "time"
"github.com/restic/restic/internal/errors"
- "github.com/restic/restic/internal/filter"
- "github.com/restic/restic/internal/fs"
- "github.com/restic/restic/internal/index"
- "github.com/restic/restic/internal/repository"
"github.com/restic/restic/internal/restic"
rtest "github.com/restic/restic/internal/test"
- "github.com/restic/restic/internal/ui/termstatus"
- "golang.org/x/sync/errgroup"
)
-func parseIDsFromReader(t testing.TB, rd io.Reader) restic.IDs {
- IDs := restic.IDs{}
- sc := bufio.NewScanner(rd)
-
- for sc.Scan() {
- id, err := restic.ParseID(sc.Text())
- if err != nil {
- t.Logf("parse id %v: %v", sc.Text(), err)
- continue
- }
-
- IDs = append(IDs, id)
- }
-
- return IDs
-}
-
-func testRunInit(t testing.TB, opts GlobalOptions) {
- repository.TestUseLowSecurityKDFParameters(t)
- restic.TestDisableCheckPolynomial(t)
- restic.TestSetLockTimeout(t, 0)
-
- rtest.OK(t, runInit(context.TODO(), InitOptions{}, opts, nil))
- t.Logf("repository initialized at %v", opts.Repo)
-}
-
-func testRunBackupAssumeFailure(t testing.TB, dir string, target []string, opts BackupOptions, gopts GlobalOptions) error {
- ctx, cancel := context.WithCancel(context.TODO())
- defer cancel()
-
- var wg errgroup.Group
- term := termstatus.New(gopts.stdout, gopts.stderr, gopts.Quiet)
- wg.Go(func() error { term.Run(ctx); return nil })
-
- gopts.stdout = io.Discard
- t.Logf("backing up %v in %v", target, dir)
- if dir != "" {
- cleanup := rtest.Chdir(t, dir)
- defer cleanup()
- }
-
- backupErr := runBackup(ctx, opts, gopts, term, target)
-
- cancel()
-
- err := wg.Wait()
- if err != nil {
- t.Fatal(err)
- }
-
- return backupErr
-}
-
-func testRunBackup(t testing.TB, dir string, target []string, opts BackupOptions, gopts GlobalOptions) {
- err := testRunBackupAssumeFailure(t, dir, target, opts, gopts)
- rtest.Assert(t, err == nil, "Error while backing up")
-}
-
-func testRunList(t testing.TB, tpe string, opts GlobalOptions) restic.IDs {
- buf := bytes.NewBuffer(nil)
- globalOptions.stdout = buf
- defer func() {
- globalOptions.stdout = os.Stdout
- }()
-
- rtest.OK(t, runList(context.TODO(), cmdList, opts, []string{tpe}))
- return parseIDsFromReader(t, buf)
-}
-
-func testRunRestore(t testing.TB, opts GlobalOptions, dir string, snapshotID restic.ID) {
- testRunRestoreExcludes(t, opts, dir, snapshotID, nil)
-}
-
-func testRunRestoreLatest(t testing.TB, gopts GlobalOptions, dir string, paths []string, hosts []string) {
- opts := RestoreOptions{
- Target: dir,
- SnapshotFilter: restic.SnapshotFilter{
- Hosts: hosts,
- Paths: paths,
- },
- }
-
- rtest.OK(t, runRestore(context.TODO(), opts, gopts, []string{"latest"}))
-}
-
-func testRunRestoreExcludes(t testing.TB, gopts GlobalOptions, dir string, snapshotID restic.ID, excludes []string) {
- opts := RestoreOptions{
- Target: dir,
- Exclude: excludes,
- }
-
- rtest.OK(t, runRestore(context.TODO(), opts, gopts, []string{snapshotID.String()}))
-}
-
-func testRunRestoreIncludes(t testing.TB, gopts GlobalOptions, dir string, snapshotID restic.ID, includes []string) {
- opts := RestoreOptions{
- Target: dir,
- Include: includes,
- }
-
- rtest.OK(t, runRestore(context.TODO(), opts, gopts, []string{snapshotID.String()}))
-}
-
-func testRunRestoreAssumeFailure(t testing.TB, snapshotID string, opts RestoreOptions, gopts GlobalOptions) error {
- err := runRestore(context.TODO(), opts, gopts, []string{snapshotID})
-
- return err
-}
-
-func testRunCheck(t testing.TB, gopts GlobalOptions) {
- opts := CheckOptions{
- ReadData: true,
- CheckUnused: true,
- }
- rtest.OK(t, runCheck(context.TODO(), opts, gopts, nil))
-}
-
-func testRunCheckOutput(gopts GlobalOptions) (string, error) {
- buf := bytes.NewBuffer(nil)
-
- globalOptions.stdout = buf
- defer func() {
- globalOptions.stdout = os.Stdout
- }()
-
- opts := CheckOptions{
- ReadData: true,
- }
-
- err := runCheck(context.TODO(), opts, gopts, nil)
- return buf.String(), err
-}
-
-func testRunDiffOutput(gopts GlobalOptions, firstSnapshotID string, secondSnapshotID string) (string, error) {
- buf := bytes.NewBuffer(nil)
-
- globalOptions.stdout = buf
- oldStdout := gopts.stdout
- gopts.stdout = buf
- defer func() {
- globalOptions.stdout = os.Stdout
- gopts.stdout = oldStdout
- }()
-
- opts := DiffOptions{
- ShowMetadata: false,
- }
- err := runDiff(context.TODO(), opts, gopts, []string{firstSnapshotID, secondSnapshotID})
- return buf.String(), err
-}
-
-func testRunRebuildIndex(t testing.TB, gopts GlobalOptions) {
- globalOptions.stdout = io.Discard
- defer func() {
- globalOptions.stdout = os.Stdout
- }()
-
- rtest.OK(t, runRebuildIndex(context.TODO(), RebuildIndexOptions{}, gopts))
-}
-
-func testRunLs(t testing.TB, gopts GlobalOptions, snapshotID string) []string {
- buf := bytes.NewBuffer(nil)
- globalOptions.stdout = buf
- quiet := globalOptions.Quiet
- globalOptions.Quiet = true
- defer func() {
- globalOptions.stdout = os.Stdout
- globalOptions.Quiet = quiet
- }()
-
- opts := LsOptions{}
-
- rtest.OK(t, runLs(context.TODO(), opts, gopts, []string{snapshotID}))
-
- return strings.Split(buf.String(), "\n")
-}
-
-func testRunFind(t testing.TB, wantJSON bool, gopts GlobalOptions, pattern string) []byte {
- buf := bytes.NewBuffer(nil)
- globalOptions.stdout = buf
- globalOptions.JSON = wantJSON
- defer func() {
- globalOptions.stdout = os.Stdout
- globalOptions.JSON = false
- }()
-
- opts := FindOptions{}
-
- rtest.OK(t, runFind(context.TODO(), opts, gopts, []string{pattern}))
-
- return buf.Bytes()
-}
-
-func testRunSnapshots(t testing.TB, gopts GlobalOptions) (newest *Snapshot, snapmap map[restic.ID]Snapshot) {
- buf := bytes.NewBuffer(nil)
- globalOptions.stdout = buf
- globalOptions.JSON = true
- defer func() {
- globalOptions.stdout = os.Stdout
- globalOptions.JSON = gopts.JSON
- }()
-
- opts := SnapshotOptions{}
-
- rtest.OK(t, runSnapshots(context.TODO(), opts, globalOptions, []string{}))
-
- snapshots := []Snapshot{}
- rtest.OK(t, json.Unmarshal(buf.Bytes(), &snapshots))
-
- snapmap = make(map[restic.ID]Snapshot, len(snapshots))
- for _, sn := range snapshots {
- snapmap[*sn.ID] = sn
- if newest == nil || sn.Time.After(newest.Time) {
- newest = &sn
- }
- }
- return
-}
-
-func testRunForget(t testing.TB, gopts GlobalOptions, args ...string) {
- opts := ForgetOptions{}
- rtest.OK(t, runForget(context.TODO(), opts, gopts, args))
-}
-
-func testRunForgetJSON(t testing.TB, gopts GlobalOptions, args ...string) {
- buf := bytes.NewBuffer(nil)
- oldJSON := gopts.JSON
- gopts.stdout = buf
- gopts.JSON = true
- defer func() {
- gopts.stdout = os.Stdout
- gopts.JSON = oldJSON
- }()
-
- opts := ForgetOptions{
- DryRun: true,
- Last: 1,
- }
-
- rtest.OK(t, runForget(context.TODO(), opts, gopts, args))
-
- var forgets []*ForgetGroup
- rtest.OK(t, json.Unmarshal(buf.Bytes(), &forgets))
-
- rtest.Assert(t, len(forgets) == 1,
- "Expected 1 snapshot group, got %v", len(forgets))
- rtest.Assert(t, len(forgets[0].Keep) == 1,
- "Expected 1 snapshot to be kept, got %v", len(forgets[0].Keep))
- rtest.Assert(t, len(forgets[0].Remove) == 2,
- "Expected 2 snapshots to be removed, got %v", len(forgets[0].Remove))
-}
-
-func testRunPrune(t testing.TB, gopts GlobalOptions, opts PruneOptions) {
- oldHook := gopts.backendTestHook
- gopts.backendTestHook = func(r restic.Backend) (restic.Backend, error) { return newListOnceBackend(r), nil }
- defer func() {
- gopts.backendTestHook = oldHook
- }()
- rtest.OK(t, runPrune(context.TODO(), opts, gopts))
-}
-
-func testSetupBackupData(t testing.TB, env *testEnvironment) string {
- datafile := filepath.Join("testdata", "backup-data.tar.gz")
- testRunInit(t, env.gopts)
- rtest.SetupTarTestFixture(t, env.testdata, datafile)
- return datafile
-}
-
-func TestBackup(t *testing.T) {
- testBackup(t, false)
-}
-
-func TestBackupWithFilesystemSnapshots(t *testing.T) {
- if runtime.GOOS == "windows" && fs.HasSufficientPrivilegesForVSS() == nil {
- testBackup(t, true)
- }
-}
-
-func testBackup(t *testing.T, useFsSnapshot bool) {
- env, cleanup := withTestEnvironment(t)
- defer cleanup()
-
- testSetupBackupData(t, env)
- opts := BackupOptions{UseFsSnapshot: useFsSnapshot}
-
- // first backup
- testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts)
- snapshotIDs := testRunList(t, "snapshots", env.gopts)
- rtest.Assert(t, len(snapshotIDs) == 1,
- "expected one snapshot, got %v", snapshotIDs)
-
- testRunCheck(t, env.gopts)
- stat1 := dirStats(env.repo)
-
- // second backup, implicit incremental
- testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts)
- snapshotIDs = testRunList(t, "snapshots", env.gopts)
- rtest.Assert(t, len(snapshotIDs) == 2,
- "expected two snapshots, got %v", snapshotIDs)
-
- stat2 := dirStats(env.repo)
- if stat2.size > stat1.size+stat1.size/10 {
- t.Error("repository size has grown by more than 10 percent")
- }
- t.Logf("repository grown by %d bytes", stat2.size-stat1.size)
-
- testRunCheck(t, env.gopts)
- // third backup, explicit incremental
- opts.Parent = snapshotIDs[0].String()
- testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts)
- snapshotIDs = testRunList(t, "snapshots", env.gopts)
- rtest.Assert(t, len(snapshotIDs) == 3,
- "expected three snapshots, got %v", snapshotIDs)
-
- stat3 := dirStats(env.repo)
- if stat3.size > stat1.size+stat1.size/10 {
- t.Error("repository size has grown by more than 10 percent")
- }
- t.Logf("repository grown by %d bytes", stat3.size-stat2.size)
-
- // restore all backups and compare
- for i, snapshotID := range snapshotIDs {
- restoredir := filepath.Join(env.base, fmt.Sprintf("restore%d", i))
- t.Logf("restoring snapshot %v to %v", snapshotID.Str(), restoredir)
- testRunRestore(t, env.gopts, restoredir, snapshotID)
- diff := directoriesContentsDiff(env.testdata, filepath.Join(restoredir, "testdata"))
- rtest.Assert(t, diff == "", "directories are not equal: %v", diff)
- }
-
- testRunCheck(t, env.gopts)
-}
-
-func TestDryRunBackup(t *testing.T) {
- env, cleanup := withTestEnvironment(t)
- defer cleanup()
-
- testSetupBackupData(t, env)
- opts := BackupOptions{}
- dryOpts := BackupOptions{DryRun: true}
-
- // dry run before first backup
- testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, dryOpts, env.gopts)
- snapshotIDs := testRunList(t, "snapshots", env.gopts)
- rtest.Assert(t, len(snapshotIDs) == 0,
- "expected no snapshot, got %v", snapshotIDs)
- packIDs := testRunList(t, "packs", env.gopts)
- rtest.Assert(t, len(packIDs) == 0,
- "expected no data, got %v", snapshotIDs)
- indexIDs := testRunList(t, "index", env.gopts)
- rtest.Assert(t, len(indexIDs) == 0,
- "expected no index, got %v", snapshotIDs)
-
- // first backup
- testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts)
- snapshotIDs = testRunList(t, "snapshots", env.gopts)
- packIDs = testRunList(t, "packs", env.gopts)
- indexIDs = testRunList(t, "index", env.gopts)
-
- // dry run between backups
- testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, dryOpts, env.gopts)
- snapshotIDsAfter := testRunList(t, "snapshots", env.gopts)
- rtest.Equals(t, snapshotIDs, snapshotIDsAfter)
- dataIDsAfter := testRunList(t, "packs", env.gopts)
- rtest.Equals(t, packIDs, dataIDsAfter)
- indexIDsAfter := testRunList(t, "index", env.gopts)
- rtest.Equals(t, indexIDs, indexIDsAfter)
-
- // second backup, implicit incremental
- testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts)
- snapshotIDs = testRunList(t, "snapshots", env.gopts)
- packIDs = testRunList(t, "packs", env.gopts)
- indexIDs = testRunList(t, "index", env.gopts)
-
- // another dry run
- testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, dryOpts, env.gopts)
- snapshotIDsAfter = testRunList(t, "snapshots", env.gopts)
- rtest.Equals(t, snapshotIDs, snapshotIDsAfter)
- dataIDsAfter = testRunList(t, "packs", env.gopts)
- rtest.Equals(t, packIDs, dataIDsAfter)
- indexIDsAfter = testRunList(t, "index", env.gopts)
- rtest.Equals(t, indexIDs, indexIDsAfter)
-}
-
-func TestBackupNonExistingFile(t *testing.T) {
- env, cleanup := withTestEnvironment(t)
- defer cleanup()
-
- testSetupBackupData(t, env)
- globalOptions.stderr = io.Discard
- defer func() {
- globalOptions.stderr = os.Stderr
- }()
-
- p := filepath.Join(env.testdata, "0", "0", "9")
- dirs := []string{
- filepath.Join(p, "0"),
- filepath.Join(p, "1"),
- filepath.Join(p, "nonexisting"),
- filepath.Join(p, "5"),
- }
-
- opts := BackupOptions{}
-
- testRunBackup(t, "", dirs, opts, env.gopts)
-}
-
-func removePacksExcept(gopts GlobalOptions, t *testing.T, keep restic.IDSet, removeTreePacks bool) {
- r, err := OpenRepository(context.TODO(), gopts)
- rtest.OK(t, err)
-
- // Get all tree packs
- rtest.OK(t, r.LoadIndex(context.TODO()))
-
- treePacks := restic.NewIDSet()
- r.Index().Each(context.TODO(), func(pb restic.PackedBlob) {
- if pb.Type == restic.TreeBlob {
- treePacks.Insert(pb.PackID)
- }
- })
-
- // remove all packs containing data blobs
- rtest.OK(t, r.List(context.TODO(), restic.PackFile, func(id restic.ID, size int64) error {
- if treePacks.Has(id) != removeTreePacks || keep.Has(id) {
- return nil
- }
- return r.Backend().Remove(context.TODO(), restic.Handle{Type: restic.PackFile, Name: id.String()})
- }))
-}
-
// TestBackupSelfHealing checks that a backup run against a repository with
// missing data packs reports an error but re-uploads the missing blobs
// ("heals" the repository), so that a subsequent check succeeds again.
func TestBackupSelfHealing(t *testing.T) {
	env, cleanup := withTestEnvironment(t)
	defer cleanup()

	testRunInit(t, env.gopts)

	p := filepath.Join(env.testdata, "test/test")
	rtest.OK(t, os.MkdirAll(filepath.Dir(p), 0755))
	rtest.OK(t, appendRandomData(p, 5))

	opts := BackupOptions{}

	testRunBackup(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts)
	testRunCheck(t, env.gopts)

	// remove all data packs
	removePacksExcept(env.gopts, t, restic.NewIDSet(), false)

	testRunRebuildIndex(t, env.gopts)
	// now the repo is also missing the data blob in the index; check should report this
	rtest.Assert(t, runCheck(context.TODO(), CheckOptions{}, env.gopts, nil) != nil,
		"check should have reported an error")

	// second backup should report an error but "heal" this situation
	err := testRunBackupAssumeFailure(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts)
	rtest.Assert(t, err != nil,
		"backup should have reported an error")
	testRunCheck(t, env.gopts)
}
-
// TestBackupTreeLoadError checks that backup recovers ("heals") a repository
// whose tree packs have been removed: first only the pack for a subdirectory,
// then all tree packs. In both cases the backup must report an error but
// leave the repository in a state where check succeeds.
func TestBackupTreeLoadError(t *testing.T) {
	env, cleanup := withTestEnvironment(t)
	defer cleanup()

	testRunInit(t, env.gopts)
	p := filepath.Join(env.testdata, "test/test")
	rtest.OK(t, os.MkdirAll(filepath.Dir(p), 0755))
	rtest.OK(t, appendRandomData(p, 5))

	opts := BackupOptions{}
	// Backup a subdirectory first, such that we can remove the tree pack for the subdirectory
	testRunBackup(t, env.testdata, []string{"test"}, opts, env.gopts)

	// Remember which packs hold the subdirectory's trees before the second backup.
	r, err := OpenRepository(context.TODO(), env.gopts)
	rtest.OK(t, err)
	rtest.OK(t, r.LoadIndex(context.TODO()))
	treePacks := restic.NewIDSet()
	r.Index().Each(context.TODO(), func(pb restic.PackedBlob) {
		if pb.Type == restic.TreeBlob {
			treePacks.Insert(pb.PackID)
		}
	})

	testRunBackup(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts)
	testRunCheck(t, env.gopts)

	// delete the subdirectory pack first
	for id := range treePacks {
		rtest.OK(t, r.Backend().Remove(context.TODO(), restic.Handle{Type: restic.PackFile, Name: id.String()}))
	}
	testRunRebuildIndex(t, env.gopts)
	// now the repo is missing the tree blob in the index; check should report this
	rtest.Assert(t, runCheck(context.TODO(), CheckOptions{}, env.gopts, nil) != nil, "check should have reported an error")
	// second backup should report an error but "heal" this situation
	err = testRunBackupAssumeFailure(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts)
	rtest.Assert(t, err != nil, "backup should have reported an error for the subdirectory")
	testRunCheck(t, env.gopts)

	// remove all tree packs
	removePacksExcept(env.gopts, t, restic.NewIDSet(), true)
	testRunRebuildIndex(t, env.gopts)
	// now the repo is also missing the data blob in the index; check should report this
	rtest.Assert(t, runCheck(context.TODO(), CheckOptions{}, env.gopts, nil) != nil, "check should have reported an error")
	// second backup should report an error but "heal" this situation
	err = testRunBackupAssumeFailure(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts)
	rtest.Assert(t, err != nil, "backup should have reported an error")
	testRunCheck(t, env.gopts)
}
-
// includes reports whether needle occurs in haystack.
func includes(haystack []string, needle string) bool {
	for i := range haystack {
		if haystack[i] == needle {
			return true
		}
	}
	return false
}
-
-func loadSnapshotMap(t testing.TB, gopts GlobalOptions) map[string]struct{} {
- snapshotIDs := testRunList(t, "snapshots", gopts)
-
- m := make(map[string]struct{})
- for _, id := range snapshotIDs {
- m[id.String()] = struct{}{}
- }
-
- return m
-}
-
// lastSnapshot returns the first snapshot ID present in new but not in old,
// adds it to old, and returns the updated set together with that ID. If new
// contains no unknown snapshot, the empty string is returned.
func lastSnapshot(old, new map[string]struct{}) (map[string]struct{}, string) {
	for id := range new {
		_, seen := old[id]
		if seen {
			continue
		}
		old[id] = struct{}{}
		return old, id
	}
	return old, ""
}
-
// backupExcludeFilenames lists the files (relative to the test data
// directory) created for TestBackupExclude; some of them match the exclude
// patterns exercised by that test.
var backupExcludeFilenames = []string{
	"testfile1",
	"foo.tar.gz",
	"private/secret/passwords.txt",
	"work/source/test.c",
}
-
// TestBackupExclude runs three backups with increasingly strict exclude
// patterns and verifies via "ls" that excluded files are absent from the
// corresponding snapshots.
func TestBackupExclude(t *testing.T) {
	env, cleanup := withTestEnvironment(t)
	defer cleanup()

	testRunInit(t, env.gopts)

	datadir := filepath.Join(env.base, "testdata")

	// Create the fixture files, each containing its own name as content.
	for _, filename := range backupExcludeFilenames {
		fp := filepath.Join(datadir, filename)
		rtest.OK(t, os.MkdirAll(filepath.Dir(fp), 0755))

		f, err := os.Create(fp)
		rtest.OK(t, err)

		fmt.Fprint(f, filename)
		rtest.OK(t, f.Close())
	}

	snapshots := make(map[string]struct{})

	opts := BackupOptions{}

	// No excludes: the tarball must be present in the snapshot.
	testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts)
	snapshots, snapshotID := lastSnapshot(snapshots, loadSnapshotMap(t, env.gopts))
	files := testRunLs(t, env.gopts, snapshotID)
	rtest.Assert(t, includes(files, "/testdata/foo.tar.gz"),
		"expected file %q in first snapshot, but it's not included", "foo.tar.gz")

	// Exclude *.tar.gz: the tarball must be gone.
	opts.Excludes = []string{"*.tar.gz"}
	testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts)
	snapshots, snapshotID = lastSnapshot(snapshots, loadSnapshotMap(t, env.gopts))
	files = testRunLs(t, env.gopts, snapshotID)
	rtest.Assert(t, !includes(files, "/testdata/foo.tar.gz"),
		"expected file %q not in first snapshot, but it's included", "foo.tar.gz")

	// Additionally exclude a directory: its contents must be gone too.
	opts.Excludes = []string{"*.tar.gz", "private/secret"}
	testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts)
	_, snapshotID = lastSnapshot(snapshots, loadSnapshotMap(t, env.gopts))
	files = testRunLs(t, env.gopts, snapshotID)
	rtest.Assert(t, !includes(files, "/testdata/foo.tar.gz"),
		"expected file %q not in first snapshot, but it's included", "foo.tar.gz")
	rtest.Assert(t, !includes(files, "/testdata/private/secret/passwords.txt"),
		"expected file %q not in first snapshot, but it's included", "passwords.txt")
}
-
-func TestBackupErrors(t *testing.T) {
- if runtime.GOOS == "windows" {
- return
- }
- env, cleanup := withTestEnvironment(t)
- defer cleanup()
-
- testSetupBackupData(t, env)
-
- // Assume failure
- inaccessibleFile := filepath.Join(env.testdata, "0", "0", "9", "0")
- rtest.OK(t, os.Chmod(inaccessibleFile, 0000))
- defer func() {
- rtest.OK(t, os.Chmod(inaccessibleFile, 0644))
- }()
- opts := BackupOptions{}
- gopts := env.gopts
- gopts.stderr = io.Discard
- err := testRunBackupAssumeFailure(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, gopts)
- rtest.Assert(t, err != nil, "Assumed failure, but no error occurred.")
- rtest.Assert(t, err == ErrInvalidSourceData, "Wrong error returned")
- snapshotIDs := testRunList(t, "snapshots", env.gopts)
- rtest.Assert(t, len(snapshotIDs) == 1,
- "expected one snapshot, got %v", snapshotIDs)
-}
-
// Sizes (in bytes) of the three successive writes performed by
// TestIncrementalBackup.
// NOTE(review): the factor 1042 looks like a typo for 1024; the exact value
// only determines the amount of test data, so it is left unchanged — confirm
// before "fixing" it.
const (
	incrementalFirstWrite = 10 * 1042 * 1024
	incrementalSecondWrite = 1 * 1042 * 1024
	incrementalThirdWrite = 1 * 1042 * 1024
)
-
-func appendRandomData(filename string, bytes uint) error {
- f, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE, 0666)
- if err != nil {
- fmt.Fprint(os.Stderr, err)
- return err
- }
-
- _, err = f.Seek(0, 2)
- if err != nil {
- fmt.Fprint(os.Stderr, err)
- return err
- }
-
- _, err = io.Copy(f, io.LimitReader(rand.Reader, int64(bytes)))
- if err != nil {
- fmt.Fprint(os.Stderr, err)
- return err
- }
-
- return f.Close()
-}
-
// TestIncrementalBackup appends data to a file between backups and checks
// that the repository only grows roughly by the amount of new data, i.e. that
// unchanged content is deduplicated.
func TestIncrementalBackup(t *testing.T) {
	env, cleanup := withTestEnvironment(t)
	defer cleanup()

	testRunInit(t, env.gopts)

	datadir := filepath.Join(env.base, "testdata")
	testfile := filepath.Join(datadir, "testfile")

	rtest.OK(t, appendRandomData(testfile, incrementalFirstWrite))

	opts := BackupOptions{}

	testRunBackup(t, "", []string{datadir}, opts, env.gopts)
	testRunCheck(t, env.gopts)
	stat1 := dirStats(env.repo)

	rtest.OK(t, appendRandomData(testfile, incrementalSecondWrite))

	testRunBackup(t, "", []string{datadir}, opts, env.gopts)
	testRunCheck(t, env.gopts)
	stat2 := dirStats(env.repo)
	// The repo must not have grown by more than the size of the first write;
	// otherwise the unchanged prefix was stored again.
	if stat2.size-stat1.size > incrementalFirstWrite {
		t.Errorf("repository size has grown by more than %d bytes", incrementalFirstWrite)
	}
	t.Logf("repository grown by %d bytes", stat2.size-stat1.size)

	rtest.OK(t, appendRandomData(testfile, incrementalThirdWrite))

	testRunBackup(t, "", []string{datadir}, opts, env.gopts)
	testRunCheck(t, env.gopts)
	stat3 := dirStats(env.repo)
	if stat3.size-stat2.size > incrementalFirstWrite {
		t.Errorf("repository size has grown by more than %d bytes", incrementalFirstWrite)
	}
	t.Logf("repository grown by %d bytes", stat3.size-stat2.size)
}
-
// TestBackupTags verifies that tags given via BackupOptions end up on the new
// snapshot, and that a tagged backup still selects the untagged previous
// snapshot as its parent.
func TestBackupTags(t *testing.T) {
	env, cleanup := withTestEnvironment(t)
	defer cleanup()

	testSetupBackupData(t, env)
	opts := BackupOptions{}

	testRunBackup(t, "", []string{env.testdata}, opts, env.gopts)
	testRunCheck(t, env.gopts)
	newest, _ := testRunSnapshots(t, env.gopts)

	if newest == nil {
		t.Fatal("expected a backup, got nil")
	}

	rtest.Assert(t, len(newest.Tags) == 0,
		"expected no tags, got %v", newest.Tags)
	parent := newest

	opts.Tags = restic.TagLists{[]string{"NL"}}
	testRunBackup(t, "", []string{env.testdata}, opts, env.gopts)
	testRunCheck(t, env.gopts)
	newest, _ = testRunSnapshots(t, env.gopts)

	if newest == nil {
		t.Fatal("expected a backup, got nil")
	}

	rtest.Assert(t, len(newest.Tags) == 1 && newest.Tags[0] == "NL",
		"expected one NL tag, got %v", newest.Tags)
	// Tagged backup should have untagged backup as parent.
	rtest.Assert(t, parent.ID.Equal(*newest.Parent),
		"expected parent to be %v, got %v", parent.ID, newest.Parent)
}
-
-func testRunCopy(t testing.TB, srcGopts GlobalOptions, dstGopts GlobalOptions) {
- gopts := srcGopts
- gopts.Repo = dstGopts.Repo
- gopts.password = dstGopts.password
- copyOpts := CopyOptions{
- secondaryRepoOptions: secondaryRepoOptions{
- Repo: srcGopts.Repo,
- password: srcGopts.password,
- },
- }
-
- rtest.OK(t, runCopy(context.TODO(), copyOpts, gopts, nil))
-}
-
// TestCopy copies three snapshots into a second repository and verifies the
// copy: same number of snapshots, roughly the same repository size, a clean
// check, and identical restored contents for each snapshot.
func TestCopy(t *testing.T) {
	env, cleanup := withTestEnvironment(t)
	defer cleanup()
	env2, cleanup2 := withTestEnvironment(t)
	defer cleanup2()

	testSetupBackupData(t, env)
	opts := BackupOptions{}
	testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9")}, opts, env.gopts)
	testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "2")}, opts, env.gopts)
	testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "3")}, opts, env.gopts)
	testRunCheck(t, env.gopts)

	testRunInit(t, env2.gopts)
	testRunCopy(t, env.gopts, env2.gopts)

	snapshotIDs := testRunList(t, "snapshots", env.gopts)
	copiedSnapshotIDs := testRunList(t, "snapshots", env2.gopts)

	// Check that the copies size seems reasonable
	rtest.Assert(t, len(snapshotIDs) == len(copiedSnapshotIDs), "expected %v snapshots, found %v",
		len(snapshotIDs), len(copiedSnapshotIDs))
	stat := dirStats(env.repo)
	stat2 := dirStats(env2.repo)
	sizeDiff := int64(stat.size) - int64(stat2.size)
	if sizeDiff < 0 {
		sizeDiff = -sizeDiff
	}
	rtest.Assert(t, sizeDiff < int64(stat.size)/50, "expected less than 2%% size difference: %v vs. %v",
		stat.size, stat2.size)

	// Check integrity of the copy
	testRunCheck(t, env2.gopts)

	// Check that the copied snapshots have the same tree contents as the old ones (= identical tree hash)
	origRestores := make(map[string]struct{})
	for i, snapshotID := range snapshotIDs {
		restoredir := filepath.Join(env.base, fmt.Sprintf("restore%d", i))
		origRestores[restoredir] = struct{}{}
		testRunRestore(t, env.gopts, restoredir, snapshotID)
	}
	// Match each copied snapshot against exactly one original restore; remove
	// matches from origRestores so duplicates cannot match twice.
	for i, snapshotID := range copiedSnapshotIDs {
		restoredir := filepath.Join(env2.base, fmt.Sprintf("restore%d", i))
		testRunRestore(t, env2.gopts, restoredir, snapshotID)
		foundMatch := false
		for cmpdir := range origRestores {
			diff := directoriesContentsDiff(restoredir, cmpdir)
			if diff == "" {
				delete(origRestores, cmpdir)
				foundMatch = true
			}
		}

		rtest.Assert(t, foundMatch, "found no counterpart for snapshot %v", snapshotID)
	}

	rtest.Assert(t, len(origRestores) == 0, "found not copied snapshots")
}
-
// TestCopyIncremental verifies that repeated copy runs are incremental: a
// second copy with no new snapshots copies nothing, a copy after a new backup
// copies only the new snapshot, and copying in the reverse direction also
// adds nothing new.
func TestCopyIncremental(t *testing.T) {
	env, cleanup := withTestEnvironment(t)
	defer cleanup()
	env2, cleanup2 := withTestEnvironment(t)
	defer cleanup2()

	testSetupBackupData(t, env)
	opts := BackupOptions{}
	testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9")}, opts, env.gopts)
	testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "2")}, opts, env.gopts)
	testRunCheck(t, env.gopts)

	testRunInit(t, env2.gopts)
	testRunCopy(t, env.gopts, env2.gopts)

	snapshotIDs := testRunList(t, "snapshots", env.gopts)
	copiedSnapshotIDs := testRunList(t, "snapshots", env2.gopts)

	// Check that the copies size seems reasonable
	testRunCheck(t, env2.gopts)
	rtest.Assert(t, len(snapshotIDs) == len(copiedSnapshotIDs), "expected %v snapshots, found %v",
		len(snapshotIDs), len(copiedSnapshotIDs))

	// check that no snapshots are copied, as there are no new ones
	testRunCopy(t, env.gopts, env2.gopts)
	testRunCheck(t, env2.gopts)
	copiedSnapshotIDs = testRunList(t, "snapshots", env2.gopts)
	rtest.Assert(t, len(snapshotIDs) == len(copiedSnapshotIDs), "still expected %v snapshots, found %v",
		len(snapshotIDs), len(copiedSnapshotIDs))

	// check that only new snapshots are copied
	testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "3")}, opts, env.gopts)
	testRunCopy(t, env.gopts, env2.gopts)
	testRunCheck(t, env2.gopts)
	snapshotIDs = testRunList(t, "snapshots", env.gopts)
	copiedSnapshotIDs = testRunList(t, "snapshots", env2.gopts)
	rtest.Assert(t, len(snapshotIDs) == len(copiedSnapshotIDs), "still expected %v snapshots, found %v",
		len(snapshotIDs), len(copiedSnapshotIDs))

	// also test the reverse direction
	testRunCopy(t, env2.gopts, env.gopts)
	testRunCheck(t, env.gopts)
	snapshotIDs = testRunList(t, "snapshots", env.gopts)
	rtest.Assert(t, len(snapshotIDs) == len(copiedSnapshotIDs), "still expected %v snapshots, found %v",
		len(copiedSnapshotIDs), len(snapshotIDs))
}
-
// TestCopyUnstableJSON copies a snapshot from a prepared repository whose
// tree contains a filename with invalid UTF-8 (see the fixture comment below)
// and checks that exactly one snapshot arrives intact in the destination.
func TestCopyUnstableJSON(t *testing.T) {
	env, cleanup := withTestEnvironment(t)
	defer cleanup()
	env2, cleanup2 := withTestEnvironment(t)
	defer cleanup2()

	// contains a symlink created using `ln -s '../i/'$'\355\246\361''d/samba' broken-symlink`
	datafile := filepath.Join("testdata", "copy-unstable-json.tar.gz")
	rtest.SetupTarTestFixture(t, env.base, datafile)

	testRunInit(t, env2.gopts)
	testRunCopy(t, env.gopts, env2.gopts)
	testRunCheck(t, env2.gopts)

	copiedSnapshotIDs := testRunList(t, "snapshots", env2.gopts)
	rtest.Assert(t, 1 == len(copiedSnapshotIDs), "still expected %v snapshot, found %v",
		1, len(copiedSnapshotIDs))
}
-
// TestInitCopyChunkerParams checks that "init --copy-chunker-params" copies
// the chunker polynomial from an existing repository into the new one, and
// that referencing a secondary repository without that flag is rejected.
func TestInitCopyChunkerParams(t *testing.T) {
	env, cleanup := withTestEnvironment(t)
	defer cleanup()
	env2, cleanup2 := withTestEnvironment(t)
	defer cleanup2()

	testRunInit(t, env2.gopts)

	initOpts := InitOptions{
		secondaryRepoOptions: secondaryRepoOptions{
			Repo: env2.gopts.Repo,
			password: env2.gopts.password,
		},
	}
	// A secondary repo without CopyChunkerParameters must be rejected.
	rtest.Assert(t, runInit(context.TODO(), initOpts, env.gopts, nil) != nil, "expected invalid init options to fail")

	initOpts.CopyChunkerParameters = true
	rtest.OK(t, runInit(context.TODO(), initOpts, env.gopts, nil))

	repo, err := OpenRepository(context.TODO(), env.gopts)
	rtest.OK(t, err)

	otherRepo, err := OpenRepository(context.TODO(), env2.gopts)
	rtest.OK(t, err)

	rtest.Assert(t, repo.Config().ChunkerPolynomial == otherRepo.Config().ChunkerPolynomial,
		"expected equal chunker polynomials, got %v expected %v", repo.Config().ChunkerPolynomial,
		otherRepo.Config().ChunkerPolynomial)
}
-
-func testRunTag(t testing.TB, opts TagOptions, gopts GlobalOptions) {
- rtest.OK(t, runTag(context.TODO(), opts, gopts, []string{}))
-}
-
// TestTag exercises the tag command: setting, adding and removing tags on the
// latest snapshot. After the first modification the snapshot's Original field
// must point at the first (untagged) snapshot and keep doing so across all
// later changes.
func TestTag(t *testing.T) {
	env, cleanup := withTestEnvironment(t)
	defer cleanup()

	testSetupBackupData(t, env)
	testRunBackup(t, "", []string{env.testdata}, BackupOptions{}, env.gopts)
	testRunCheck(t, env.gopts)
	newest, _ := testRunSnapshots(t, env.gopts)
	if newest == nil {
		t.Fatal("expected a new backup, got nil")
	}

	rtest.Assert(t, len(newest.Tags) == 0,
		"expected no tags, got %v", newest.Tags)
	rtest.Assert(t, newest.Original == nil,
		"expected original ID to be nil, got %v", newest.Original)
	originalID := *newest.ID

	// Set a tag.
	testRunTag(t, TagOptions{SetTags: restic.TagLists{[]string{"NL"}}}, env.gopts)
	testRunCheck(t, env.gopts)
	newest, _ = testRunSnapshots(t, env.gopts)
	if newest == nil {
		t.Fatal("expected a backup, got nil")
	}
	rtest.Assert(t, len(newest.Tags) == 1 && newest.Tags[0] == "NL",
		"set failed, expected one NL tag, got %v", newest.Tags)
	rtest.Assert(t, newest.Original != nil, "expected original snapshot id, got nil")
	rtest.Assert(t, *newest.Original == originalID,
		"expected original ID to be set to the first snapshot id")

	// Add a second tag.
	testRunTag(t, TagOptions{AddTags: restic.TagLists{[]string{"CH"}}}, env.gopts)
	testRunCheck(t, env.gopts)
	newest, _ = testRunSnapshots(t, env.gopts)
	if newest == nil {
		t.Fatal("expected a backup, got nil")
	}
	rtest.Assert(t, len(newest.Tags) == 2 && newest.Tags[0] == "NL" && newest.Tags[1] == "CH",
		"add failed, expected CH,NL tags, got %v", newest.Tags)
	rtest.Assert(t, newest.Original != nil, "expected original snapshot id, got nil")
	rtest.Assert(t, *newest.Original == originalID,
		"expected original ID to be set to the first snapshot id")

	// Remove one tag again.
	testRunTag(t, TagOptions{RemoveTags: restic.TagLists{[]string{"NL"}}}, env.gopts)
	testRunCheck(t, env.gopts)
	newest, _ = testRunSnapshots(t, env.gopts)
	if newest == nil {
		t.Fatal("expected a backup, got nil")
	}
	rtest.Assert(t, len(newest.Tags) == 1 && newest.Tags[0] == "CH",
		"remove failed, expected one CH tag, got %v", newest.Tags)
	rtest.Assert(t, newest.Original != nil, "expected original snapshot id, got nil")
	rtest.Assert(t, *newest.Original == originalID,
		"expected original ID to be set to the first snapshot id")

	// Add and then remove several tags, ending with none.
	testRunTag(t, TagOptions{AddTags: restic.TagLists{[]string{"US", "RU"}}}, env.gopts)
	testRunTag(t, TagOptions{RemoveTags: restic.TagLists{[]string{"CH", "US", "RU"}}}, env.gopts)
	testRunCheck(t, env.gopts)
	newest, _ = testRunSnapshots(t, env.gopts)
	if newest == nil {
		t.Fatal("expected a backup, got nil")
	}
	rtest.Assert(t, len(newest.Tags) == 0,
		"expected no tags, got %v", newest.Tags)
	rtest.Assert(t, newest.Original != nil, "expected original snapshot id, got nil")
	rtest.Assert(t, *newest.Original == originalID,
		"expected original ID to be set to the first snapshot id")

	// Check special case of removing all tags.
	testRunTag(t, TagOptions{SetTags: restic.TagLists{[]string{""}}}, env.gopts)
	testRunCheck(t, env.gopts)
	newest, _ = testRunSnapshots(t, env.gopts)
	if newest == nil {
		t.Fatal("expected a backup, got nil")
	}
	rtest.Assert(t, len(newest.Tags) == 0,
		"expected no tags, got %v", newest.Tags)
	rtest.Assert(t, newest.Original != nil, "expected original snapshot id, got nil")
	rtest.Assert(t, *newest.Original == originalID,
		"expected original ID to be set to the first snapshot id")
}
-
-func testRunKeyListOtherIDs(t testing.TB, gopts GlobalOptions) []string {
- buf := bytes.NewBuffer(nil)
-
- globalOptions.stdout = buf
- defer func() {
- globalOptions.stdout = os.Stdout
- }()
-
- rtest.OK(t, runKey(context.TODO(), gopts, []string{"list"}))
-
- scanner := bufio.NewScanner(buf)
- exp := regexp.MustCompile(`^ ([a-f0-9]+) `)
-
- IDs := []string{}
- for scanner.Scan() {
- if id := exp.FindStringSubmatch(scanner.Text()); id != nil {
- IDs = append(IDs, id[1])
- }
- }
-
- return IDs
-}
-
-func testRunKeyAddNewKey(t testing.TB, newPassword string, gopts GlobalOptions) {
- testKeyNewPassword = newPassword
- defer func() {
- testKeyNewPassword = ""
- }()
-
- rtest.OK(t, runKey(context.TODO(), gopts, []string{"add"}))
-}
-
// testRunKeyAddNewKeyUserHost adds a key with explicit --user and --host
// flags and verifies that the stored key carries that username and hostname.
// It resets the test hooks and the parsed flag values on exit.
func testRunKeyAddNewKeyUserHost(t testing.TB, gopts GlobalOptions) {
	testKeyNewPassword = "john's geheimnis"
	defer func() {
		testKeyNewPassword = ""
		keyUsername = ""
		keyHostname = ""
	}()

	// Parse the flags on the real cobra command so keyUsername/keyHostname
	// are populated the same way as in production.
	rtest.OK(t, cmdKey.Flags().Parse([]string{"--user=john", "--host=example.com"}))

	t.Log("adding key for john@example.com")
	rtest.OK(t, runKey(context.TODO(), gopts, []string{"add"}))

	repo, err := OpenRepository(context.TODO(), gopts)
	rtest.OK(t, err)
	key, err := repository.SearchKey(context.TODO(), repo, testKeyNewPassword, 2, "")
	rtest.OK(t, err)

	rtest.Equals(t, "john", key.Username)
	rtest.Equals(t, "example.com", key.Hostname)
}
-
-func testRunKeyPasswd(t testing.TB, newPassword string, gopts GlobalOptions) {
- testKeyNewPassword = newPassword
- defer func() {
- testKeyNewPassword = ""
- }()
-
- rtest.OK(t, runKey(context.TODO(), gopts, []string{"passwd"}))
-}
-
-func testRunKeyRemove(t testing.TB, gopts GlobalOptions, IDs []string) {
- t.Logf("remove %d keys: %q\n", len(IDs), IDs)
- for _, id := range IDs {
- rtest.OK(t, runKey(context.TODO(), gopts, []string{"remove", id}))
- }
-}
-
// TestKeyAddRemove cycles through several passwords: changing the password,
// adding new keys, removing all other keys after each switch, and finally
// adding a key with explicit user/host. The repository must stay accessible
// with the most recent password throughout.
func TestKeyAddRemove(t *testing.T) {
	passwordList := []string{
		"OnnyiasyatvodsEvVodyawit",
		"raicneirvOjEfEigonOmLasOd",
	}

	env, cleanup := withTestEnvironment(t)
	// must list keys more than once
	env.gopts.backendTestHook = nil
	defer cleanup()

	testRunInit(t, env.gopts)

	testRunKeyPasswd(t, "geheim2", env.gopts)
	env.gopts.password = "geheim2"
	t.Logf("changed password to %q", env.gopts.password)

	for _, newPassword := range passwordList {
		testRunKeyAddNewKey(t, newPassword, env.gopts)
		t.Logf("added new password %q", newPassword)
		env.gopts.password = newPassword
		// Drop every key except the one just added.
		testRunKeyRemove(t, env.gopts, testRunKeyListOtherIDs(t, env.gopts))
	}

	env.gopts.password = passwordList[len(passwordList)-1]
	t.Logf("testing access with last password %q\n", env.gopts.password)
	rtest.OK(t, runKey(context.TODO(), env.gopts, []string{"list"}))
	testRunCheck(t, env.gopts)

	testRunKeyAddNewKeyUserHost(t, env.gopts)
}
-
// emptySaveBackend wraps a backend and replaces the contents of every saved
// file with an empty byte slice. It is used to simulate truncated/broken
// uploads in tests.
type emptySaveBackend struct {
	restic.Backend
}

// Save stores a zero-length file under h, discarding the provided contents.
func (b *emptySaveBackend) Save(ctx context.Context, h restic.Handle, rd restic.RewindReader) error {
	return b.Backend.Save(ctx, h, restic.NewByteReader([]byte{}, nil))
}
-
// TestKeyProblems checks that "key passwd" and "key add" fail when the new
// key file cannot be written correctly (simulated via emptySaveBackend), and
// that the repository remains accessible with the initial password.
func TestKeyProblems(t *testing.T) {
	env, cleanup := withTestEnvironment(t)
	defer cleanup()

	testRunInit(t, env.gopts)
	// From here on, every saved file is stored empty, breaking key uploads.
	env.gopts.backendTestHook = func(r restic.Backend) (restic.Backend, error) {
		return &emptySaveBackend{r}, nil
	}

	testKeyNewPassword = "geheim2"
	defer func() {
		testKeyNewPassword = ""
	}()

	err := runKey(context.TODO(), env.gopts, []string{"passwd"})
	t.Log(err)
	rtest.Assert(t, err != nil, "expected passwd change to fail")

	err = runKey(context.TODO(), env.gopts, []string{"add"})
	t.Log(err)
	rtest.Assert(t, err != nil, "expected key adding to fail")

	t.Logf("testing access with initial password %q\n", env.gopts.password)
	rtest.OK(t, runKey(context.TODO(), env.gopts, []string{"list"}))
	testRunCheck(t, env.gopts)
}
-
-func testFileSize(filename string, size int64) error {
- fi, err := os.Stat(filename)
- if err != nil {
- return err
- }
-
- if fi.Size() != size {
- return errors.Fatalf("wrong file size for %v: expected %v, got %v", filename, size, fi.Size())
- }
-
- return nil
-}
-
// TestRestoreFilter backs up a small tree and then restores it repeatedly
// with different exclude patterns, checking that exactly the files matching
// the pattern are missing from each restore.
func TestRestoreFilter(t *testing.T) {
	testfiles := []struct {
		name string
		size uint
	}{
		{"testfile1.c", 100},
		{"testfile2.exe", 101},
		{"subdir1/subdir2/testfile3.docx", 102},
		{"subdir1/subdir2/testfile4.c", 102},
	}

	env, cleanup := withTestEnvironment(t)
	defer cleanup()

	testRunInit(t, env.gopts)

	for _, testFile := range testfiles {
		p := filepath.Join(env.testdata, testFile.name)
		rtest.OK(t, os.MkdirAll(filepath.Dir(p), 0755))
		rtest.OK(t, appendRandomData(p, testFile.size))
	}

	opts := BackupOptions{}

	testRunBackup(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts)
	testRunCheck(t, env.gopts)

	snapshotID := testRunList(t, "snapshots", env.gopts)[0]

	// no restore filter should restore all files
	testRunRestore(t, env.gopts, filepath.Join(env.base, "restore0"), snapshotID)
	for _, testFile := range testfiles {
		rtest.OK(t, testFileSize(filepath.Join(env.base, "restore0", "testdata", testFile.name), int64(testFile.size)))
	}

	// One restore per exclude pattern; files matched by the pattern (on their
	// base name) must be absent, all others present with the right size.
	for i, pat := range []string{"*.c", "*.exe", "*", "*file3*"} {
		base := filepath.Join(env.base, fmt.Sprintf("restore%d", i+1))
		testRunRestoreExcludes(t, env.gopts, base, snapshotID, []string{pat})
		for _, testFile := range testfiles {
			err := testFileSize(filepath.Join(base, "testdata", testFile.name), int64(testFile.size))
			if ok, _ := filter.Match(pat, filepath.Base(testFile.name)); !ok {
				rtest.OK(t, err)
			} else {
				rtest.Assert(t, os.IsNotExist(err),
					"expected %v to not exist in restore step %v, but it exists, err %v", testFile.name, i+1, err)
			}
		}
	}
}
-
// TestRestore backs up a directory of random files, restores the latest
// snapshot without filters and verifies that the restored tree is identical
// to the original.
func TestRestore(t *testing.T) {
	env, cleanup := withTestEnvironment(t)
	defer cleanup()

	testRunInit(t, env.gopts)

	// Create ten files with random sizes of up to 4 MiB.
	for i := 0; i < 10; i++ {
		p := filepath.Join(env.testdata, fmt.Sprintf("foo/bar/testfile%v", i))
		rtest.OK(t, os.MkdirAll(filepath.Dir(p), 0755))
		rtest.OK(t, appendRandomData(p, uint(mrand.Intn(2<<21))))
	}

	opts := BackupOptions{}

	testRunBackup(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts)
	testRunCheck(t, env.gopts)

	// Restore latest without any filters
	restoredir := filepath.Join(env.base, "restore")
	testRunRestoreLatest(t, env.gopts, restoredir, nil, nil)

	diff := directoriesContentsDiff(env.testdata, filepath.Join(restoredir, filepath.Base(env.testdata)))
	rtest.Assert(t, diff == "", "directories are not equal %v", diff)
}
-
-func TestRestoreLatest(t *testing.T) {
- env, cleanup := withTestEnvironment(t)
- defer cleanup()
-
- testRunInit(t, env.gopts)
-
- p := filepath.Join(env.testdata, "testfile.c")
- rtest.OK(t, os.MkdirAll(filepath.Dir(p), 0755))
- rtest.OK(t, appendRandomData(p, 100))
-
- opts := BackupOptions{}
-
- // chdir manually here so we can get the current directory. This is not the
- // same as the temp dir returned by os.MkdirTemp() on darwin.
- back := rtest.Chdir(t, filepath.Dir(env.testdata))
- defer back()
-
- curdir, err := os.Getwd()
- if err != nil {
- t.Fatal(err)
- }
-
- testRunBackup(t, "", []string{filepath.Base(env.testdata)}, opts, env.gopts)
- testRunCheck(t, env.gopts)
-
- rtest.OK(t, os.Remove(p))
- rtest.OK(t, appendRandomData(p, 101))
- testRunBackup(t, "", []string{filepath.Base(env.testdata)}, opts, env.gopts)
- testRunCheck(t, env.gopts)
-
- // Restore latest without any filters
- testRunRestoreLatest(t, env.gopts, filepath.Join(env.base, "restore0"), nil, nil)
- rtest.OK(t, testFileSize(filepath.Join(env.base, "restore0", "testdata", "testfile.c"), int64(101)))
-
- // Setup test files in different directories backed up in different snapshots
- p1 := filepath.Join(curdir, filepath.FromSlash("p1/testfile.c"))
-
- rtest.OK(t, os.MkdirAll(filepath.Dir(p1), 0755))
- rtest.OK(t, appendRandomData(p1, 102))
- testRunBackup(t, "", []string{"p1"}, opts, env.gopts)
- testRunCheck(t, env.gopts)
-
- p2 := filepath.Join(curdir, filepath.FromSlash("p2/testfile.c"))
-
- rtest.OK(t, os.MkdirAll(filepath.Dir(p2), 0755))
- rtest.OK(t, appendRandomData(p2, 103))
- testRunBackup(t, "", []string{"p2"}, opts, env.gopts)
- testRunCheck(t, env.gopts)
-
- p1rAbs := filepath.Join(env.base, "restore1", "p1/testfile.c")
- p2rAbs := filepath.Join(env.base, "restore2", "p2/testfile.c")
-
- testRunRestoreLatest(t, env.gopts, filepath.Join(env.base, "restore1"), []string{filepath.Dir(p1)}, nil)
- rtest.OK(t, testFileSize(p1rAbs, int64(102)))
- if _, err := os.Stat(p2rAbs); os.IsNotExist(err) {
- rtest.Assert(t, os.IsNotExist(err),
- "expected %v to not exist in restore, but it exists, err %v", p2rAbs, err)
- }
-
- testRunRestoreLatest(t, env.gopts, filepath.Join(env.base, "restore2"), []string{filepath.Dir(p2)}, nil)
- rtest.OK(t, testFileSize(p2rAbs, int64(103)))
- if _, err := os.Stat(p1rAbs); os.IsNotExist(err) {
- rtest.Assert(t, os.IsNotExist(err),
- "expected %v to not exist in restore, but it exists, err %v", p1rAbs, err)
- }
-}
-
// TestRestoreWithPermissionFailure restores a prepared repository whose tree
// contains entries provoking permission errors, and verifies that all files
// are restored with non-zero size regardless of those errors.
func TestRestoreWithPermissionFailure(t *testing.T) {
	env, cleanup := withTestEnvironment(t)
	defer cleanup()

	datafile := filepath.Join("testdata", "repo-restore-permissions-test.tar.gz")
	rtest.SetupTarTestFixture(t, env.base, datafile)

	snapshots := testRunList(t, "snapshots", env.gopts)
	rtest.Assert(t, len(snapshots) > 0,
		"no snapshots found in repo (%v)", datafile)

	// Silence expected permission error messages; restore stderr afterwards.
	globalOptions.stderr = io.Discard
	defer func() {
		globalOptions.stderr = os.Stderr
	}()

	testRunRestore(t, env.gopts, filepath.Join(env.base, "restore"), snapshots[0])

	// make sure that all files have been restored, regardless of any
	// permission errors
	files := testRunLs(t, env.gopts, snapshots[0].String())
	for _, filename := range files {
		fi, err := os.Lstat(filepath.Join(env.base, "restore", filename))
		rtest.OK(t, err)

		rtest.Assert(t, !isFile(fi) || fi.Size() > 0,
			"file %v restored, but filesize is 0", filename)
	}
}
-
-func setZeroModTime(filename string) error {
- var utimes = []syscall.Timespec{
- syscall.NsecToTimespec(0),
- syscall.NsecToTimespec(0),
- }
-
- return syscall.UtimesNano(filename, utimes)
-}
-
-func TestRestoreNoMetadataOnIgnoredIntermediateDirs(t *testing.T) {
- env, cleanup := withTestEnvironment(t)
- defer cleanup()
-
- testRunInit(t, env.gopts)
-
- p := filepath.Join(env.testdata, "subdir1", "subdir2", "subdir3", "file.ext")
- rtest.OK(t, os.MkdirAll(filepath.Dir(p), 0755))
- rtest.OK(t, appendRandomData(p, 200))
- rtest.OK(t, setZeroModTime(filepath.Join(env.testdata, "subdir1", "subdir2")))
-
- opts := BackupOptions{}
-
- testRunBackup(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts)
- testRunCheck(t, env.gopts)
-
- snapshotID := testRunList(t, "snapshots", env.gopts)[0]
-
- // restore with filter "*.ext", this should restore "file.ext", but
- // since the directories are ignored and only created because of
- // "file.ext", no meta data should be restored for them.
- testRunRestoreIncludes(t, env.gopts, filepath.Join(env.base, "restore0"), snapshotID, []string{"*.ext"})
-
- f1 := filepath.Join(env.base, "restore0", "testdata", "subdir1", "subdir2")
- _, err := os.Stat(f1)
- rtest.OK(t, err)
-
- // restore with filter "*", this should restore meta data on everything.
- testRunRestoreIncludes(t, env.gopts, filepath.Join(env.base, "restore1"), snapshotID, []string{"*"})
-
- f2 := filepath.Join(env.base, "restore1", "testdata", "subdir1", "subdir2")
- fi, err := os.Stat(f2)
- rtest.OK(t, err)
-
- rtest.Assert(t, fi.ModTime() == time.Unix(0, 0),
- "meta data of intermediate directory hasn't been restore")
-}
-
// TestFind runs the find command with plain-text output and checks the
// number of result lines for a missing file, an exact name and a glob
// pattern.
func TestFind(t *testing.T) {
	env, cleanup := withTestEnvironment(t)
	defer cleanup()

	datafile := testSetupBackupData(t, env)
	opts := BackupOptions{}

	testRunBackup(t, "", []string{env.testdata}, opts, env.gopts)
	testRunCheck(t, env.gopts)

	results := testRunFind(t, false, env.gopts, "unexistingfile")
	rtest.Assert(t, len(results) == 0, "unexisting file found in repo (%v)", datafile)

	// The output ends with a newline, so n matches produce n+1 lines.
	results = testRunFind(t, false, env.gopts, "testfile")
	lines := strings.Split(string(results), "\n")
	rtest.Assert(t, len(lines) == 2, "expected one file found in repo (%v)", datafile)

	results = testRunFind(t, false, env.gopts, "testfile*")
	lines = strings.Split(string(results), "\n")
	rtest.Assert(t, len(lines) == 4, "expected three files found in repo (%v)", datafile)
}
-
// testMatch mirrors one entry of the find command's JSON output.
type testMatch struct {
	Path string `json:"path,omitempty"`
	Permissions string `json:"permissions,omitempty"`
	Size uint64 `json:"size,omitempty"`
	Date time.Time `json:"date,omitempty"`
	UID uint32 `json:"uid,omitempty"`
	GID uint32 `json:"gid,omitempty"`
}

// testMatches mirrors the per-snapshot result object of the find command's
// JSON output.
type testMatches struct {
	Hits int `json:"hits,omitempty"`
	SnapshotID string `json:"snapshot,omitempty"`
	Matches []testMatch `json:"matches,omitempty"`
}
-
// TestFindJSON runs the find command with JSON output and checks the number
// of snapshots, matches and hits for a missing file, an exact name and a
// glob pattern.
func TestFindJSON(t *testing.T) {
	env, cleanup := withTestEnvironment(t)
	defer cleanup()

	datafile := testSetupBackupData(t, env)
	opts := BackupOptions{}

	testRunBackup(t, "", []string{env.testdata}, opts, env.gopts)
	testRunCheck(t, env.gopts)

	results := testRunFind(t, true, env.gopts, "unexistingfile")
	matches := []testMatches{}
	rtest.OK(t, json.Unmarshal(results, &matches))
	rtest.Assert(t, len(matches) == 0, "expected no match in repo (%v)", datafile)

	results = testRunFind(t, true, env.gopts, "testfile")
	rtest.OK(t, json.Unmarshal(results, &matches))
	rtest.Assert(t, len(matches) == 1, "expected a single snapshot in repo (%v)", datafile)
	rtest.Assert(t, len(matches[0].Matches) == 1, "expected a single file to match (%v)", datafile)
	rtest.Assert(t, matches[0].Hits == 1, "expected hits to show 1 match (%v)", datafile)

	results = testRunFind(t, true, env.gopts, "testfile*")
	rtest.OK(t, json.Unmarshal(results, &matches))
	rtest.Assert(t, len(matches) == 1, "expected a single snapshot in repo (%v)", datafile)
	rtest.Assert(t, len(matches[0].Matches) == 3, "expected 3 files to match (%v)", datafile)
	rtest.Assert(t, matches[0].Hits == 3, "expected hits to show 3 matches (%v)", datafile)
}
-
-func testRebuildIndex(t *testing.T, backendTestHook backendWrapper) {
- env, cleanup := withTestEnvironment(t)
- defer cleanup()
-
- datafile := filepath.Join("..", "..", "internal", "checker", "testdata", "duplicate-packs-in-index-test-repo.tar.gz")
- rtest.SetupTarTestFixture(t, env.base, datafile)
-
- out, err := testRunCheckOutput(env.gopts)
- if !strings.Contains(out, "contained in several indexes") {
- t.Fatalf("did not find checker hint for packs in several indexes")
- }
-
- if err != nil {
- t.Fatalf("expected no error from checker for test repository, got %v", err)
- }
-
- if !strings.Contains(out, "restic rebuild-index") {
- t.Fatalf("did not find hint for rebuild-index command")
- }
-
- env.gopts.backendTestHook = backendTestHook
- testRunRebuildIndex(t, env.gopts)
-
- env.gopts.backendTestHook = nil
- out, err = testRunCheckOutput(env.gopts)
- if len(out) != 0 {
- t.Fatalf("expected no output from the checker, got: %v", out)
- }
-
- if err != nil {
- t.Fatalf("expected no error from checker after rebuild-index, got: %v", err)
- }
-}
-
-func TestRebuildIndex(t *testing.T) {
- testRebuildIndex(t, nil)
-}
-
-func TestRebuildIndexAlwaysFull(t *testing.T) {
- indexFull := index.IndexFull
- defer func() {
- index.IndexFull = indexFull
- }()
- index.IndexFull = func(*index.Index, bool) bool { return true }
- testRebuildIndex(t, nil)
-}
-
-// indexErrorBackend modifies the first index after reading.
-type indexErrorBackend struct {
- restic.Backend
- lock sync.Mutex
- hasErred bool
-}
-
-func (b *indexErrorBackend) Load(ctx context.Context, h restic.Handle, length int, offset int64, consumer func(rd io.Reader) error) error {
- return b.Backend.Load(ctx, h, length, offset, func(rd io.Reader) error {
- // protect hasErred
- b.lock.Lock()
- defer b.lock.Unlock()
- if !b.hasErred && h.Type == restic.IndexFile {
- b.hasErred = true
- return consumer(errorReadCloser{rd})
- }
- return consumer(rd)
- })
-}
-
-type errorReadCloser struct {
- io.Reader
-}
-
-func (erd errorReadCloser) Read(p []byte) (int, error) {
- n, err := erd.Reader.Read(p)
- if n > 0 {
- p[0] ^= 1
- }
- return n, err
-}
-
-func TestRebuildIndexDamage(t *testing.T) {
- testRebuildIndex(t, func(r restic.Backend) (restic.Backend, error) {
- return &indexErrorBackend{
- Backend: r,
- }, nil
- })
-}
-
-type appendOnlyBackend struct {
- restic.Backend
-}
-
-// called via repo.Backend().Remove()
-func (b *appendOnlyBackend) Remove(ctx context.Context, h restic.Handle) error {
- return errors.Errorf("Failed to remove %v", h)
-}
-
-func TestRebuildIndexFailsOnAppendOnly(t *testing.T) {
- env, cleanup := withTestEnvironment(t)
- defer cleanup()
-
- datafile := filepath.Join("..", "..", "internal", "checker", "testdata", "duplicate-packs-in-index-test-repo.tar.gz")
- rtest.SetupTarTestFixture(t, env.base, datafile)
-
- globalOptions.stdout = io.Discard
- defer func() {
- globalOptions.stdout = os.Stdout
- }()
-
- env.gopts.backendTestHook = func(r restic.Backend) (restic.Backend, error) {
- return &appendOnlyBackend{r}, nil
- }
- err := runRebuildIndex(context.TODO(), RebuildIndexOptions{}, env.gopts)
- if err == nil {
- t.Error("expected rebuildIndex to fail")
- }
- t.Log(err)
-}
-
func TestCheckRestoreNoLock(t *testing.T) {
env, cleanup := withTestEnvironment(t)
defer cleanup()
@@ -1575,217 +32,10 @@ func TestCheckRestoreNoLock(t *testing.T) {
testRunCheck(t, env.gopts)
- snapshotIDs := testRunList(t, "snapshots", env.gopts)
- if len(snapshotIDs) == 0 {
- t.Fatalf("found no snapshots")
- }
-
+ snapshotIDs := testListSnapshots(t, env.gopts, 4)
testRunRestore(t, env.gopts, filepath.Join(env.base, "restore"), snapshotIDs[0])
}
-func TestPrune(t *testing.T) {
- testPruneVariants(t, false)
- testPruneVariants(t, true)
-}
-
-func testPruneVariants(t *testing.T, unsafeNoSpaceRecovery bool) {
- suffix := ""
- if unsafeNoSpaceRecovery {
- suffix = "-recovery"
- }
- t.Run("0"+suffix, func(t *testing.T) {
- opts := PruneOptions{MaxUnused: "0%", unsafeRecovery: unsafeNoSpaceRecovery}
- checkOpts := CheckOptions{ReadData: true, CheckUnused: true}
- testPrune(t, opts, checkOpts)
- })
-
- t.Run("50"+suffix, func(t *testing.T) {
- opts := PruneOptions{MaxUnused: "50%", unsafeRecovery: unsafeNoSpaceRecovery}
- checkOpts := CheckOptions{ReadData: true}
- testPrune(t, opts, checkOpts)
- })
-
- t.Run("unlimited"+suffix, func(t *testing.T) {
- opts := PruneOptions{MaxUnused: "unlimited", unsafeRecovery: unsafeNoSpaceRecovery}
- checkOpts := CheckOptions{ReadData: true}
- testPrune(t, opts, checkOpts)
- })
-
- t.Run("CachableOnly"+suffix, func(t *testing.T) {
- opts := PruneOptions{MaxUnused: "5%", RepackCachableOnly: true, unsafeRecovery: unsafeNoSpaceRecovery}
- checkOpts := CheckOptions{ReadData: true}
- testPrune(t, opts, checkOpts)
- })
- t.Run("Small", func(t *testing.T) {
- opts := PruneOptions{MaxUnused: "unlimited", RepackSmall: true}
- checkOpts := CheckOptions{ReadData: true, CheckUnused: true}
- testPrune(t, opts, checkOpts)
- })
-}
-
-func createPrunableRepo(t *testing.T, env *testEnvironment) {
- testSetupBackupData(t, env)
- opts := BackupOptions{}
-
- testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9")}, opts, env.gopts)
- firstSnapshot := testRunList(t, "snapshots", env.gopts)
- rtest.Assert(t, len(firstSnapshot) == 1,
- "expected one snapshot, got %v", firstSnapshot)
-
- testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "2")}, opts, env.gopts)
- testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "3")}, opts, env.gopts)
-
- snapshotIDs := testRunList(t, "snapshots", env.gopts)
- rtest.Assert(t, len(snapshotIDs) == 3,
- "expected 3 snapshot, got %v", snapshotIDs)
-
- testRunForgetJSON(t, env.gopts)
- testRunForget(t, env.gopts, firstSnapshot[0].String())
-}
-
-func testPrune(t *testing.T, pruneOpts PruneOptions, checkOpts CheckOptions) {
- env, cleanup := withTestEnvironment(t)
- defer cleanup()
-
- createPrunableRepo(t, env)
- testRunPrune(t, env.gopts, pruneOpts)
- rtest.OK(t, runCheck(context.TODO(), checkOpts, env.gopts, nil))
-}
-
-var pruneDefaultOptions = PruneOptions{MaxUnused: "5%"}
-
-func listPacks(gopts GlobalOptions, t *testing.T) restic.IDSet {
- r, err := OpenRepository(context.TODO(), gopts)
- rtest.OK(t, err)
-
- packs := restic.NewIDSet()
-
- rtest.OK(t, r.List(context.TODO(), restic.PackFile, func(id restic.ID, size int64) error {
- packs.Insert(id)
- return nil
- }))
- return packs
-}
-
-func TestPruneWithDamagedRepository(t *testing.T) {
- env, cleanup := withTestEnvironment(t)
- defer cleanup()
-
- datafile := filepath.Join("testdata", "backup-data.tar.gz")
- testRunInit(t, env.gopts)
-
- rtest.SetupTarTestFixture(t, env.testdata, datafile)
- opts := BackupOptions{}
-
- // create and delete snapshot to create unused blobs
- testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "2")}, opts, env.gopts)
- firstSnapshot := testRunList(t, "snapshots", env.gopts)
- rtest.Assert(t, len(firstSnapshot) == 1,
- "expected one snapshot, got %v", firstSnapshot)
- testRunForget(t, env.gopts, firstSnapshot[0].String())
-
- oldPacks := listPacks(env.gopts, t)
-
- // create new snapshot, but lose all data
- testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "3")}, opts, env.gopts)
- snapshotIDs := testRunList(t, "snapshots", env.gopts)
-
- removePacksExcept(env.gopts, t, oldPacks, false)
-
- rtest.Assert(t, len(snapshotIDs) == 1,
- "expected one snapshot, got %v", snapshotIDs)
-
- oldHook := env.gopts.backendTestHook
- env.gopts.backendTestHook = func(r restic.Backend) (restic.Backend, error) { return newListOnceBackend(r), nil }
- defer func() {
- env.gopts.backendTestHook = oldHook
- }()
- // prune should fail
- rtest.Assert(t, runPrune(context.TODO(), pruneDefaultOptions, env.gopts) == errorPacksMissing,
- "prune should have reported index not complete error")
-}
-
-// Test repos for edge cases
-func TestEdgeCaseRepos(t *testing.T) {
- opts := CheckOptions{}
-
- // repo where index is completely missing
- // => check and prune should fail
- t.Run("no-index", func(t *testing.T) {
- testEdgeCaseRepo(t, "repo-index-missing.tar.gz", opts, pruneDefaultOptions, false, false)
- })
-
- // repo where an existing and used blob is missing from the index
- // => check and prune should fail
- t.Run("index-missing-blob", func(t *testing.T) {
- testEdgeCaseRepo(t, "repo-index-missing-blob.tar.gz", opts, pruneDefaultOptions, false, false)
- })
-
- // repo where a blob is missing
- // => check and prune should fail
- t.Run("missing-data", func(t *testing.T) {
- testEdgeCaseRepo(t, "repo-data-missing.tar.gz", opts, pruneDefaultOptions, false, false)
- })
-
- // repo where blobs which are not needed are missing or in invalid pack files
- // => check should fail and prune should repair this
- t.Run("missing-unused-data", func(t *testing.T) {
- testEdgeCaseRepo(t, "repo-unused-data-missing.tar.gz", opts, pruneDefaultOptions, false, true)
- })
-
- // repo where data exists that is not referenced
- // => check and prune should fully work
- t.Run("unreferenced-data", func(t *testing.T) {
- testEdgeCaseRepo(t, "repo-unreferenced-data.tar.gz", opts, pruneDefaultOptions, true, true)
- })
-
- // repo where an obsolete index still exists
- // => check and prune should fully work
- t.Run("obsolete-index", func(t *testing.T) {
- testEdgeCaseRepo(t, "repo-obsolete-index.tar.gz", opts, pruneDefaultOptions, true, true)
- })
-
- // repo which contains mixed (data/tree) packs
- // => check and prune should fully work
- t.Run("mixed-packs", func(t *testing.T) {
- testEdgeCaseRepo(t, "repo-mixed.tar.gz", opts, pruneDefaultOptions, true, true)
- })
-
- // repo which contains duplicate blobs
- // => checking for unused data should report an error and prune resolves the
- // situation
- opts = CheckOptions{
- ReadData: true,
- CheckUnused: true,
- }
- t.Run("duplicates", func(t *testing.T) {
- testEdgeCaseRepo(t, "repo-duplicates.tar.gz", opts, pruneDefaultOptions, false, true)
- })
-}
-
-func testEdgeCaseRepo(t *testing.T, tarfile string, optionsCheck CheckOptions, optionsPrune PruneOptions, checkOK, pruneOK bool) {
- env, cleanup := withTestEnvironment(t)
- defer cleanup()
-
- datafile := filepath.Join("testdata", tarfile)
- rtest.SetupTarTestFixture(t, env.base, datafile)
-
- if checkOK {
- testRunCheck(t, env.gopts)
- } else {
- rtest.Assert(t, runCheck(context.TODO(), optionsCheck, env.gopts, nil) != nil,
- "check should have reported an error")
- }
-
- if pruneOK {
- testRunPrune(t, env.gopts, optionsPrune)
- testRunCheck(t, env.gopts)
- } else {
- rtest.Assert(t, runPrune(context.TODO(), optionsPrune, env.gopts) != nil,
- "prune should have reported an error")
- }
-}
-
// a listOnceBackend only allows listing once per filetype
// listing filetypes more than once may cause problems with eventually consistent
// backends (like e.g. Amazon S3) as the second listing may be inconsistent to what
@@ -1837,301 +87,15 @@ func TestListOnce(t *testing.T) {
testRunPrune(t, env.gopts, pruneOpts)
rtest.OK(t, runCheck(context.TODO(), checkOpts, env.gopts, nil))
- rtest.OK(t, runRebuildIndex(context.TODO(), RebuildIndexOptions{}, env.gopts))
- rtest.OK(t, runRebuildIndex(context.TODO(), RebuildIndexOptions{ReadAllPacks: true}, env.gopts))
-}
-
-func TestHardLink(t *testing.T) {
- // this test assumes a test set with a single directory containing hard linked files
- env, cleanup := withTestEnvironment(t)
- defer cleanup()
-
- datafile := filepath.Join("testdata", "test.hl.tar.gz")
- fd, err := os.Open(datafile)
- if os.IsNotExist(err) {
- t.Skipf("unable to find data file %q, skipping", datafile)
- return
- }
- rtest.OK(t, err)
- rtest.OK(t, fd.Close())
-
- testRunInit(t, env.gopts)
-
- rtest.SetupTarTestFixture(t, env.testdata, datafile)
-
- linkTests := createFileSetPerHardlink(env.testdata)
-
- opts := BackupOptions{}
-
- // first backup
- testRunBackup(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts)
- snapshotIDs := testRunList(t, "snapshots", env.gopts)
- rtest.Assert(t, len(snapshotIDs) == 1,
- "expected one snapshot, got %v", snapshotIDs)
-
- testRunCheck(t, env.gopts)
-
- // restore all backups and compare
- for i, snapshotID := range snapshotIDs {
- restoredir := filepath.Join(env.base, fmt.Sprintf("restore%d", i))
- t.Logf("restoring snapshot %v to %v", snapshotID.Str(), restoredir)
- testRunRestore(t, env.gopts, restoredir, snapshotID)
- diff := directoriesContentsDiff(env.testdata, filepath.Join(restoredir, "testdata"))
- rtest.Assert(t, diff == "", "directories are not equal %v", diff)
-
- linkResults := createFileSetPerHardlink(filepath.Join(restoredir, "testdata"))
- rtest.Assert(t, linksEqual(linkTests, linkResults),
- "links are not equal")
- }
-
- testRunCheck(t, env.gopts)
-}
-
-func linksEqual(source, dest map[uint64][]string) bool {
- for _, vs := range source {
- found := false
- for kd, vd := range dest {
- if linkEqual(vs, vd) {
- delete(dest, kd)
- found = true
- break
- }
- }
- if !found {
- return false
- }
- }
-
- return len(dest) == 0
-}
-
-func linkEqual(source, dest []string) bool {
- // equal if sliced are equal without considering order
- if source == nil && dest == nil {
- return true
- }
-
- if source == nil || dest == nil {
- return false
- }
-
- if len(source) != len(dest) {
- return false
- }
-
- for i := range source {
- found := false
- for j := range dest {
- if source[i] == dest[j] {
- found = true
- break
- }
- }
- if !found {
- return false
- }
- }
-
- return true
-}
-
-func TestQuietBackup(t *testing.T) {
- env, cleanup := withTestEnvironment(t)
- defer cleanup()
-
- testSetupBackupData(t, env)
- opts := BackupOptions{}
-
- env.gopts.Quiet = false
- testRunBackup(t, "", []string{env.testdata}, opts, env.gopts)
- snapshotIDs := testRunList(t, "snapshots", env.gopts)
- rtest.Assert(t, len(snapshotIDs) == 1,
- "expected one snapshot, got %v", snapshotIDs)
-
- testRunCheck(t, env.gopts)
-
- env.gopts.Quiet = true
- testRunBackup(t, "", []string{env.testdata}, opts, env.gopts)
- snapshotIDs = testRunList(t, "snapshots", env.gopts)
- rtest.Assert(t, len(snapshotIDs) == 2,
- "expected two snapshots, got %v", snapshotIDs)
-
- testRunCheck(t, env.gopts)
-}
-
-func copyFile(dst string, src string) error {
- srcFile, err := os.Open(src)
- if err != nil {
- return err
- }
-
- dstFile, err := os.Create(dst)
- if err != nil {
- // ignore subsequent errors
- _ = srcFile.Close()
- return err
- }
-
- _, err = io.Copy(dstFile, srcFile)
- if err != nil {
- // ignore subsequent errors
- _ = srcFile.Close()
- _ = dstFile.Close()
- return err
- }
-
- err = srcFile.Close()
- if err != nil {
- // ignore subsequent errors
- _ = dstFile.Close()
- return err
- }
-
- err = dstFile.Close()
- if err != nil {
- return err
- }
-
- return nil
-}
-
-var diffOutputRegexPatterns = []string{
- "-.+modfile",
- "M.+modfile1",
- "\\+.+modfile2",
- "\\+.+modfile3",
- "\\+.+modfile4",
- "-.+submoddir",
- "-.+submoddir.subsubmoddir",
- "\\+.+submoddir2",
- "\\+.+submoddir2.subsubmoddir",
- "Files: +2 new, +1 removed, +1 changed",
- "Dirs: +3 new, +2 removed",
- "Data Blobs: +2 new, +1 removed",
- "Added: +7[0-9]{2}\\.[0-9]{3} KiB",
- "Removed: +2[0-9]{2}\\.[0-9]{3} KiB",
-}
-
-func setupDiffRepo(t *testing.T) (*testEnvironment, func(), string, string) {
- env, cleanup := withTestEnvironment(t)
- testRunInit(t, env.gopts)
-
- datadir := filepath.Join(env.base, "testdata")
- testdir := filepath.Join(datadir, "testdir")
- subtestdir := filepath.Join(testdir, "subtestdir")
- testfile := filepath.Join(testdir, "testfile")
-
- rtest.OK(t, os.Mkdir(testdir, 0755))
- rtest.OK(t, os.Mkdir(subtestdir, 0755))
- rtest.OK(t, appendRandomData(testfile, 256*1024))
-
- moddir := filepath.Join(datadir, "moddir")
- submoddir := filepath.Join(moddir, "submoddir")
- subsubmoddir := filepath.Join(submoddir, "subsubmoddir")
- modfile := filepath.Join(moddir, "modfile")
- rtest.OK(t, os.Mkdir(moddir, 0755))
- rtest.OK(t, os.Mkdir(submoddir, 0755))
- rtest.OK(t, os.Mkdir(subsubmoddir, 0755))
- rtest.OK(t, copyFile(modfile, testfile))
- rtest.OK(t, appendRandomData(modfile+"1", 256*1024))
-
- snapshots := make(map[string]struct{})
- opts := BackupOptions{}
- testRunBackup(t, "", []string{datadir}, opts, env.gopts)
- snapshots, firstSnapshotID := lastSnapshot(snapshots, loadSnapshotMap(t, env.gopts))
-
- rtest.OK(t, os.Rename(modfile, modfile+"3"))
- rtest.OK(t, os.Rename(submoddir, submoddir+"2"))
- rtest.OK(t, appendRandomData(modfile+"1", 256*1024))
- rtest.OK(t, appendRandomData(modfile+"2", 256*1024))
- rtest.OK(t, os.Mkdir(modfile+"4", 0755))
-
- testRunBackup(t, "", []string{datadir}, opts, env.gopts)
- _, secondSnapshotID := lastSnapshot(snapshots, loadSnapshotMap(t, env.gopts))
-
- return env, cleanup, firstSnapshotID, secondSnapshotID
-}
-
-func TestDiff(t *testing.T) {
- env, cleanup, firstSnapshotID, secondSnapshotID := setupDiffRepo(t)
- defer cleanup()
-
- // quiet suppresses the diff output except for the summary
- env.gopts.Quiet = false
- _, err := testRunDiffOutput(env.gopts, "", secondSnapshotID)
- rtest.Assert(t, err != nil, "expected error on invalid snapshot id")
-
- out, err := testRunDiffOutput(env.gopts, firstSnapshotID, secondSnapshotID)
- rtest.OK(t, err)
-
- for _, pattern := range diffOutputRegexPatterns {
- r, err := regexp.Compile(pattern)
- rtest.Assert(t, err == nil, "failed to compile regexp %v", pattern)
- rtest.Assert(t, r.MatchString(out), "expected pattern %v in output, got\n%v", pattern, out)
- }
-
- // check quiet output
- env.gopts.Quiet = true
- outQuiet, err := testRunDiffOutput(env.gopts, firstSnapshotID, secondSnapshotID)
- rtest.OK(t, err)
-
- rtest.Assert(t, len(outQuiet) < len(out), "expected shorter output on quiet mode %v vs. %v", len(outQuiet), len(out))
-}
-
-type typeSniffer struct {
- MessageType string `json:"message_type"`
-}
-
-func TestDiffJSON(t *testing.T) {
- env, cleanup, firstSnapshotID, secondSnapshotID := setupDiffRepo(t)
- defer cleanup()
-
- // quiet suppresses the diff output except for the summary
- env.gopts.Quiet = false
- env.gopts.JSON = true
- out, err := testRunDiffOutput(env.gopts, firstSnapshotID, secondSnapshotID)
- rtest.OK(t, err)
-
- var stat DiffStatsContainer
- var changes int
-
- scanner := bufio.NewScanner(strings.NewReader(out))
- for scanner.Scan() {
- line := scanner.Text()
- var sniffer typeSniffer
- rtest.OK(t, json.Unmarshal([]byte(line), &sniffer))
- switch sniffer.MessageType {
- case "change":
- changes++
- case "statistics":
- rtest.OK(t, json.Unmarshal([]byte(line), &stat))
- default:
- t.Fatalf("unexpected message type %v", sniffer.MessageType)
- }
- }
- rtest.Equals(t, 9, changes)
- rtest.Assert(t, stat.Added.Files == 2 && stat.Added.Dirs == 3 && stat.Added.DataBlobs == 2 &&
- stat.Removed.Files == 1 && stat.Removed.Dirs == 2 && stat.Removed.DataBlobs == 1 &&
- stat.ChangedFiles == 1, "unexpected statistics")
-
- // check quiet output
- env.gopts.Quiet = true
- outQuiet, err := testRunDiffOutput(env.gopts, firstSnapshotID, secondSnapshotID)
- rtest.OK(t, err)
-
- stat = DiffStatsContainer{}
- rtest.OK(t, json.Unmarshal([]byte(outQuiet), &stat))
- rtest.Assert(t, stat.Added.Files == 2 && stat.Added.Dirs == 3 && stat.Added.DataBlobs == 2 &&
- stat.Removed.Files == 1 && stat.Removed.Dirs == 2 && stat.Removed.DataBlobs == 1 &&
- stat.ChangedFiles == 1, "unexpected statistics")
- rtest.Assert(t, stat.SourceSnapshot == firstSnapshotID && stat.TargetSnapshot == secondSnapshotID, "unexpected snapshot ids")
+ rtest.OK(t, runRebuildIndex(context.TODO(), RepairIndexOptions{}, env.gopts))
+ rtest.OK(t, runRebuildIndex(context.TODO(), RepairIndexOptions{ReadAllPacks: true}, env.gopts))
}
type writeToOnly struct {
rd io.Reader
}
-func (r *writeToOnly) Read(p []byte) (n int, err error) {
+func (r *writeToOnly) Read(_ []byte) (n int, err error) {
return 0, fmt.Errorf("should have called WriteTo instead")
}
@@ -2169,9 +133,7 @@ func TestBackendLoadWriteTo(t *testing.T) {
// loading snapshots must still work
env.gopts.NoCache = false
- firstSnapshot := testRunList(t, "snapshots", env.gopts)
- rtest.Assert(t, len(firstSnapshot) == 1,
- "expected one snapshot, got %v", firstSnapshot)
+ testListSnapshots(t, env.gopts, 1)
}
func TestFindListOnce(t *testing.T) {
@@ -2187,9 +149,9 @@ func TestFindListOnce(t *testing.T) {
testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9")}, opts, env.gopts)
testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "2")}, opts, env.gopts)
- secondSnapshot := testRunList(t, "snapshots", env.gopts)
+ secondSnapshot := testListSnapshots(t, env.gopts, 2)
testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "3")}, opts, env.gopts)
- thirdSnapshot := restic.NewIDSet(testRunList(t, "snapshots", env.gopts)...)
+ thirdSnapshot := restic.NewIDSet(testListSnapshots(t, env.gopts, 3)...)
repo, err := OpenRepository(context.TODO(), env.gopts)
rtest.OK(t, err)
diff --git a/cmd/restic/local_layout_test.go b/cmd/restic/local_layout_test.go
deleted file mode 100644
index eb614f1c3..000000000
--- a/cmd/restic/local_layout_test.go
+++ /dev/null
@@ -1,41 +0,0 @@
-package main
-
-import (
- "path/filepath"
- "testing"
-
- rtest "github.com/restic/restic/internal/test"
-)
-
-func TestRestoreLocalLayout(t *testing.T) {
- env, cleanup := withTestEnvironment(t)
- defer cleanup()
-
- var tests = []struct {
- filename string
- layout string
- }{
- {"repo-layout-default.tar.gz", ""},
- {"repo-layout-s3legacy.tar.gz", ""},
- {"repo-layout-default.tar.gz", "default"},
- {"repo-layout-s3legacy.tar.gz", "s3legacy"},
- }
-
- for _, test := range tests {
- datafile := filepath.Join("..", "..", "internal", "backend", "testdata", test.filename)
-
- rtest.SetupTarTestFixture(t, env.base, datafile)
-
- env.gopts.extended["local.layout"] = test.layout
-
- // check the repo
- testRunCheck(t, env.gopts)
-
- // restore latest snapshot
- target := filepath.Join(env.base, "restore")
- testRunRestoreLatest(t, env.gopts, target, nil, nil)
-
- rtest.RemoveAll(t, filepath.Join(env.base, "repo"))
- rtest.RemoveAll(t, target)
- }
-}
diff --git a/cmd/restic/lock.go b/cmd/restic/lock.go
index f39a08db6..11c1ed8f5 100644
--- a/cmd/restic/lock.go
+++ b/cmd/restic/lock.go
@@ -2,6 +2,7 @@ package main
import (
"context"
+ "fmt"
"sync"
"time"
@@ -11,6 +12,7 @@ import (
)
type lockContext struct {
+ lock *restic.Lock
cancel context.CancelFunc
refreshWG sync.WaitGroup
}
@@ -21,17 +23,29 @@ var globalLocks struct {
sync.Once
}
-func lockRepo(ctx context.Context, repo restic.Repository) (*restic.Lock, context.Context, error) {
- return lockRepository(ctx, repo, false)
+func lockRepo(ctx context.Context, repo restic.Repository, retryLock time.Duration, json bool) (*restic.Lock, context.Context, error) {
+ return lockRepository(ctx, repo, false, retryLock, json)
}
-func lockRepoExclusive(ctx context.Context, repo restic.Repository) (*restic.Lock, context.Context, error) {
- return lockRepository(ctx, repo, true)
+func lockRepoExclusive(ctx context.Context, repo restic.Repository, retryLock time.Duration, json bool) (*restic.Lock, context.Context, error) {
+ return lockRepository(ctx, repo, true, retryLock, json)
+}
+
+var (
+ retrySleepStart = 5 * time.Second
+ retrySleepMax = 60 * time.Second
+)
+
+func minDuration(a, b time.Duration) time.Duration {
+ if a <= b {
+ return a
+ }
+ return b
}
// lockRepository wraps the ctx such that it is cancelled when the repository is unlocked
// cancelling the original context also stops the lock refresh
-func lockRepository(ctx context.Context, repo restic.Repository, exclusive bool) (*restic.Lock, context.Context, error) {
+func lockRepository(ctx context.Context, repo restic.Repository, exclusive bool, retryLock time.Duration, json bool) (*restic.Lock, context.Context, error) {
// make sure that a repository is unlocked properly and after cancel() was
// called by the cleanup handler in global.go
globalLocks.Do(func() {
@@ -43,26 +57,65 @@ func lockRepository(ctx context.Context, repo restic.Repository, exclusive bool)
lockFn = restic.NewExclusiveLock
}
- lock, err := lockFn(ctx, repo)
+ var lock *restic.Lock
+ var err error
+
+ retrySleep := minDuration(retrySleepStart, retryLock)
+ retryMessagePrinted := false
+ retryTimeout := time.After(retryLock)
+
+retryLoop:
+ for {
+ lock, err = lockFn(ctx, repo)
+ if err != nil && restic.IsAlreadyLocked(err) {
+
+ if !retryMessagePrinted {
+ if !json {
+ Verbosef("repo already locked, waiting up to %s for the lock\n", retryLock)
+ }
+ retryMessagePrinted = true
+ }
+
+ debug.Log("repo already locked, retrying in %v", retrySleep)
+ retrySleepCh := time.After(retrySleep)
+
+ select {
+ case <-ctx.Done():
+ return nil, ctx, ctx.Err()
+ case <-retryTimeout:
+ debug.Log("repo already locked, timeout expired")
+ // Last lock attempt
+ lock, err = lockFn(ctx, repo)
+ break retryLoop
+ case <-retrySleepCh:
+ retrySleep = minDuration(retrySleep*2, retrySleepMax)
+ }
+ } else {
+ // anything else, either a successful lock or another error
+ break retryLoop
+ }
+ }
if restic.IsInvalidLock(err) {
return nil, ctx, errors.Fatalf("%v\n\nthe `unlock --remove-all` command can be used to remove invalid locks. Make sure that no other restic process is accessing the repository when running the command", err)
}
if err != nil {
- return nil, ctx, errors.Fatalf("unable to create lock in backend: %v", err)
+ return nil, ctx, fmt.Errorf("unable to create lock in backend: %w", err)
}
debug.Log("create lock %p (exclusive %v)", lock, exclusive)
ctx, cancel := context.WithCancel(ctx)
lockInfo := &lockContext{
+ lock: lock,
cancel: cancel,
}
lockInfo.refreshWG.Add(2)
refreshChan := make(chan struct{})
+ forceRefreshChan := make(chan refreshLockRequest)
globalLocks.Lock()
globalLocks.locks[lock] = lockInfo
- go refreshLocks(ctx, lock, lockInfo, refreshChan)
- go monitorLockRefresh(ctx, lock, lockInfo, refreshChan)
+ go refreshLocks(ctx, repo.Backend(), lockInfo, refreshChan, forceRefreshChan)
+ go monitorLockRefresh(ctx, lockInfo, refreshChan, forceRefreshChan)
globalLocks.Unlock()
return lock, ctx, err
@@ -74,8 +127,13 @@ var refreshInterval = 5 * time.Minute
// the difference allows to compensate for a small time drift between clients.
var refreshabilityTimeout = restic.StaleLockTimeout - refreshInterval*3/2
-func refreshLocks(ctx context.Context, lock *restic.Lock, lockInfo *lockContext, refreshed chan<- struct{}) {
+type refreshLockRequest struct {
+ result chan bool
+}
+
+func refreshLocks(ctx context.Context, backend restic.Backend, lockInfo *lockContext, refreshed chan<- struct{}, forceRefresh <-chan refreshLockRequest) {
debug.Log("start")
+ lock := lockInfo.lock
ticker := time.NewTicker(refreshInterval)
lastRefresh := lock.Time
@@ -99,6 +157,22 @@ func refreshLocks(ctx context.Context, lock *restic.Lock, lockInfo *lockContext,
case <-ctx.Done():
debug.Log("terminate")
return
+
+ case req := <-forceRefresh:
+ debug.Log("trying to refresh stale lock")
+ // keep on going if our current lock still exists
+ success := tryRefreshStaleLock(ctx, backend, lock, lockInfo.cancel)
+ // inform refresh goroutine about forced refresh
+ select {
+ case <-ctx.Done():
+ case req.result <- success:
+ }
+
+ if success {
+ // update lock refresh time
+ lastRefresh = lock.Time
+ }
+
case <-ticker.C:
if time.Since(lastRefresh) > refreshabilityTimeout {
// the lock is too old, wait until the expiry monitor cancels the context
@@ -111,7 +185,7 @@ func refreshLocks(ctx context.Context, lock *restic.Lock, lockInfo *lockContext,
Warnf("unable to refresh lock: %v\n", err)
} else {
lastRefresh = lock.Time
- // inform monitor gorountine about successful refresh
+ // inform monitor goroutine about successful refresh
select {
case <-ctx.Done():
case refreshed <- struct{}{}:
@@ -121,7 +195,7 @@ func refreshLocks(ctx context.Context, lock *restic.Lock, lockInfo *lockContext,
}
}
-func monitorLockRefresh(ctx context.Context, lock *restic.Lock, lockInfo *lockContext, refreshed <-chan struct{}) {
+func monitorLockRefresh(ctx context.Context, lockInfo *lockContext, refreshed <-chan struct{}, forceRefresh chan<- refreshLockRequest) {
// time.Now() might use a monotonic timer which is paused during standby
// convert to unix time to ensure we compare real time values
lastRefresh := time.Now().UnixNano()
@@ -133,24 +207,47 @@ func monitorLockRefresh(ctx context.Context, lock *restic.Lock, lockInfo *lockCo
// timers are paused during standby, which is a problem as the refresh timeout
// _must_ expire if the host was too long in standby. Thus fall back to periodic checks
// https://github.com/golang/go/issues/35012
- timer := time.NewTimer(pollDuration)
+ ticker := time.NewTicker(pollDuration)
defer func() {
- timer.Stop()
+ ticker.Stop()
lockInfo.cancel()
lockInfo.refreshWG.Done()
}()
+ var refreshStaleLockResult chan bool
+
for {
select {
case <-ctx.Done():
debug.Log("terminate expiry monitoring")
return
case <-refreshed:
+ if refreshStaleLockResult != nil {
+ // ignore delayed refresh notifications while the stale lock is refreshed
+ continue
+ }
lastRefresh = time.Now().UnixNano()
- case <-timer.C:
- if time.Now().UnixNano()-lastRefresh < refreshabilityTimeout.Nanoseconds() {
- // restart timer
- timer.Reset(pollDuration)
+ case <-ticker.C:
+ if time.Now().UnixNano()-lastRefresh < refreshabilityTimeout.Nanoseconds() || refreshStaleLockResult != nil {
+ continue
+ }
+
+ debug.Log("trying to refreshStaleLock")
+ // keep on going if our current lock still exists
+ refreshReq := refreshLockRequest{
+ result: make(chan bool),
+ }
+ refreshStaleLockResult = refreshReq.result
+
+ // inform refresh goroutine about forced refresh
+ select {
+ case <-ctx.Done():
+ case forceRefresh <- refreshReq:
+ }
+ case success := <-refreshStaleLockResult:
+ if success {
+ lastRefresh = time.Now().UnixNano()
+ refreshStaleLockResult = nil
continue
}
@@ -160,6 +257,25 @@ func monitorLockRefresh(ctx context.Context, lock *restic.Lock, lockInfo *lockCo
}
}
+func tryRefreshStaleLock(ctx context.Context, backend restic.Backend, lock *restic.Lock, cancel context.CancelFunc) bool {
+ freeze := restic.AsBackend[restic.FreezeBackend](backend)
+ if freeze != nil {
+ debug.Log("freezing backend")
+ freeze.Freeze()
+ defer freeze.Unfreeze()
+ }
+
+ err := lock.RefreshStaleLock(ctx)
+ if err != nil {
+ Warnf("failed to refresh stale lock: %v\n", err)
+ // cancel context while the backend is still frozen to prevent accidental modifications
+ cancel()
+ return false
+ }
+
+ return true
+}
+
func unlockRepo(lock *restic.Lock) {
if lock == nil {
return
diff --git a/cmd/restic/lock_test.go b/cmd/restic/lock_test.go
index c074f15a6..2f8420853 100644
--- a/cmd/restic/lock_test.go
+++ b/cmd/restic/lock_test.go
@@ -3,30 +3,42 @@ package main
import (
"context"
"fmt"
+ "runtime"
+ "strings"
+ "sync"
"testing"
"time"
+ "github.com/restic/restic/internal/backend/location"
+ "github.com/restic/restic/internal/backend/mem"
+ "github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/repository"
"github.com/restic/restic/internal/restic"
- rtest "github.com/restic/restic/internal/test"
+ "github.com/restic/restic/internal/test"
)
-func openTestRepo(t *testing.T, wrapper backendWrapper) (*repository.Repository, func(), *testEnvironment) {
+func openLockTestRepo(t *testing.T, wrapper backendWrapper) (*repository.Repository, func(), *testEnvironment) {
env, cleanup := withTestEnvironment(t)
+
+ reg := location.NewRegistry()
+ reg.Register(mem.NewFactory())
+ env.gopts.backends = reg
+ env.gopts.Repo = "mem:"
+
if wrapper != nil {
env.gopts.backendTestHook = wrapper
}
testRunInit(t, env.gopts)
repo, err := OpenRepository(context.TODO(), env.gopts)
- rtest.OK(t, err)
+ test.OK(t, err)
return repo, cleanup, env
}
-func checkedLockRepo(ctx context.Context, t *testing.T, repo restic.Repository) (*restic.Lock, context.Context) {
- lock, wrappedCtx, err := lockRepo(ctx, repo)
- rtest.OK(t, err)
- rtest.OK(t, wrappedCtx.Err())
+func checkedLockRepo(ctx context.Context, t *testing.T, repo restic.Repository, env *testEnvironment) (*restic.Lock, context.Context) {
+ lock, wrappedCtx, err := lockRepo(ctx, repo, env.gopts.RetryLock, env.gopts.JSON)
+ test.OK(t, err)
+ test.OK(t, wrappedCtx.Err())
if lock.Stale() {
t.Fatal("lock returned stale lock")
}
@@ -34,10 +46,10 @@ func checkedLockRepo(ctx context.Context, t *testing.T, repo restic.Repository)
}
func TestLock(t *testing.T) {
- repo, cleanup, _ := openTestRepo(t, nil)
+ repo, cleanup, env := openLockTestRepo(t, nil)
defer cleanup()
- lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo)
+ lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, env)
unlockRepo(lock)
if wrappedCtx.Err() == nil {
t.Fatal("unlock did not cancel context")
@@ -45,12 +57,12 @@ func TestLock(t *testing.T) {
}
func TestLockCancel(t *testing.T) {
- repo, cleanup, _ := openTestRepo(t, nil)
+ repo, cleanup, env := openLockTestRepo(t, nil)
defer cleanup()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
- lock, wrappedCtx := checkedLockRepo(ctx, t, repo)
+ lock, wrappedCtx := checkedLockRepo(ctx, t, repo, env)
cancel()
if wrappedCtx.Err() == nil {
t.Fatal("canceled parent context did not cancel context")
@@ -61,12 +73,12 @@ func TestLockCancel(t *testing.T) {
}
func TestLockUnlockAll(t *testing.T) {
- repo, cleanup, _ := openTestRepo(t, nil)
+ repo, cleanup, env := openLockTestRepo(t, nil)
defer cleanup()
- lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo)
+ lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, env)
_, err := unlockAll(0)
- rtest.OK(t, err)
+ test.OK(t, err)
if wrappedCtx.Err() == nil {
t.Fatal("canceled parent context did not cancel context")
}
@@ -76,18 +88,19 @@ func TestLockUnlockAll(t *testing.T) {
}
func TestLockConflict(t *testing.T) {
- repo, cleanup, env := openTestRepo(t, nil)
+ repo, cleanup, env := openLockTestRepo(t, nil)
defer cleanup()
repo2, err := OpenRepository(context.TODO(), env.gopts)
- rtest.OK(t, err)
+ test.OK(t, err)
- lock, _, err := lockRepoExclusive(context.Background(), repo)
- rtest.OK(t, err)
+ lock, _, err := lockRepoExclusive(context.Background(), repo, env.gopts.RetryLock, env.gopts.JSON)
+ test.OK(t, err)
defer unlockRepo(lock)
- _, _, err = lockRepo(context.Background(), repo2)
+ _, _, err = lockRepo(context.Background(), repo2, env.gopts.RetryLock, env.gopts.JSON)
if err == nil {
t.Fatal("second lock should have failed")
}
+ test.Assert(t, restic.IsAlreadyLocked(err), "unexpected error %v", err)
}
type writeOnceBackend struct {
@@ -104,7 +117,7 @@ func (b *writeOnceBackend) Save(ctx context.Context, h restic.Handle, rd restic.
}
func TestLockFailedRefresh(t *testing.T) {
- repo, cleanup, _ := openTestRepo(t, func(r restic.Backend) (restic.Backend, error) {
+ repo, cleanup, env := openLockTestRepo(t, func(r restic.Backend) (restic.Backend, error) {
return &writeOnceBackend{Backend: r}, nil
})
defer cleanup()
@@ -117,7 +130,7 @@ func TestLockFailedRefresh(t *testing.T) {
refreshInterval, refreshabilityTimeout = ri, rt
}()
- lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo)
+ lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, env)
select {
case <-wrappedCtx.Done():
@@ -136,11 +149,13 @@ type loggingBackend struct {
func (b *loggingBackend) Save(ctx context.Context, h restic.Handle, rd restic.RewindReader) error {
b.t.Logf("save %v @ %v", h, time.Now())
- return b.Backend.Save(ctx, h, rd)
+ err := b.Backend.Save(ctx, h, rd)
+ b.t.Logf("save finished %v @ %v", h, time.Now())
+ return err
}
func TestLockSuccessfulRefresh(t *testing.T) {
- repo, cleanup, _ := openTestRepo(t, func(r restic.Backend) (restic.Backend, error) {
+ repo, cleanup, env := openLockTestRepo(t, func(r restic.Backend) (restic.Backend, error) {
return &loggingBackend{
Backend: r,
t: t,
@@ -151,20 +166,163 @@ func TestLockSuccessfulRefresh(t *testing.T) {
t.Logf("test for successful lock refresh %v", time.Now())
// reduce locking intervals to be suitable for testing
ri, rt := refreshInterval, refreshabilityTimeout
- refreshInterval = 40 * time.Millisecond
- refreshabilityTimeout = 200 * time.Millisecond
+ refreshInterval = 60 * time.Millisecond
+ refreshabilityTimeout = 500 * time.Millisecond
defer func() {
refreshInterval, refreshabilityTimeout = ri, rt
}()
- lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo)
+ lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, env)
select {
case <-wrappedCtx.Done():
- t.Fatal("lock refresh failed")
+ // don't call t.Fatal to allow the lock to be properly cleaned up
+ t.Error("lock refresh failed", time.Now())
+
+ // Dump full stacktrace
+ buf := make([]byte, 1024*1024)
+ n := runtime.Stack(buf, true)
+ buf = buf[:n]
+ t.Log(string(buf))
+
case <-time.After(2 * refreshabilityTimeout):
// expected lock refresh to work
}
// unlockRepo should not crash
unlockRepo(lock)
}
+
+type slowBackend struct {
+ restic.Backend
+ m sync.Mutex
+ sleep time.Duration
+}
+
+func (b *slowBackend) Save(ctx context.Context, h restic.Handle, rd restic.RewindReader) error {
+ b.m.Lock()
+ sleep := b.sleep
+ b.m.Unlock()
+ time.Sleep(sleep)
+ return b.Backend.Save(ctx, h, rd)
+}
+
+func TestLockSuccessfulStaleRefresh(t *testing.T) {
+ var sb *slowBackend
+ repo, cleanup, env := openLockTestRepo(t, func(r restic.Backend) (restic.Backend, error) {
+ sb = &slowBackend{Backend: r}
+ return sb, nil
+ })
+ defer cleanup()
+
+ t.Logf("test for successful lock refresh %v", time.Now())
+ // reduce locking intervals to be suitable for testing
+ ri, rt := refreshInterval, refreshabilityTimeout
+ refreshInterval = 10 * time.Millisecond
+ refreshabilityTimeout = 50 * time.Millisecond
+ defer func() {
+ refreshInterval, refreshabilityTimeout = ri, rt
+ }()
+
+ lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, env)
+ // delay lock refreshing long enough that the lock would expire
+ sb.m.Lock()
+ sb.sleep = refreshabilityTimeout + refreshInterval
+ sb.m.Unlock()
+
+ select {
+ case <-wrappedCtx.Done():
+ // don't call t.Fatal to allow the lock to be properly cleaned up
+ t.Error("lock refresh failed", time.Now())
+
+ case <-time.After(refreshabilityTimeout):
+ }
+ // reset slow backend
+ sb.m.Lock()
+ sb.sleep = 0
+ sb.m.Unlock()
+ debug.Log("normal lock period has expired")
+
+ select {
+ case <-wrappedCtx.Done():
+ // don't call t.Fatal to allow the lock to be properly cleaned up
+ t.Error("lock refresh failed", time.Now())
+
+ case <-time.After(3 * refreshabilityTimeout):
+ // expected lock refresh to work
+ }
+
+ // unlockRepo should not crash
+ unlockRepo(lock)
+}
+
+func TestLockWaitTimeout(t *testing.T) {
+ repo, cleanup, env := openLockTestRepo(t, nil)
+ defer cleanup()
+
+ elock, _, err := lockRepoExclusive(context.TODO(), repo, env.gopts.RetryLock, env.gopts.JSON)
+ test.OK(t, err)
+
+ retryLock := 200 * time.Millisecond
+
+ start := time.Now()
+ lock, _, err := lockRepo(context.TODO(), repo, retryLock, env.gopts.JSON)
+ duration := time.Since(start)
+
+ test.Assert(t, err != nil,
+ "create normal lock with exclusively locked repo didn't return an error")
+ test.Assert(t, strings.Contains(err.Error(), "repository is already locked exclusively"),
+ "create normal lock with exclusively locked repo didn't return the correct error")
+ test.Assert(t, retryLock <= duration && duration < retryLock*3/2,
+ "create normal lock with exclusively locked repo didn't wait for the specified timeout")
+
+ test.OK(t, lock.Unlock())
+ test.OK(t, elock.Unlock())
+}
+
+func TestLockWaitCancel(t *testing.T) {
+ repo, cleanup, env := openLockTestRepo(t, nil)
+ defer cleanup()
+
+ elock, _, err := lockRepoExclusive(context.TODO(), repo, env.gopts.RetryLock, env.gopts.JSON)
+ test.OK(t, err)
+
+ retryLock := 200 * time.Millisecond
+ cancelAfter := 40 * time.Millisecond
+
+ start := time.Now()
+ ctx, cancel := context.WithCancel(context.TODO())
+ time.AfterFunc(cancelAfter, cancel)
+
+ lock, _, err := lockRepo(ctx, repo, retryLock, env.gopts.JSON)
+ duration := time.Since(start)
+
+ test.Assert(t, err != nil,
+ "create normal lock with exclusively locked repo didn't return an error")
+ test.Assert(t, strings.Contains(err.Error(), "context canceled"),
+ "create normal lock with exclusively locked repo didn't return the correct error")
+ test.Assert(t, cancelAfter <= duration && duration < retryLock-10*time.Millisecond,
+ "create normal lock with exclusively locked repo didn't return in time, duration %v", duration)
+
+ test.OK(t, lock.Unlock())
+ test.OK(t, elock.Unlock())
+}
+
+func TestLockWaitSuccess(t *testing.T) {
+ repo, cleanup, env := openLockTestRepo(t, nil)
+ defer cleanup()
+
+ elock, _, err := lockRepoExclusive(context.TODO(), repo, env.gopts.RetryLock, env.gopts.JSON)
+ test.OK(t, err)
+
+ retryLock := 200 * time.Millisecond
+ unlockAfter := 40 * time.Millisecond
+
+ time.AfterFunc(unlockAfter, func() {
+ test.OK(t, elock.Unlock())
+ })
+
+ lock, _, err := lockRepo(context.TODO(), repo, retryLock, env.gopts.JSON)
+ test.OK(t, err)
+
+ test.OK(t, lock.Unlock())
+}
diff --git a/cmd/restic/main.go b/cmd/restic/main.go
index cfef7c885..17b9c468d 100644
--- a/cmd/restic/main.go
+++ b/cmd/restic/main.go
@@ -7,6 +7,7 @@ import (
"log"
"os"
"runtime"
+ godebug "runtime/debug"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/options"
@@ -24,6 +25,8 @@ var cmdRoot = &cobra.Command{
Long: `
restic is a backup program which allows saving multiple revisions of files and
directories in an encrypted repository stored on different backends.
+
+The full documentation can be found at https://restic.readthedocs.io/ .
`,
SilenceErrors: true,
SilenceUsage: true,
@@ -63,11 +66,7 @@ directories in an encrypted repository stored on different backends.
// run the debug functions for all subcommands (if build tag "debug" is
// enabled)
- if err := runDebug(); err != nil {
- return err
- }
-
- return nil
+ return runDebug()
},
}
@@ -85,7 +84,16 @@ func needsPassword(cmd string) bool {
var logBuffer = bytes.NewBuffer(nil)
+func tweakGoGC() {
+ // lower GOGC from 100 to 50, unless it was manually overwritten by the user
+ oldValue := godebug.SetGCPercent(50)
+ if oldValue != 100 {
+ godebug.SetGCPercent(oldValue)
+ }
+}
+
func main() {
+ tweakGoGC()
// install custom global logger into a buffer, if an error occurs
// we can show the logs
log.SetOutput(logBuffer)
diff --git a/doc/010_introduction.rst b/doc/010_introduction.rst
index 5c213f6cd..e6bffdea1 100644
--- a/doc/010_introduction.rst
+++ b/doc/010_introduction.rst
@@ -17,3 +17,47 @@ Introduction
Restic is a fast and secure backup program. In the following sections, we will
present typical workflows, starting with installing, preparing a new
repository, and making the first backup.
+
+Quickstart Guide
+****************
+
+To get started with a local repository, first define some environment variables:
+
+.. code-block:: console
+
+ export RESTIC_REPOSITORY=/srv/restic-repo
+ export RESTIC_PASSWORD=some-strong-password
+
+Initialize the repository (first time only):
+
+.. code-block:: console
+
+ restic init
+
+Create your first backup:
+
+.. code-block:: console
+
+ restic backup ~/work
+
+You can list all the snapshots you created with:
+
+.. code-block:: console
+
+ restic snapshots
+
+You can restore a backup by noting the snapshot ID you want and running:
+
+.. code-block:: console
+
+ restic restore --target /tmp/restore-work your-snapshot-ID
+
+It is a good idea to periodically check your repository's metadata:
+
+.. code-block:: console
+
+ restic check
+ # or full data:
+ restic check --read-data
+
+For more details continue reading the next sections.
diff --git a/doc/020_installation.rst b/doc/020_installation.rst
index 5ae93c94d..a39ae91e9 100644
--- a/doc/020_installation.rst
+++ b/doc/020_installation.rst
@@ -40,7 +40,7 @@ package from the official community repos, e.g. using ``apk``:
Arch Linux
==========
-On `Arch Linux <https://www.archlinux.org/>`__, there is a package called ``restic``
+On `Arch Linux <https://archlinux.org/>`__, there is a package called ``restic``
installed from the official community repos, e.g. with ``pacman -S``:
.. code-block:: console
@@ -93,7 +93,7 @@ You may also install it using `MacPorts <https://www.macports.org/>`__:
Nix & NixOS
===========
-If you are using `Nix <https://nixos.org/nix/>`__ or `NixOS <https://nixos.org/>`__
+If you are using `Nix / NixOS <https://nixos.org>`__
there is a package available named ``restic``.
It can be installed using ``nix-env``:
@@ -265,13 +265,24 @@ binary, you can get it with `docker pull` like this:
$ docker pull restic/restic
+The container is also available on the GitHub Container Registry:
+
+.. code-block:: console
+
+ $ docker pull ghcr.io/restic/restic
+
+Restic relies on the hostname for various operations. Make sure to set a static
+hostname using `--hostname` when creating a Docker container, otherwise Docker
+will assign a random hostname each time.
+
From Source
***********
restic is written in the Go programming language and you need at least
-Go version 1.18. Building restic may also work with older versions of Go,
+Go version 1.18. Building for Solaris requires at least Go version 1.20.
+Building restic may also work with older versions of Go,
but that's not supported. See the `Getting
-started <https://golang.org/doc/install>`__ guide of the Go project for
+started <https://go.dev/doc/install>`__ guide of the Go project for
instructions how to install Go.
In order to build restic from source, execute the following steps:
diff --git a/doc/030_preparing_a_new_repo.rst b/doc/030_preparing_a_new_repo.rst
index 39a3a0744..04c189d07 100644
--- a/doc/030_preparing_a_new_repo.rst
+++ b/doc/030_preparing_a_new_repo.rst
@@ -90,7 +90,7 @@ command and enter the same password twice:
data from a CIFS share is not recommended due to compatibility issues in
older Linux kernels. Either use another backend or set the environment
variable `GODEBUG` to `asyncpreemptoff=1`. Refer to GitHub issue
- `#2659 <https://github.com/restic/restic/issues/2659>`_ for further explanations.
+ :issue:`2659` for further explanations.
SFTP
****
@@ -273,7 +273,7 @@ For an S3-compatible server that is not Amazon (like Minio, see below),
or is only available via HTTP, you can specify the URL to the server
like this: ``s3:http://server:port/bucket_name``.
-.. note:: restic expects `path-style URLs <https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro>`__
+.. note:: restic expects `path-style URLs <https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-bucket-intro.html>`__
like for example ``s3.us-west-2.amazonaws.com/bucket_name``.
Virtual-hosted–style URLs like ``bucket_name.s3.us-west-2.amazonaws.com``,
where the bucket name is part of the hostname are not supported. These must
@@ -290,12 +290,11 @@ like this: ``s3:http://server:port/bucket_name``.
Minio Server
************
-`Minio <https://www.minio.io>`__ is an Open Source Object Storage,
+`Minio <https://min.io/>`__ is an Open Source Object Storage,
written in Go and compatible with Amazon S3 API.
-- Download and Install `Minio
- Server <https://minio.io/downloads/#minio-server>`__.
-- You can also refer to https://docs.minio.io for step by step guidance
+- Download and install the `Minio Server <https://min.io/download#/linux>`__.
+- You can also refer to `Minio Docs <https://min.io/docs/minio/linux/>`__ for step by step guidance
on installation and getting started on Minio Client and Minio Server.
You must first setup the following environment variables with the
@@ -350,7 +349,7 @@ this command.
Alibaba Cloud (Aliyun) Object Storage System (OSS)
**************************************************
-`Alibaba OSS <https://www.alibabacloud.com/product/oss/>`__ is an
+`Alibaba OSS <https://www.alibabacloud.com/product/object-storage-service>`__ is an
encrypted, secure, cost-effective, and easy-to-use object storage
service that enables you to store, back up, and archive large amounts
of data in the cloud.
@@ -358,7 +357,7 @@ of data in the cloud.
Alibaba OSS is S3 compatible so it can be used as a storage provider
for a restic repository with a couple of extra parameters.
-- Determine the correct `Alibaba OSS region endpoint <https://www.alibabacloud.com/help/doc-detail/31837.htm>`__ - this will be something like ``oss-eu-west-1.aliyuncs.com``
+- Determine the correct `Alibaba OSS region endpoint <https://www.alibabacloud.com/help/en/object-storage-service/latest/regions-and-endpoints>`__ - this will be something like ``oss-eu-west-1.aliyuncs.com``
- You'll need the region name too - this will be something like ``oss-eu-west-1``
You must first setup the following environment variables with the
@@ -441,7 +440,7 @@ the naming convention of those variables follows the official Python Swift clien
Restic should be compatible with an `OpenStack RC file
-<https://docs.openstack.org/user-guide/common/cli-set-environment-variables-using-openstack-rc.html>`__
+<https://docs.openstack.org/ocata/admin-guide/common/cli-set-environment-variables-using-openstack-rc.html>`__
in most cases.
Once environment variables are set up, a new repository can be created. The
@@ -524,20 +523,31 @@ Microsoft Azure Blob Storage
****************************
You can also store backups on Microsoft Azure Blob Storage. Export the Azure
-Blob Storage account name and key as follows:
+Blob Storage account name:
.. code-block:: console
$ export AZURE_ACCOUNT_NAME=<ACCOUNT_NAME>
- $ export AZURE_ACCOUNT_KEY=<SECRET_KEY>
-or
+For authentication export one of the following variables:
.. code-block:: console
- $ export AZURE_ACCOUNT_NAME=<ACCOUNT_NAME>
+ # For storage account key
+ $ export AZURE_ACCOUNT_KEY=<SECRET_KEY>
+ # For SAS
$ export AZURE_ACCOUNT_SAS=<SAS_TOKEN>
+Alternatively, if run on Azure, restic will automatically use service accounts configured
+via the standard environment variables or Workload / Managed Identities.
+
+Restic will by default use Azure's global domain ``core.windows.net`` as endpoint suffix.
+You can specify other suffixes as follows:
+
+.. code-block:: console
+
+ $ export AZURE_ENDPOINT_SUFFIX=<ENDPOINT_SUFFIX>
+
Afterwards you can initialize a repository in a container called ``foo`` in the
root path like this:
@@ -614,9 +624,11 @@ The number of concurrent connections to the GCS service can be set with the
``-o gs.connections=10`` switch. By default, at most five parallel connections are
established.
-.. _service account: https://cloud.google.com/iam/docs/service-accounts
-.. _create a service account key: https://cloud.google.com/iam/docs/creating-managing-service-account-keys#iam-service-account-keys-create-console
-.. _default authentication material: https://cloud.google.com/docs/authentication/production
+The region, where a bucket should be created, can be specified with the ``-o gs.region=us`` switch. By default, the region is set to ``us``.
+
+.. _service account: https://cloud.google.com/iam/docs/service-account-overview
+.. _create a service account key: https://cloud.google.com/iam/docs/keys-create-delete
+.. _default authentication material: https://cloud.google.com/docs/authentication#service-accounts
.. _other-services:
@@ -748,7 +760,7 @@ Password prompt on Windows
At the moment, restic only supports the default Windows console
interaction. If you use emulation environments like
-`MSYS2 <https://msys2.github.io/>`__ or
+`MSYS2 <https://www.msys2.org/>`__ or
`Cygwin <https://www.cygwin.com/>`__, which use terminals like
``Mintty`` or ``rxvt``, you may get a password error.
diff --git a/doc/040_backup.rst b/doc/040_backup.rst
index b9996311d..1655e7eed 100644
--- a/doc/040_backup.rst
+++ b/doc/040_backup.rst
@@ -139,13 +139,24 @@ File change detection
*********************
When restic encounters a file that has already been backed up, whether in the
-current backup or a previous one, it makes sure the file's contents are only
+current backup or a previous one, it makes sure the file's content is only
stored once in the repository. To do so, it normally has to scan the entire
-contents of every file. Because this can be very expensive, restic also uses a
+content of the file. Because this can be very expensive, restic also uses a
change detection rule based on file metadata to determine whether a file is
likely unchanged since a previous backup. If it is, the file is not scanned
again.
+The previous backup snapshot, called "parent" snapshot in restic terminology,
+is determined as follows. By default restic groups snapshots by hostname and
+backup paths, and then selects the latest snapshot in the group that matches
+the current backup. You can change the selection criteria using the
+``--group-by`` option, which defaults to ``host,paths``. To select the latest
+snapshot with the same paths independent of the hostname, use ``paths``. Or,
+to only consider the hostname and tags, use ``host,tags``. Alternatively, it
+is possible to manually specify a specific parent snapshot using the
+``--parent`` option. Finally, note that one would normally set the
+``--group-by`` option for the ``forget`` command to the same value.
+
Change detection is only performed for regular files (not special files,
symlinks or directories) that have the exact same path as they did in a
previous backup of the same location. If a file or one of its containing
@@ -205,6 +216,7 @@ Combined with ``--verbose``, you can see a list of changes:
Would be added to the repository: 25.551 MiB
.. _backup-excluding-files:
+
Excluding Files
***************
@@ -213,7 +225,7 @@ the exclude options are:
- ``--exclude`` Specified one or more times to exclude one or more items
- ``--iexclude`` Same as ``--exclude`` but ignores the case of paths
-- ``--exclude-caches`` Specified once to exclude folders containing `this special file <https://bford.info/cachedir/>`__
+- ``--exclude-caches`` Specified once to exclude a folder's content if it contains `the special CACHEDIR.TAG file <https://bford.info/cachedir/>`__, but keep ``CACHEDIR.TAG``.
- ``--exclude-file`` Specified one or more times to exclude items listed in a given file
- ``--iexclude-file`` Same as ``exclude-file`` but ignores cases like in ``--iexclude``
- ``--exclude-if-present foo`` Specified one or more times to exclude a folder's content if it contains a file called ``foo`` (optionally having a given header, no wildcards for the file name supported)
@@ -242,14 +254,14 @@ This instructs restic to exclude files matching the following criteria:
* All files matching ``*.go`` (second line in ``excludes.txt``)
* All files and sub-directories named ``bar`` which reside somewhere below a directory called ``foo`` (fourth line in ``excludes.txt``)
-Patterns use `filepath.Glob <https://golang.org/pkg/path/filepath/#Glob>`__ internally,
-see `filepath.Match <https://golang.org/pkg/path/filepath/#Match>`__ for
-syntax. Patterns are tested against the full path of a file/dir to be saved,
+Patterns use the syntax of the Go function
+`filepath.Match <https://pkg.go.dev/path/filepath#Match>`__
+and are tested against the full path of a file/dir to be saved,
even if restic is passed a relative path to save. Empty lines and lines
starting with a ``#`` are ignored.
Environment variables in exclude files are expanded with `os.ExpandEnv
-<https://golang.org/pkg/os/#ExpandEnv>`__, so ``/home/$USER/foo`` will be
+<https://pkg.go.dev/os#ExpandEnv>`__, so ``/home/$USER/foo`` will be
expanded to ``/home/bob/foo`` for the user ``bob``. To get a literal dollar
sign, write ``$$`` to the file - this has to be done even when there's no
matching environment variable for the word following a single ``$``. Note
@@ -369,7 +381,7 @@ contains one *pattern* per line. The file must be encoded as UTF-8, or UTF-16
with a byte-order mark. Leading and trailing whitespace is removed from the
patterns. Empty lines and lines starting with a ``#`` are ignored and each
pattern is expanded when read, such that special characters in it are expanded
-using the Go function `filepath.Glob <https://golang.org/pkg/path/filepath/#Glob>`__
+using the Go function `filepath.Glob <https://pkg.go.dev/path/filepath#Glob>`__
- please see its documentation for the syntax you can use in the patterns.
The argument passed to ``--files-from-verbatim`` must be the name of a text file
@@ -439,6 +451,15 @@ and displays a small statistic, just pass the command two snapshot IDs:
Added: 16.403 MiB
Removed: 16.402 MiB
+To only compare files in specific subfolders, you can use the ``<snapshot>:<subfolder>``
+syntax, where ``snapshot`` is the ID of a snapshot (or the string ``latest``) and ``subfolder``
+is a path within the snapshot. For example, to only compare files in the ``/restic``
+folder, you could use the following command:
+
+.. code-block:: console
+
+ $ restic -r /srv/restic-repo diff 5845b002:/restic 2ab627a6:/restic
+
Backing up special items and metadata
*************************************
@@ -521,8 +542,11 @@ Restic does not have a built-in way of scheduling backups, as it's a tool
that runs when executed rather than a daemon. There are plenty of different
ways to schedule backup runs on various different platforms, e.g. systemd
and cron on Linux/BSD and Task Scheduler in Windows, depending on one's
-needs and requirements. When scheduling restic to run recurringly, please
-make sure to detect already running instances before starting the backup.
+needs and requirements. If you don't want to implement your own scheduling,
+you can use `resticprofile <https://github.com/creativeprojects/resticprofile/#resticprofile>`__.
+
+When scheduling restic to run recurringly, please make sure to detect already
+running instances before starting the backup.
Space requirements
******************
@@ -552,6 +576,8 @@ environment variables. The following lists these environment variables:
RESTIC_PASSWORD The actual password for the repository
RESTIC_PASSWORD_COMMAND Command printing the password for the repository to stdout
RESTIC_KEY_HINT ID of key to try decrypting first, before other keys
+ RESTIC_CACERT Location(s) of certificate file(s), comma separated if multiple (replaces --cacert)
+ RESTIC_TLS_CLIENT_CERT Location of TLS client certificate and private key (replaces --tls-client-cert)
RESTIC_CACHE_DIR Location of the cache directory
RESTIC_COMPRESSION Compression mode (only available for repository format version 2)
RESTIC_PROGRESS_FPS Frames per second by which the progress bar is updated
@@ -599,6 +625,7 @@ environment variables. The following lists these environment variables:
AZURE_ACCOUNT_NAME Account name for Azure
AZURE_ACCOUNT_KEY Account key for Azure
AZURE_ACCOUNT_SAS Shared access signatures (SAS) for Azure
+ AZURE_ENDPOINT_SUFFIX Endpoint suffix for Azure Storage (default: core.windows.net)
GOOGLE_PROJECT_ID Project ID for Google Cloud Storage
GOOGLE_APPLICATION_CREDENTIALS Application Credentials for Google Cloud Storage (e.g. $HOME/.config/gs-secret-restic-key.json)
diff --git a/doc/045_working_with_repos.rst b/doc/045_working_with_repos.rst
index 00d87a450..82a20bac4 100644
--- a/doc/045_working_with_repos.rst
+++ b/doc/045_working_with_repos.rst
@@ -232,6 +232,8 @@ modifying the repository. Instead restic will only print the actions it would
perform.
+.. _checking-integrity:
+
Checking integrity and consistency
==================================
@@ -284,6 +286,14 @@ If the repository structure is intact, restic will show that no errors were foun
check snapshots, trees and blobs
no errors were found
+By default, check creates a new temporary cache directory to verify that the
+data stored in the repository is intact. To reuse the existing cache, you can
+use the ``--with-cache`` flag.
+
+If the cache directory is not explicitly set, then ``check`` creates its
+temporary cache directory in the temporary directory, see :ref:`temporary_files`.
+Otherwise, the specified cache directory is used, as described in :ref:`caching`.
+
By default, the ``check`` command does not verify that the actual pack files
on disk in the repository are unmodified, because doing so requires reading
a copy of every pack file in the repository. To tell restic to also verify the
diff --git a/doc/050_restore.rst b/doc/050_restore.rst
index b0ea021cc..ed2ddfd40 100644
--- a/doc/050_restore.rst
+++ b/doc/050_restore.rst
@@ -48,6 +48,18 @@ files in the snapshot. For example, to restore a single file:
This will restore the file ``foo`` to ``/tmp/restore-work/work/foo``.
+To only restore a specific subfolder, you can use the ``<snapshot>:<subfolder>``
+syntax, where ``snapshot`` is the ID of a snapshot (or the string ``latest``)
+and ``subfolder`` is a path within the snapshot.
+
+.. code-block:: console
+
+ $ restic -r /srv/restic-repo restore 79766175:/work --target /tmp/restore-work --include /foo
+ enter password for repository:
+ restoring <Snapshot of [/home/user/work] at 2015-05-08 21:40:19.884408621 +0200 CEST> to /tmp/restore-work
+
+This will restore the file ``foo`` to ``/tmp/restore-work/foo``.
+
You can use the command ``restic ls latest`` or ``restic find foo`` to find the
path to the file within the snapshot. This path you can then pass to
``--include`` in verbatim to only restore the single file or directory.
@@ -96,6 +108,11 @@ hard links from a fuse mount should be done by a program that preserves
hard links. A program that does so is ``rsync``, used with the option
--hard-links.
+.. note:: ``restic mount`` is mostly useful if you want to restore just a few
+ files out of a snapshot, or to check which files are contained in a snapshot.
+ To restore many files or a whole snapshot, ``restic restore`` is the best
+ alternative, often it is *significantly* faster.
+
Printing files to stdout
========================
@@ -146,8 +163,14 @@ output the contents in the tar (default) or zip format:
.. code-block:: console
$ restic -r /srv/restic-repo dump latest /home/other/work > restore.tar
-
+
.. code-block:: console
$ restic -r /srv/restic-repo dump -a zip latest /home/other/work > restore.zip
+The folder content is then contained at ``/home/other/work`` within the archive.
+To include the folder content at the root of the archive, you can use the ``<snapshot>:<subfolder>`` syntax:
+
+.. code-block:: console
+
+ $ restic -r /srv/restic-repo dump latest:/home/other/work / > restore.tar
diff --git a/doc/060_forget.rst b/doc/060_forget.rst
index a4205de75..72c7ae97f 100644
--- a/doc/060_forget.rst
+++ b/doc/060_forget.rst
@@ -205,6 +205,7 @@ The ``forget`` command accepts the following policy options:
natural time boundaries and *not* relative to when you run ``forget``. Weeks
are Monday 00:00 to Sunday 23:59, days 00:00 to 23:59, hours :00 to :59, etc.
They also only count hours/days/weeks/etc which have one or more snapshots.
+ A value of ``-1`` will be interpreted as "forever", i.e. "keep all".
.. note:: All duration related options (``--keep-{within,-*}``) ignore snapshots
with a timestamp in the future (relative to when the ``forget`` command is
@@ -219,6 +220,8 @@ paths and tags. The policy is then applied to each group of snapshots individual
This is a safety feature to prevent accidental removal of unrelated backup sets. To
disable grouping and apply the policy to all snapshots regardless of their host,
paths and tags, use ``--group-by ''`` (that is, an empty value to ``--group-by``).
+Note that one would normally set the ``--group-by`` option for the ``backup``
+command to the same value.
Additionally, you can restrict the policy to only process snapshots which have a
particular hostname with the ``--host`` parameter, or tags with the ``--tag``
@@ -469,7 +472,7 @@ space. However, a **failed** ``prune`` run can cause the repository to become
**temporarily unusable**. Therefore, make sure that you have a stable connection to the
repository storage, before running this command. In case the command fails, it may become
necessary to manually remove all files from the `index/` folder of the repository and
-run `rebuild-index` afterwards.
+run `repair index` afterwards.
To prevent accidental usages of the ``--unsafe-recover-no-free-space`` option it is
necessary to first run ``prune --unsafe-recover-no-free-space SOME-ID`` and then replace
diff --git a/doc/070_encryption.rst b/doc/070_encryption.rst
index a7b8716ac..dc651cc07 100644
--- a/doc/070_encryption.rst
+++ b/doc/070_encryption.rst
@@ -19,7 +19,7 @@ Encryption
the implementation looks sane and I guess the deduplication trade-off is worth
it. So… I’m going to use restic for my personal backups.*" `Filippo Valsorda`_
-.. _Filippo Valsorda: https://blog.filippo.io/restic-cryptography/
+.. _Filippo Valsorda: https://words.filippo.io/restic-cryptography/
**********************
Manage repository keys
diff --git a/doc/075_scripting.rst b/doc/075_scripting.rst
index 712a70244..a4b983d7c 100644
--- a/doc/075_scripting.rst
+++ b/doc/075_scripting.rst
@@ -22,18 +22,553 @@ Check if a repository is already initialized
You may find a need to check if a repository is already initialized,
perhaps to prevent your script from initializing a repository multiple
-times. The command ``snapshots`` may be used for this purpose:
+times. The command ``cat config`` may be used for this purpose:
.. code-block:: console
- $ restic -r /srv/restic-repo snapshots
- Fatal: unable to open config file: Stat: stat /srv/restic-repo/config: no such file or directory
+ $ restic -r /srv/restic-repo cat config
+ Fatal: unable to open config file: stat /srv/restic-repo/config: no such file or directory
Is there a repository at the following location?
/srv/restic-repo
If a repository does not exist, restic will return a non-zero exit code
and print an error message. Note that restic will also return a non-zero
exit code if a different error is encountered (e.g.: incorrect password
-to ``snapshots``) and it may print a different error message. If there
-are no errors, restic will return a zero exit code and print all the
-snapshots.
+to ``cat config``) and it may print a different error message. If there
+are no errors, restic will return a zero exit code and print the repository
+metadata.
+
+JSON output
+***********
+
+Restic outputs JSON data to ``stdout`` if requested with the ``--json`` flag.
+The structure of that data varies depending on the circumstance. The
+JSON output of most restic commands are documented here.
+
+.. note::
+ Not all commands support JSON output. If a command does not support JSON output,
+ feel free to submit a pull request!
+
+.. warning::
+ We try to keep the JSON output backwards compatible. However, new message types
+ or fields may be added at any time. Similarly, enum-like fields for which a fixed
+ list of allowed values is documented may be extended at any time.
+
+
+Output formats
+--------------
+
+Currently only the output on ``stdout`` is JSON formatted. Errors printed on ``stderr``
+are still printed as plain text messages. The generated JSON output uses one of the
+following two formats.
+
+Single JSON document
+^^^^^^^^^^^^^^^^^^^^
+
+Several commands output a single JSON document that can be parsed in its entirety.
+Depending on the command, the output consists of either a single or multiple lines.
+
+JSON lines
+^^^^^^^^^^
+
+Several commands, in particular long running ones or those that generate a large output,
+use a format also known as JSON lines. It consists of a stream of new-line separated JSON
+messages. You can determine the nature of the message using the ``message_type`` field.
+
+As an exception, the ``ls`` command uses the field ``struct_type`` instead.
+
+
+backup
+------
+
+The ``backup`` command uses the JSON lines format with the following message types.
+
+Status
+^^^^^^
+
++----------------------+------------------------------------------------------------+
+|``message_type`` | Always "status" |
++----------------------+------------------------------------------------------------+
+|``seconds_elapsed`` | Time since backup started |
++----------------------+------------------------------------------------------------+
+|``seconds_remaining`` | Estimated time remaining |
++----------------------+------------------------------------------------------------+
+|``percent_done`` | Percentage of data backed up (bytes_done/total_bytes) |
++----------------------+------------------------------------------------------------+
+|``total_files`` | Total number of files detected |
++----------------------+------------------------------------------------------------+
+|``files_done`` | Files completed (backed up to repo) |
++----------------------+------------------------------------------------------------+
+|``total_bytes`` | Total number of bytes in backup set |
++----------------------+------------------------------------------------------------+
+|``bytes_done`` | Number of bytes completed (backed up to repo) |
++----------------------+------------------------------------------------------------+
+|``error_count`` | Number of errors |
++----------------------+------------------------------------------------------------+
+|``current_files`` | List of files currently being backed up |
++----------------------+------------------------------------------------------------+
+
+Error
+^^^^^
+
++----------------------+-------------------------------------------+
+| ``message_type`` | Always "error" |
++----------------------+-------------------------------------------+
+| ``error`` | Error message |
++----------------------+-------------------------------------------+
+| ``during`` | What restic was trying to do |
++----------------------+-------------------------------------------+
+| ``item`` | Usually, the path of the problematic file |
++----------------------+-------------------------------------------+
+
+Verbose Status
+^^^^^^^^^^^^^^
+
+Verbose status provides details about the progress, including details about backed up files.
+
++----------------------+-----------------------------------------------------------+
+| ``message_type`` | Always "verbose_status" |
++----------------------+-----------------------------------------------------------+
+| ``action`` | Either "new", "unchanged", "modified" or "scan_finished" |
++----------------------+-----------------------------------------------------------+
+| ``item`` | The item in question |
++----------------------+-----------------------------------------------------------+
+| ``duration`` | How long it took, in seconds |
++----------------------+-----------------------------------------------------------+
+| ``data_size`` | How big the item is |
++----------------------+-----------------------------------------------------------+
+| ``metadata_size`` | How big the metadata is |
++----------------------+-----------------------------------------------------------+
+| ``total_files`` | Total number of files |
++----------------------+-----------------------------------------------------------+
+
+Summary
+^^^^^^^
+
+Summary is the last output line in a successful backup.
+
++---------------------------+---------------------------------------------------------+
+| ``message_type`` | Always "summary" |
++---------------------------+---------------------------------------------------------+
+| ``files_new`` | Number of new files |
++---------------------------+---------------------------------------------------------+
+| ``files_changed`` | Number of files that changed |
++---------------------------+---------------------------------------------------------+
+| ``files_unmodified`` | Number of files that did not change |
++---------------------------+---------------------------------------------------------+
+| ``dirs_new`` | Number of new directories |
++---------------------------+---------------------------------------------------------+
+| ``dirs_changed`` | Number of directories that changed |
++---------------------------+---------------------------------------------------------+
+| ``dirs_unmodified`` | Number of directories that did not change |
++---------------------------+---------------------------------------------------------+
+| ``data_blobs`` | Number of data blobs |
++---------------------------+---------------------------------------------------------+
+| ``tree_blobs`` | Number of tree blobs |
++---------------------------+---------------------------------------------------------+
+| ``data_added`` | Amount of data added, in bytes |
++---------------------------+---------------------------------------------------------+
+| ``total_files_processed`` | Total number of files processed |
++---------------------------+---------------------------------------------------------+
+| ``total_bytes_processed`` | Total number of bytes processed |
++---------------------------+---------------------------------------------------------+
+| ``total_duration`` | Total time it took for the operation to complete |
++---------------------------+---------------------------------------------------------+
+| ``snapshot_id`` | ID of the new snapshot |
++---------------------------+---------------------------------------------------------+
+
+
+cat
+---
+
+The ``cat`` command returns data about various objects in the repository, which
+are stored in JSON form. Specifying ``--json`` or ``--quiet`` will suppress any
+non-JSON messages the command generates.
+
+
+diff
+----
+
+The ``diff`` command uses the JSON lines format with the following message types.
+
+change
+^^^^^^
+
++------------------+--------------------------------------------------------------+
+| ``message_type`` | Always "change" |
++------------------+--------------------------------------------------------------+
+| ``path`` | Path that has changed |
++------------------+--------------------------------------------------------------+
+| ``modifier`` | Type of change, a concatenation of the following characters: |
+| | "+" = added, "-" = removed, "T" = entry type changed, |
+| | "M" = file content changed, "U" = metadata changed |
++------------------+--------------------------------------------------------------+
+
+statistics
+^^^^^^^^^^
+
++---------------------+----------------------------+
+| ``message_type`` | Always "statistics" |
++---------------------+----------------------------+
+| ``source_snapshot`` | ID of first snapshot |
++---------------------+----------------------------+
+| ``target_snapshot`` | ID of second snapshot |
++---------------------+----------------------------+
+| ``changed_files`` | Number of changed files |
++---------------------+----------------------------+
+| ``added`` | DiffStat object, see below |
++---------------------+----------------------------+
+| ``removed`` | DiffStat object, see below |
++---------------------+----------------------------+
+
+DiffStat object
+
++----------------+-------------------------------------------+
+| ``files`` | Number of changed files |
++----------------+-------------------------------------------+
+| ``dirs`` | Number of changed directories |
++----------------+-------------------------------------------+
+| ``others`` | Number of changed other directory entries |
++----------------+-------------------------------------------+
+| ``data_blobs`` | Number of data blobs |
++----------------+-------------------------------------------+
+| ``tree_blobs`` | Number of tree blobs |
++----------------+-------------------------------------------+
+| ``bytes`` | Number of bytes |
++----------------+-------------------------------------------+
+
+
+find
+----
+
+The ``find`` command outputs a single JSON document containing an array of JSON
+objects with matches for your search term. These matches are organized by snapshot.
+
+If the ``--blob`` or ``--tree`` option is passed, then the output is an array of
+Blob objects.
+
+
++-----------------+----------------------------------------------+
+| ``hits`` | Number of matches in the snapshot |
++-----------------+----------------------------------------------+
+| ``snapshot`` | ID of the snapshot |
++-----------------+----------------------------------------------+
+| ``matches`` | Array of Match objects detailing a match |
++-----------------+----------------------------------------------+
+
+Match object
+
++-----------------+----------------------------------------------+
+| ``path`` | Object path |
++-----------------+----------------------------------------------+
+| ``permissions`` | UNIX permissions |
++-----------------+----------------------------------------------+
+| ``type`` | Object type e.g. file, dir, etc... |
++-----------------+----------------------------------------------+
+| ``atime`` | Access time |
++-----------------+----------------------------------------------+
+| ``mtime`` | Modification time |
++-----------------+----------------------------------------------+
+| ``ctime`` | Change time |
++-----------------+----------------------------------------------+
+| ``name`` | Object name |
++-----------------+----------------------------------------------+
+| ``user`` | Name of owner |
++-----------------+----------------------------------------------+
+| ``group`` | Name of group |
++-----------------+----------------------------------------------+
+| ``mode`` | UNIX file mode, shorthand of ``permissions`` |
++-----------------+----------------------------------------------+
+| ``device_id`` | OS specific device identifier |
++-----------------+----------------------------------------------+
+| ``links`` | Number of hardlinks |
++-----------------+----------------------------------------------+
+| ``uid`` | ID of owner |
++-----------------+----------------------------------------------+
+| ``gid`` | ID of group |
++-----------------+----------------------------------------------+
+| ``size`` | Size of object in bytes |
++-----------------+----------------------------------------------+
+
+Blob object
+
++-----------------+--------------------------------------------+
+| ``object_type`` | Either "blob" or "tree" |
++-----------------+--------------------------------------------+
+| ``id`` | ID of found blob |
++-----------------+--------------------------------------------+
+| ``path`` | Path in snapshot |
++-----------------+--------------------------------------------+
+| ``parent_tree`` | Parent tree blob, only set for type "blob" |
++-----------------+--------------------------------------------+
+| ``snapshot`` | Snapshot ID |
++-----------------+--------------------------------------------+
+| ``time`` | Snapshot timestamp |
++-----------------+--------------------------------------------+
+
+
+forget
+------
+
+The ``forget`` command prints a single JSON document containing an array of
+ForgetGroups. If specific snapshot IDs are specified, then no output is generated.
+
+The ``prune`` command does not yet support JSON, so ``forget --prune``
+results in a mix of JSON and text output.
+
+ForgetGroup
+^^^^^^^^^^^
+
++-------------+-----------------------------------------------------------+
+| ``tags`` | Tags identifying the snapshot group |
++-------------+-----------------------------------------------------------+
+| ``host`` | Host identifying the snapshot group |
++-------------+-----------------------------------------------------------+
+| ``paths`` | Paths identifying the snapshot group |
++-------------+-----------------------------------------------------------+
+| ``keep`` | Array of Snapshot objects that are kept |
++-------------+-----------------------------------------------------------+
+| ``remove`` | Array of Snapshot objects that were removed |
++-------------+-----------------------------------------------------------+
+| ``reasons`` | Array of Reason objects describing why a snapshot is kept |
++-------------+-----------------------------------------------------------+
+
+Snapshot object
+
++---------------------+--------------------------------------------------+
+| ``time`` | Timestamp of when the backup was started |
++---------------------+--------------------------------------------------+
+| ``parent`` | ID of the parent snapshot |
++---------------------+--------------------------------------------------+
+| ``tree`` | ID of the root tree blob |
++---------------------+--------------------------------------------------+
+| ``paths`` | List of paths included in the backup |
++---------------------+--------------------------------------------------+
+| ``hostname`` | Hostname of the backed up machine |
++---------------------+--------------------------------------------------+
+| ``username`` | Username the backup command was run as |
++---------------------+--------------------------------------------------+
+| ``uid`` | ID of owner |
++---------------------+--------------------------------------------------+
+| ``gid`` | ID of group |
++---------------------+--------------------------------------------------+
+| ``excludes`` | List of paths and globs excluded from the backup |
++---------------------+--------------------------------------------------+
+| ``tags`` | List of tags for the snapshot in question |
++---------------------+--------------------------------------------------+
+| ``program_version`` | restic version used to create snapshot |
++---------------------+--------------------------------------------------+
+| ``id`` | Snapshot ID |
++---------------------+--------------------------------------------------+
+| ``short_id`` | Snapshot ID, short form |
++---------------------+--------------------------------------------------+
+
+Reason object
+
++----------------+---------------------------------------------------------+
+| ``snapshot`` | Snapshot object, without ``id`` and ``short_id`` fields |
++----------------+---------------------------------------------------------+
+| ``matches`` | Array containing descriptions of the matching criteria |
++----------------+---------------------------------------------------------+
+| ``counters`` | Object containing counters used by the policies |
++----------------+---------------------------------------------------------+
+
+
+init
+----
+
+The ``init`` command uses the JSON lines format, but only outputs a single message.
+
++------------------+--------------------------------+
+| ``message_type`` | Always "initialized" |
++------------------+--------------------------------+
+| ``id`` | ID of the created repository |
++------------------+--------------------------------+
+| ``repository`` | URL of the repository |
++------------------+--------------------------------+
+
+
+key list
+--------
+
+The ``key list`` command returns an array of objects with the following structure.
+
++--------------+------------------------------------+
+| ``current`` | Is currently used key? |
++--------------+------------------------------------+
+| ``id`` | Unique key ID |
++--------------+------------------------------------+
+| ``userName`` | User who created it |
++--------------+------------------------------------+
+| ``hostName`` | Name of machine it was created on |
++--------------+------------------------------------+
+| ``created`` | Timestamp when it was created |
++--------------+------------------------------------+
+
+
+ls
+--
+
+The ``ls`` command uses the JSON lines format with the following message types.
+As an exception, the ``struct_type`` field is used to determine the message type.
+
+snapshot
+^^^^^^^^
+
++----------------+--------------------------------------------------+
+| ``struct_type``| Always "snapshot" |
++----------------+--------------------------------------------------+
+| ``time`` | Timestamp of when the backup was started |
++----------------+--------------------------------------------------+
+| ``parent`` | ID of the parent snapshot |
++----------------+--------------------------------------------------+
+| ``tree`` | ID of the root tree blob |
++----------------+--------------------------------------------------+
+| ``paths`` | List of paths included in the backup |
++----------------+--------------------------------------------------+
+| ``hostname`` | Hostname of the backed up machine |
++----------------+--------------------------------------------------+
+| ``username`` | Username the backup command was run as |
++----------------+--------------------------------------------------+
+| ``uid`` | ID of owner |
++----------------+--------------------------------------------------+
+| ``gid`` | ID of group |
++----------------+--------------------------------------------------+
+| ``excludes`` | List of paths and globs excluded from the backup |
++----------------+--------------------------------------------------+
+| ``tags`` | List of tags for the snapshot in question |
++----------------+--------------------------------------------------+
+| ``id`` | Snapshot ID |
++----------------+--------------------------------------------------+
+| ``short_id`` | Snapshot ID, short form |
++----------------+--------------------------------------------------+
+
+
+node
+^^^^
+
++-----------------+--------------------------+
+| ``struct_type`` | Always "node" |
++-----------------+--------------------------+
+| ``name`` | Node name |
++-----------------+--------------------------+
+| ``type`` | Node type |
++-----------------+--------------------------+
+| ``path`` | Node path |
++-----------------+--------------------------+
+| ``uid`` | UID of node |
++-----------------+--------------------------+
+| ``gid`` | GID of node |
++-----------------+--------------------------+
+| ``size`` | Size in bytes |
++-----------------+--------------------------+
+| ``mode`` | Node mode |
++-----------------+--------------------------+
+| ``atime`` | Node access time |
++-----------------+--------------------------+
+| ``mtime`` | Node modification time |
++-----------------+--------------------------+
+| ``ctime``       | Node change time         |
++-----------------+--------------------------+
+
+
+restore
+-------
+
+The ``restore`` command uses the JSON lines format with the following message types.
+
+Status
+^^^^^^
+
++----------------------+------------------------------------------------------------+
+|``message_type`` | Always "status" |
++----------------------+------------------------------------------------------------+
+|``seconds_elapsed`` | Time since restore started |
++----------------------+------------------------------------------------------------+
+|``percent_done``     | Percentage of data restored (bytes_restored/total_bytes)   |
++----------------------+------------------------------------------------------------+
+|``total_files`` | Total number of files detected |
++----------------------+------------------------------------------------------------+
+|``files_restored`` | Files restored |
++----------------------+------------------------------------------------------------+
+|``total_bytes`` | Total number of bytes in restore set |
++----------------------+------------------------------------------------------------+
+|``bytes_restored`` | Number of bytes restored |
++----------------------+------------------------------------------------------------+
+
+
+Summary
+^^^^^^^
+
++----------------------+------------------------------------------------------------+
+|``message_type`` | Always "summary" |
++----------------------+------------------------------------------------------------+
+|``seconds_elapsed`` | Time since restore started |
++----------------------+------------------------------------------------------------+
+|``total_files`` | Total number of files detected |
++----------------------+------------------------------------------------------------+
+|``files_restored`` | Files restored |
++----------------------+------------------------------------------------------------+
+|``total_bytes`` | Total number of bytes in restore set |
++----------------------+------------------------------------------------------------+
+|``bytes_restored`` | Number of bytes restored |
++----------------------+------------------------------------------------------------+
+
+
+snapshots
+---------
+
+The ``snapshots`` command returns a single JSON array containing objects with the structure outlined below.
+
++---------------------+--------------------------------------------------+
+| ``time`` | Timestamp of when the backup was started |
++---------------------+--------------------------------------------------+
+| ``parent`` | ID of the parent snapshot |
++---------------------+--------------------------------------------------+
+| ``tree`` | ID of the root tree blob |
++---------------------+--------------------------------------------------+
+| ``paths`` | List of paths included in the backup |
++---------------------+--------------------------------------------------+
+| ``hostname`` | Hostname of the backed up machine |
++---------------------+--------------------------------------------------+
+| ``username`` | Username the backup command was run as |
++---------------------+--------------------------------------------------+
+| ``uid`` | ID of owner |
++---------------------+--------------------------------------------------+
+| ``gid`` | ID of group |
++---------------------+--------------------------------------------------+
+| ``excludes`` | List of paths and globs excluded from the backup |
++---------------------+--------------------------------------------------+
+| ``tags`` | List of tags for the snapshot in question |
++---------------------+--------------------------------------------------+
+| ``program_version`` | restic version used to create snapshot |
++---------------------+--------------------------------------------------+
+| ``id`` | Snapshot ID |
++---------------------+--------------------------------------------------+
+| ``short_id`` | Snapshot ID, short form |
++---------------------+--------------------------------------------------+
+
+
+stats
+-----
+
+The ``stats`` command returns a single JSON object.
+
++------------------------------+-----------------------------------------------------+
+| ``total_size`` | Repository size in bytes |
++------------------------------+-----------------------------------------------------+
+| ``total_file_count`` | Number of files backed up in the repository |
++------------------------------+-----------------------------------------------------+
+| ``total_blob_count`` | Number of blobs in the repository |
++------------------------------+-----------------------------------------------------+
+| ``snapshots_count`` | Number of processed snapshots |
++------------------------------+-----------------------------------------------------+
+| ``total_uncompressed_size`` | Repository size in bytes if blobs were uncompressed |
++------------------------------+-----------------------------------------------------+
+| ``compression_ratio`` | Factor by which the already compressed data |
+| | has shrunk due to compression |
++------------------------------+-----------------------------------------------------+
+| ``compression_progress`` | Percentage of already compressed data |
++------------------------------+-----------------------------------------------------+
+| ``compression_space_saving`` | Overall space saving due to compression |
++------------------------------+-----------------------------------------------------+
diff --git a/doc/077_troubleshooting.rst b/doc/077_troubleshooting.rst
new file mode 100644
index 000000000..fe317acfc
--- /dev/null
+++ b/doc/077_troubleshooting.rst
@@ -0,0 +1,194 @@
+..
+ Normally, there are no heading levels assigned to certain characters as the structure is
+ determined from the succession of headings. However, this convention is used in Python’s
+ Style Guide for documenting which you may follow:
+
+ # with overline, for parts
+ * for chapters
+ = for sections
+ - for subsections
+ ^ for subsubsections
+ " for paragraphs
+
+#########################
+Troubleshooting
+#########################
+
+The repository format used by restic is designed to be error resistant. In
+particular, commands such as ``backup`` or ``prune`` can be interrupted at
+*any* point in time without damaging the repository. You might have to run
+``unlock`` manually afterwards, but that's it.
+
+However, a repository might be damaged if some of its files are damaged or lost.
+This can occur due to hardware failures, accidentally removing files from the
+repository or bugs in the implementation of restic.
+
+The following steps will help you recover a repository. This guide does not cover
+all possible types of repository damages. Thus, if the steps do not work for you
+or you are unsure how to proceed, then ask for help. Please always include the
+check output discussed in the next section and what steps you've taken to repair
+the repository so far.
+
+* `Forum <https://forum.restic.net/>`_
+* Our IRC channel ``#restic`` on ``irc.libera.chat``
+
+Make sure that you **use the latest available restic version**. It can contain
+bugfixes and improvements that simplify repairing a repository. It might also
+contain a fix for your repository problems!
+
+
+1. Find out what is damaged
+***************************
+
+The first step is always to check the repository.
+
+.. code-block:: console
+
+ $ restic check --read-data
+
+ using temporary cache in /tmp/restic-check-cache-1418935501
+ repository 12345678 opened (version 2, compression level auto)
+ created new cache in /tmp/restic-check-cache-1418935501
+ create exclusive lock for repository
+ load indexes
+ check all packs
+ check snapshots, trees and blobs
+ error for tree 7ef8ebab:
+ id 7ef8ebabc59aadda1a237d23ca7abac487b627a9b86508aa0194690446ff71f6 not found in repository
+ [0:02] 100.00% 7 / 7 snapshots
+ read all data
+ [0:05] 100.00% 25 / 25 packs
+ Fatal: repository contains errors
+
+.. note::
+
+ This will download the whole repository. If retrieving data from the backend is
+ expensive, then omit the ``--read-data`` option. Keep a copy of the check output
+ as it might be necessary later on!
+
+If the output contains warnings that the ``ciphertext verification failed`` for
+some blobs in the repository, then please ask for help in the forum or our IRC
+channel. These errors are often caused by hardware problems which **must** be
+investigated and fixed. Otherwise, the backup will be damaged again and again.
+
+Similarly, if a repository is repeatedly damaged, please open an `issue on Github
+<https://github.com/restic/restic/issues/new/choose>`_ as this could indicate a bug
+somewhere. Please include the check output and additional information that might
+help locate the problem.
+
+
+2. Backup the repository
+************************
+
+Create a full copy of the repository if possible. Or at the very least make a
+copy of the ``index`` and ``snapshots`` folders. This will allow you to roll back
+the repository if the repair procedure fails. If your repository resides in a
+cloud storage, then you can for example use `rclone <https://rclone.org/>`_ to
+make such a copy.
+
+Please disable all regular operations on the repository to prevent unexpected
+changes. Especially, ``forget`` or ``prune`` must be disabled as they could
+remove data unexpectedly.
+
+.. warning::
+
+ If you suspect hardware problems, then you *must* investigate those first.
+ Otherwise, the repository will soon be damaged again.
+
+Please take the time to understand what the commands described in the following
+do. If you are unsure, then ask for help in the forum or our IRC channel. Search
+whether your issue is already known and solved. Please take a look at the
+`forum`_ and `Github issues <https://github.com/restic/restic/issues>`_.
+
+
+3. Repair the index
+*******************
+
+Restic relies on its index to contain correct information about what data is
+stored in the repository. Thus, the first step to repair a repository is to
+repair the index:
+
+.. code-block:: console
+
+ $ restic repair index
+
+ repository a14e5863 opened (version 2, compression level auto)
+ loading indexes...
+ getting pack files to read...
+ removing not found pack file 83ad44f59b05f6bce13376b022ac3194f24ca19e7a74926000b6e316ec6ea5a4
+ rebuilding index
+ [0:00] 100.00% 27 / 27 packs processed
+ deleting obsolete index files
+ [0:00] 100.00% 3 / 3 files deleted
+ done
+
+This ensures that no longer existing files are removed from the index. All later
+steps to repair the repository rely on a correct index. That is, you must always
+repair the index first!
+
+Please note that it is not recommended to repair the index unless the repository
+is actually damaged.
+
+
+4. Run all backups (optional)
+*****************************
+
+With a correct index, the ``backup`` command guarantees that newly created
+snapshots can be restored successfully. It can also heal older snapshots,
+if the missing data is also contained in the new snapshot.
+
+Therefore, it is recommended to run all your ``backup`` tasks again. In some
+cases, this is enough to fully repair the repository.
+
+
+5. Remove missing data from snapshots
+*************************************
+
+If your repository is still missing data, then you can use the ``repair snapshots``
+command to remove all inaccessible data from the snapshots. That is, this will
+result in a limited amount of data loss. Using the ``--forget`` option, the
+command will automatically remove the original, damaged snapshots.
+
+.. code-block:: console
+
+ $ restic repair snapshots --forget
+
+ snapshot 6979421e of [/home/user/restic/restic] at 2022-11-02 20:59:18.617503315 +0100 CET)
+ file "/restic/internal/fuse/snapshots_dir.go": removed missing content
+ file "/restic/internal/restorer/restorer_unix_test.go": removed missing content
+ file "/restic/internal/walker/walker.go": removed missing content
+ saved new snapshot 7b094cea
+ removed old snapshot 6979421e
+
+ modified 1 snapshots
+
+If you did not add the ``--forget`` option, then you have to manually delete all
+modified snapshots using the ``forget`` command. In the example above, you'd have
+to run ``restic forget 6979421e``.
+
+
+6. Check the repository again
+*****************************
+
+Phew, we're almost done now. To make sure that the repository has been successfully
+repaired please run ``check`` again.
+
+.. code-block:: console
+
+ $ restic check --read-data
+
+ using temporary cache in /tmp/restic-check-cache-2569290785
+ repository a14e5863 opened (version 2, compression level auto)
+ created new cache in /tmp/restic-check-cache-2569290785
+ create exclusive lock for repository
+ load indexes
+ check all packs
+ check snapshots, trees and blobs
+ [0:00] 100.00% 7 / 7 snapshots
+ read all data
+ [0:00] 100.00% 25 / 25 packs
+ no errors were found
+
+If the ``check`` command did not complete with ``no errors were found``, then
+the repository is still damaged. At this point, please ask for help at the
+`forum`_ or our IRC channel ``#restic`` on ``irc.libera.chat``.
diff --git a/doc/090_participating.rst b/doc/090_participating.rst
index 00a387974..890bd9018 100644
--- a/doc/090_participating.rst
+++ b/doc/090_participating.rst
@@ -33,8 +33,8 @@ The debug log will always contain all log messages restic generates. You
can also instruct restic to print some or all debug messages to stderr.
These can also be limited to e.g. a list of source files or a list of
patterns for function names. The patterns are globbing patterns (see the
-documentation for `path.Glob <https://golang.org/pkg/path/#Glob>`__), multiple
-patterns are separated by commas. Patterns are case sensitive.
+documentation for `filepath.Match <https://pkg.go.dev/path/filepath#Match>`__).
+Multiple patterns are separated by commas. Patterns are case sensitive.
Printing all log messages to the console can be achieved by setting the
file filter to ``*``:
diff --git a/doc/110_talks.rst b/doc/110_talks.rst
index 06952896f..e32cda62a 100644
--- a/doc/110_talks.rst
+++ b/doc/110_talks.rst
@@ -17,6 +17,8 @@ Talks
The following talks will be or have been given about restic:
+- 2021-04-02: `The Changelog: Restic has your backup
+ (Podcast) <https://changelog.com/podcast/434>`__
- 2016-01-31: Lightning Talk at the Go Devroom at FOSDEM 2016,
Brussels, Belgium
- 2016-01-29: `restic - Backups mal
@@ -24,11 +26,11 @@ The following talks will be or have been given about restic:
Public lecture in German at `CCC Cologne
e.V. <https://koeln.ccc.de>`__ in Cologne, Germany
- 2015-08-23: `A Solution to the Backup
- Inconvenience <https://programm.froscon.de/2015/events/1515.html>`__:
- Lecture at `FROSCON 2015 <https://www.froscon.de>`__ in Bonn, Germany
+ Inconvenience <https://programm.froscon.org/2015/events/1515.html>`__:
+ Lecture at `FROSCON 2015 <https://www.froscon.org/>`__ in Bonn, Germany
- 2015-02-01: `Lightning Talk at FOSDEM
2015 <https://www.youtube.com/watch?v=oM-MfeflUZ8&t=11m40s>`__: A
short introduction (with slightly outdated command line)
- 2015-01-27: `Talk about restic at CCC
- Aachen <https://videoag.fsmpi.rwth-aachen.de/?view=player&lectureid=4442#content>`__
+ Aachen <https://video.fsmpi.rwth-aachen.de/cccac/4442>`__
(in German)
diff --git a/doc/bash-completion.sh b/doc/bash-completion.sh
index 42f459f65..44221554e 100644
--- a/doc/bash-completion.sh
+++ b/doc/bash-completion.sh
@@ -414,6 +414,12 @@ _restic_backup()
flags+=("-f")
local_nonpersistent_flags+=("--force")
local_nonpersistent_flags+=("-f")
+ flags+=("--group-by=")
+ two_word_flags+=("--group-by")
+ two_word_flags+=("-g")
+ local_nonpersistent_flags+=("--group-by")
+ local_nonpersistent_flags+=("--group-by=")
+ local_nonpersistent_flags+=("-g")
flags+=("--help")
flags+=("-h")
local_nonpersistent_flags+=("--help")
@@ -500,6 +506,8 @@ _restic_backup()
two_word_flags+=("-r")
flags+=("--repository-file=")
two_word_flags+=("--repository-file")
+ flags+=("--retry-lock=")
+ two_word_flags+=("--retry-lock")
flags+=("--tls-client-cert=")
two_word_flags+=("--tls-client-cert")
flags+=("--verbose")
@@ -570,6 +578,8 @@ _restic_cache()
two_word_flags+=("-r")
flags+=("--repository-file=")
two_word_flags+=("--repository-file")
+ flags+=("--retry-lock=")
+ two_word_flags+=("--retry-lock")
flags+=("--tls-client-cert=")
two_word_flags+=("--tls-client-cert")
flags+=("--verbose")
@@ -632,6 +642,8 @@ _restic_cat()
two_word_flags+=("-r")
flags+=("--repository-file=")
two_word_flags+=("--repository-file")
+ flags+=("--retry-lock=")
+ two_word_flags+=("--retry-lock")
flags+=("--tls-client-cert=")
two_word_flags+=("--tls-client-cert")
flags+=("--verbose")
@@ -702,6 +714,8 @@ _restic_check()
two_word_flags+=("-r")
flags+=("--repository-file=")
two_word_flags+=("--repository-file")
+ flags+=("--retry-lock=")
+ two_word_flags+=("--retry-lock")
flags+=("--tls-client-cert=")
two_word_flags+=("--tls-client-cert")
flags+=("--verbose")
@@ -798,6 +812,8 @@ _restic_copy()
two_word_flags+=("-r")
flags+=("--repository-file=")
two_word_flags+=("--repository-file")
+ flags+=("--retry-lock=")
+ two_word_flags+=("--retry-lock")
flags+=("--tls-client-cert=")
two_word_flags+=("--tls-client-cert")
flags+=("--verbose")
@@ -862,6 +878,8 @@ _restic_diff()
two_word_flags+=("-r")
flags+=("--repository-file=")
two_word_flags+=("--repository-file")
+ flags+=("--retry-lock=")
+ two_word_flags+=("--retry-lock")
flags+=("--tls-client-cert=")
two_word_flags+=("--tls-client-cert")
flags+=("--verbose")
@@ -944,6 +962,8 @@ _restic_dump()
two_word_flags+=("-r")
flags+=("--repository-file=")
two_word_flags+=("--repository-file")
+ flags+=("--retry-lock=")
+ two_word_flags+=("--retry-lock")
flags+=("--tls-client-cert=")
two_word_flags+=("--tls-client-cert")
flags+=("--verbose")
@@ -980,6 +1000,8 @@ _restic_find()
local_nonpersistent_flags+=("--host")
local_nonpersistent_flags+=("--host=")
local_nonpersistent_flags+=("-H")
+ flags+=("--human-readable")
+ local_nonpersistent_flags+=("--human-readable")
flags+=("--ignore-case")
flags+=("-i")
local_nonpersistent_flags+=("--ignore-case")
@@ -1054,6 +1076,8 @@ _restic_find()
two_word_flags+=("-r")
flags+=("--repository-file=")
two_word_flags+=("--repository-file")
+ flags+=("--retry-lock=")
+ two_word_flags+=("--retry-lock")
flags+=("--tls-client-cert=")
two_word_flags+=("--tls-client-cert")
flags+=("--verbose")
@@ -1222,6 +1246,8 @@ _restic_forget()
two_word_flags+=("-r")
flags+=("--repository-file=")
two_word_flags+=("--repository-file")
+ flags+=("--retry-lock=")
+ two_word_flags+=("--retry-lock")
flags+=("--tls-client-cert=")
two_word_flags+=("--tls-client-cert")
flags+=("--verbose")
@@ -1304,6 +1330,8 @@ _restic_generate()
two_word_flags+=("-r")
flags+=("--repository-file=")
two_word_flags+=("--repository-file")
+ flags+=("--retry-lock=")
+ two_word_flags+=("--retry-lock")
flags+=("--tls-client-cert=")
two_word_flags+=("--tls-client-cert")
flags+=("--verbose")
@@ -1362,6 +1390,8 @@ _restic_help()
two_word_flags+=("-r")
flags+=("--repository-file=")
two_word_flags+=("--repository-file")
+ flags+=("--retry-lock=")
+ two_word_flags+=("--retry-lock")
flags+=("--tls-client-cert=")
two_word_flags+=("--tls-client-cert")
flags+=("--verbose")
@@ -1451,6 +1481,8 @@ _restic_init()
two_word_flags+=("-r")
flags+=("--repository-file=")
two_word_flags+=("--repository-file")
+ flags+=("--retry-lock=")
+ two_word_flags+=("--retry-lock")
flags+=("--tls-client-cert=")
two_word_flags+=("--tls-client-cert")
flags+=("--verbose")
@@ -1525,6 +1557,8 @@ _restic_key()
two_word_flags+=("-r")
flags+=("--repository-file=")
two_word_flags+=("--repository-file")
+ flags+=("--retry-lock=")
+ two_word_flags+=("--retry-lock")
flags+=("--tls-client-cert=")
two_word_flags+=("--tls-client-cert")
flags+=("--verbose")
@@ -1587,6 +1621,8 @@ _restic_list()
two_word_flags+=("-r")
flags+=("--repository-file=")
two_word_flags+=("--repository-file")
+ flags+=("--retry-lock=")
+ two_word_flags+=("--retry-lock")
flags+=("--tls-client-cert=")
two_word_flags+=("--tls-client-cert")
flags+=("--verbose")
@@ -1621,6 +1657,8 @@ _restic_ls()
local_nonpersistent_flags+=("--host")
local_nonpersistent_flags+=("--host=")
local_nonpersistent_flags+=("-H")
+ flags+=("--human-readable")
+ local_nonpersistent_flags+=("--human-readable")
flags+=("--long")
flags+=("-l")
local_nonpersistent_flags+=("--long")
@@ -1669,6 +1707,8 @@ _restic_ls()
two_word_flags+=("-r")
flags+=("--repository-file=")
two_word_flags+=("--repository-file")
+ flags+=("--retry-lock=")
+ two_word_flags+=("--retry-lock")
flags+=("--tls-client-cert=")
two_word_flags+=("--tls-client-cert")
flags+=("--verbose")
@@ -1735,6 +1775,8 @@ _restic_migrate()
two_word_flags+=("-r")
flags+=("--repository-file=")
two_word_flags+=("--repository-file")
+ flags+=("--retry-lock=")
+ two_word_flags+=("--retry-lock")
flags+=("--tls-client-cert=")
two_word_flags+=("--tls-client-cert")
flags+=("--verbose")
@@ -1825,6 +1867,8 @@ _restic_mount()
two_word_flags+=("-r")
flags+=("--repository-file=")
two_word_flags+=("--repository-file")
+ flags+=("--retry-lock=")
+ two_word_flags+=("--retry-lock")
flags+=("--tls-client-cert=")
two_word_flags+=("--tls-client-cert")
flags+=("--verbose")
@@ -1909,6 +1953,8 @@ _restic_prune()
two_word_flags+=("-r")
flags+=("--repository-file=")
two_word_flags+=("--repository-file")
+ flags+=("--retry-lock=")
+ two_word_flags+=("--retry-lock")
flags+=("--tls-client-cert=")
two_word_flags+=("--tls-client-cert")
flags+=("--verbose")
@@ -1919,9 +1965,134 @@ _restic_prune()
noun_aliases=()
}
-_restic_rebuild-index()
+_restic_recover()
{
- last_command="restic_rebuild-index"
+ last_command="restic_recover"
+
+ command_aliases=()
+
+ commands=()
+
+ flags=()
+ two_word_flags=()
+ local_nonpersistent_flags=()
+ flags_with_completion=()
+ flags_completion=()
+
+ flags+=("--help")
+ flags+=("-h")
+ local_nonpersistent_flags+=("--help")
+ local_nonpersistent_flags+=("-h")
+ flags+=("--cacert=")
+ two_word_flags+=("--cacert")
+ flags+=("--cache-dir=")
+ two_word_flags+=("--cache-dir")
+ flags+=("--cleanup-cache")
+ flags+=("--compression=")
+ two_word_flags+=("--compression")
+ flags+=("--insecure-tls")
+ flags+=("--json")
+ flags+=("--key-hint=")
+ two_word_flags+=("--key-hint")
+ flags+=("--limit-download=")
+ two_word_flags+=("--limit-download")
+ flags+=("--limit-upload=")
+ two_word_flags+=("--limit-upload")
+ flags+=("--no-cache")
+ flags+=("--no-lock")
+ flags+=("--option=")
+ two_word_flags+=("--option")
+ two_word_flags+=("-o")
+ flags+=("--pack-size=")
+ two_word_flags+=("--pack-size")
+ flags+=("--password-command=")
+ two_word_flags+=("--password-command")
+ flags+=("--password-file=")
+ two_word_flags+=("--password-file")
+ two_word_flags+=("-p")
+ flags+=("--quiet")
+ flags+=("-q")
+ flags+=("--repo=")
+ two_word_flags+=("--repo")
+ two_word_flags+=("-r")
+ flags+=("--repository-file=")
+ two_word_flags+=("--repository-file")
+ flags+=("--retry-lock=")
+ two_word_flags+=("--retry-lock")
+ flags+=("--tls-client-cert=")
+ two_word_flags+=("--tls-client-cert")
+ flags+=("--verbose")
+ flags+=("-v")
+
+ must_have_one_flag=()
+ must_have_one_noun=()
+ noun_aliases=()
+}
+
+_restic_repair_help()
+{
+ last_command="restic_repair_help"
+
+ command_aliases=()
+
+ commands=()
+
+ flags=()
+ two_word_flags=()
+ local_nonpersistent_flags=()
+ flags_with_completion=()
+ flags_completion=()
+
+ flags+=("--cacert=")
+ two_word_flags+=("--cacert")
+ flags+=("--cache-dir=")
+ two_word_flags+=("--cache-dir")
+ flags+=("--cleanup-cache")
+ flags+=("--compression=")
+ two_word_flags+=("--compression")
+ flags+=("--insecure-tls")
+ flags+=("--json")
+ flags+=("--key-hint=")
+ two_word_flags+=("--key-hint")
+ flags+=("--limit-download=")
+ two_word_flags+=("--limit-download")
+ flags+=("--limit-upload=")
+ two_word_flags+=("--limit-upload")
+ flags+=("--no-cache")
+ flags+=("--no-lock")
+ flags+=("--option=")
+ two_word_flags+=("--option")
+ two_word_flags+=("-o")
+ flags+=("--pack-size=")
+ two_word_flags+=("--pack-size")
+ flags+=("--password-command=")
+ two_word_flags+=("--password-command")
+ flags+=("--password-file=")
+ two_word_flags+=("--password-file")
+ two_word_flags+=("-p")
+ flags+=("--quiet")
+ flags+=("-q")
+ flags+=("--repo=")
+ two_word_flags+=("--repo")
+ two_word_flags+=("-r")
+ flags+=("--repository-file=")
+ two_word_flags+=("--repository-file")
+ flags+=("--retry-lock=")
+ two_word_flags+=("--retry-lock")
+ flags+=("--tls-client-cert=")
+ two_word_flags+=("--tls-client-cert")
+ flags+=("--verbose")
+ flags+=("-v")
+
+ must_have_one_flag=()
+ must_have_one_noun=()
+ has_completion_function=1
+ noun_aliases=()
+}
+
+_restic_repair_index()
+{
+ last_command="restic_repair_index"
command_aliases=()
@@ -1973,6 +2144,8 @@ _restic_rebuild-index()
two_word_flags+=("-r")
flags+=("--repository-file=")
two_word_flags+=("--repository-file")
+ flags+=("--retry-lock=")
+ two_word_flags+=("--retry-lock")
flags+=("--tls-client-cert=")
two_word_flags+=("--tls-client-cert")
flags+=("--verbose")
@@ -1983,13 +2156,100 @@ _restic_rebuild-index()
noun_aliases=()
}
-_restic_recover()
+_restic_repair_snapshots()
{
- last_command="restic_recover"
+ last_command="restic_repair_snapshots"
+
+ command_aliases=()
+
+ commands=()
+
+ flags=()
+ two_word_flags=()
+ local_nonpersistent_flags=()
+ flags_with_completion=()
+ flags_completion=()
+
+ flags+=("--dry-run")
+ flags+=("-n")
+ local_nonpersistent_flags+=("--dry-run")
+ local_nonpersistent_flags+=("-n")
+ flags+=("--forget")
+ local_nonpersistent_flags+=("--forget")
+ flags+=("--help")
+ flags+=("-h")
+ local_nonpersistent_flags+=("--help")
+ local_nonpersistent_flags+=("-h")
+ flags+=("--host=")
+ two_word_flags+=("--host")
+ two_word_flags+=("-H")
+ local_nonpersistent_flags+=("--host")
+ local_nonpersistent_flags+=("--host=")
+ local_nonpersistent_flags+=("-H")
+ flags+=("--path=")
+ two_word_flags+=("--path")
+ local_nonpersistent_flags+=("--path")
+ local_nonpersistent_flags+=("--path=")
+ flags+=("--tag=")
+ two_word_flags+=("--tag")
+ local_nonpersistent_flags+=("--tag")
+ local_nonpersistent_flags+=("--tag=")
+ flags+=("--cacert=")
+ two_word_flags+=("--cacert")
+ flags+=("--cache-dir=")
+ two_word_flags+=("--cache-dir")
+ flags+=("--cleanup-cache")
+ flags+=("--compression=")
+ two_word_flags+=("--compression")
+ flags+=("--insecure-tls")
+ flags+=("--json")
+ flags+=("--key-hint=")
+ two_word_flags+=("--key-hint")
+ flags+=("--limit-download=")
+ two_word_flags+=("--limit-download")
+ flags+=("--limit-upload=")
+ two_word_flags+=("--limit-upload")
+ flags+=("--no-cache")
+ flags+=("--no-lock")
+ flags+=("--option=")
+ two_word_flags+=("--option")
+ two_word_flags+=("-o")
+ flags+=("--pack-size=")
+ two_word_flags+=("--pack-size")
+ flags+=("--password-command=")
+ two_word_flags+=("--password-command")
+ flags+=("--password-file=")
+ two_word_flags+=("--password-file")
+ two_word_flags+=("-p")
+ flags+=("--quiet")
+ flags+=("-q")
+ flags+=("--repo=")
+ two_word_flags+=("--repo")
+ two_word_flags+=("-r")
+ flags+=("--repository-file=")
+ two_word_flags+=("--repository-file")
+ flags+=("--retry-lock=")
+ two_word_flags+=("--retry-lock")
+ flags+=("--tls-client-cert=")
+ two_word_flags+=("--tls-client-cert")
+ flags+=("--verbose")
+ flags+=("-v")
+
+ must_have_one_flag=()
+ must_have_one_noun=()
+ noun_aliases=()
+}
+
+_restic_repair()
+{
+ last_command="restic_repair"
command_aliases=()
commands=()
+ commands+=("help")
+ commands+=("index")
+ commands+=("snapshots")
flags=()
two_word_flags=()
@@ -2035,6 +2295,8 @@ _restic_recover()
two_word_flags+=("-r")
flags+=("--repository-file=")
two_word_flags+=("--repository-file")
+ flags+=("--retry-lock=")
+ two_word_flags+=("--retry-lock")
flags+=("--tls-client-cert=")
two_word_flags+=("--tls-client-cert")
flags+=("--verbose")
@@ -2141,6 +2403,8 @@ _restic_restore()
two_word_flags+=("-r")
flags+=("--repository-file=")
two_word_flags+=("--repository-file")
+ flags+=("--retry-lock=")
+ two_word_flags+=("--retry-lock")
flags+=("--tls-client-cert=")
two_word_flags+=("--tls-client-cert")
flags+=("--verbose")
@@ -2241,6 +2505,8 @@ _restic_rewrite()
two_word_flags+=("-r")
flags+=("--repository-file=")
two_word_flags+=("--repository-file")
+ flags+=("--retry-lock=")
+ two_word_flags+=("--retry-lock")
flags+=("--tls-client-cert=")
two_word_flags+=("--tls-client-cert")
flags+=("--verbose")
@@ -2307,6 +2573,8 @@ _restic_self-update()
two_word_flags+=("-r")
flags+=("--repository-file=")
two_word_flags+=("--repository-file")
+ flags+=("--retry-lock=")
+ two_word_flags+=("--retry-lock")
flags+=("--tls-client-cert=")
two_word_flags+=("--tls-client-cert")
flags+=("--verbose")
@@ -2397,6 +2665,8 @@ _restic_snapshots()
two_word_flags+=("-r")
flags+=("--repository-file=")
two_word_flags+=("--repository-file")
+ flags+=("--retry-lock=")
+ two_word_flags+=("--retry-lock")
flags+=("--tls-client-cert=")
two_word_flags+=("--tls-client-cert")
flags+=("--verbose")
@@ -2477,6 +2747,8 @@ _restic_stats()
two_word_flags+=("-r")
flags+=("--repository-file=")
two_word_flags+=("--repository-file")
+ flags+=("--retry-lock=")
+ two_word_flags+=("--retry-lock")
flags+=("--tls-client-cert=")
two_word_flags+=("--tls-client-cert")
flags+=("--verbose")
@@ -2565,6 +2837,8 @@ _restic_tag()
two_word_flags+=("-r")
flags+=("--repository-file=")
two_word_flags+=("--repository-file")
+ flags+=("--retry-lock=")
+ two_word_flags+=("--retry-lock")
flags+=("--tls-client-cert=")
two_word_flags+=("--tls-client-cert")
flags+=("--verbose")
@@ -2629,6 +2903,8 @@ _restic_unlock()
two_word_flags+=("-r")
flags+=("--repository-file=")
two_word_flags+=("--repository-file")
+ flags+=("--retry-lock=")
+ two_word_flags+=("--retry-lock")
flags+=("--tls-client-cert=")
two_word_flags+=("--tls-client-cert")
flags+=("--verbose")
@@ -2691,6 +2967,8 @@ _restic_version()
two_word_flags+=("-r")
flags+=("--repository-file=")
two_word_flags+=("--repository-file")
+ flags+=("--retry-lock=")
+ two_word_flags+=("--retry-lock")
flags+=("--tls-client-cert=")
two_word_flags+=("--tls-client-cert")
flags+=("--verbose")
@@ -2726,8 +3004,8 @@ _restic_root_command()
commands+=("migrate")
commands+=("mount")
commands+=("prune")
- commands+=("rebuild-index")
commands+=("recover")
+ commands+=("repair")
commands+=("restore")
commands+=("rewrite")
commands+=("self-update")
@@ -2781,6 +3059,8 @@ _restic_root_command()
two_word_flags+=("-r")
flags+=("--repository-file=")
two_word_flags+=("--repository-file")
+ flags+=("--retry-lock=")
+ two_word_flags+=("--retry-lock")
flags+=("--tls-client-cert=")
two_word_flags+=("--tls-client-cert")
flags+=("--verbose")
diff --git a/doc/conf.py b/doc/conf.py
index 3c0af927b..3fd8dc119 100644
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -106,5 +106,5 @@ html_static_path = ['_static']
htmlhelp_basename = 'resticdoc'
extlinks = {
- 'issue': ('https://github.com/restic/restic/issues/%s', '#'),
+ 'issue': ('https://github.com/restic/restic/issues/%s', '#%s'),
}
diff --git a/doc/design.rst b/doc/design.rst
index 3e25a0852..a58f803ea 100644
--- a/doc/design.rst
+++ b/doc/design.rst
@@ -45,10 +45,12 @@ comparing its output to the file name. If the prefix of a filename is
unique amongst all the other files in the same directory, the prefix may
be used instead of the complete filename.
-Apart from the files stored within the ``keys`` directory, all files are
-encrypted with AES-256 in counter mode (CTR). The integrity of the
-encrypted data is secured by a Poly1305-AES message authentication code
-(sometimes also referred to as a "signature").
+Apart from the files stored within the ``keys`` and ``data`` directories,
+all files are encrypted with AES-256 in counter mode (CTR). The integrity
+of the encrypted data is secured by a Poly1305-AES message authentication
+code (sometimes also referred to as a "signature").
+Files in the ``data`` directory ("pack files") consist of multiple parts
+which are all independently encrypted and authenticated, see below.
In the first 16 bytes of each encrypted file the initialisation vector
(IV) is stored. It is followed by the encrypted data and completed by
@@ -276,7 +278,7 @@ of a JSON document like the following:
},
{
"id": "9ccb846e60d90d4eb915848add7aa7ea1e4bbabfc60e573db9f7bfb2789afbae",
- "type": "tree",
+ "type": "data",
"offset": 38,
"length": 112,
"uncompressed_length": 511,
@@ -298,8 +300,8 @@ example, the Pack ``73d04e61`` contains two data Blobs and one Tree
blob, the plaintext hashes are listed afterwards. The ``length`` field
corresponds to ``Length(encrypted_blob)`` in the pack file header.
Field ``uncompressed_length`` is only present for compressed blobs and
-therefore is never present in version 1. It is set to the value of
-``Length(blob)``.
+therefore is never present in version 1 of the repository format. It is
+set to the value of ``Length(blob)``.
The field ``supersedes`` lists the storage IDs of index files that have
been replaced with the current index file. This happens when index files
@@ -410,7 +412,9 @@ and pretty-print the contents of a snapshot file:
{
"time": "2015-01-02T18:10:50.895208559+01:00",
"tree": "2da81727b6585232894cfbb8f8bdab8d1eccd3d8f7c92bc934d62e62e618ffdf",
- "dir": "/tmp/testdata",
+ "paths": [
+ "/tmp/testdata"
+ ],
"hostname": "kasimir",
"username": "fd0",
"uid": 1000,
@@ -436,7 +440,9 @@ becomes:
{
"time": "2015-01-02T18:10:50.895208559+01:00",
"tree": "2da81727b6585232894cfbb8f8bdab8d1eccd3d8f7c92bc934d62e62e618ffdf",
- "dir": "/tmp/testdata",
+ "paths": [
+ "/tmp/testdata"
+ ],
"hostname": "kasimir",
"username": "fd0",
"uid": 1000,
@@ -495,16 +501,25 @@ the JSON is indented):
}
A tree contains a list of entries (in the field ``nodes``) which contain
-meta data like a name and timestamps. When the entry references a
-directory, the field ``subtree`` contains the plain text ID of another
-tree object.
+meta data like a name and timestamps. Note that there are some specialties of how
+this metadata is generated:
+
+- The name is quoted using `strconv.Quote <https://pkg.go.dev/strconv#Quote>`__
+ before being saved. This handles non-unicode names, but also changes the
+ representation of names containing ``"`` or ``\``.
+
+- The filemode saved is the mode defined by `fs.FileMode <https://pkg.go.dev/io/fs#FileMode>`__
+ masked by ``os.ModePerm | os.ModeType | os.ModeSetuid | os.ModeSetgid | os.ModeSticky``
+
+When the entry references a directory, the field ``subtree`` contains the plain text
+ID of another tree object.
When the command ``restic cat blob`` is used, the plaintext ID is needed
to print a tree. The tree referenced above can be dumped as follows:
.. code-block:: console
- $ restic -r /tmp/restic-repo cat blob b26e315b0988ddcd1cee64c351d13a100fedbc9fdbb144a67d1b765ab280b4dc
+ $ restic -r /tmp/restic-repo cat blob b26e315b0988ddcd1cee64c351d13a100fedbc9fdbb144a67d1b765ab280b4dc | jq .
enter password for repository:
{
"nodes": [
@@ -533,6 +548,39 @@ This tree contains a file entry. This time, the ``subtree`` field is not
present and the ``content`` field contains a list with one plain text
SHA-256 hash.
+A symlink uses the following data structure:
+
+.. code-block:: console
+
+ $ restic -r /tmp/restic-repo cat blob 4c0a7d500bd1482ba01752e77c8d5a923304777d96b6522fae7c11e99b4e6fa6 | jq .
+ enter password for repository:
+ {
+ "nodes": [
+ {
+ "name": "testlink",
+ "type": "symlink",
+ "mode": 134218239,
+ "mtime": "2023-07-25T20:01:44.007465374+02:00",
+ "atime": "2023-07-25T20:01:44.007465374+02:00",
+ "ctime": "2023-07-25T20:01:44.007465374+02:00",
+ "uid": 1000,
+ "gid": 100,
+ "user": "fd0",
+ "inode": 33734827,
+ "links": 1,
+ "linktarget": "example_target",
+ "content": null
+ },
+ [...]
+ ]
+ }
+
+The symlink target is stored in the field ``linktarget``. As JSON strings can
+only contain valid unicode, an exception applies if the ``linktarget`` is not a
+valid UTF-8 string. Since restic 0.16.0, in such a case the ``linktarget_raw``
+field contains a base64 encoded version of the raw linktarget. The
+``linktarget_raw`` field is only set if ``linktarget`` cannot be encoded correctly.
+
The command ``restic cat blob`` can also be used to extract and decrypt
data given a plaintext ID, e.g. for the data mentioned above:
@@ -588,7 +636,10 @@ that the process is dead and considers the lock to be stale.
When a new lock is to be created and no other conflicting locks are
detected, restic creates a new lock, waits, and checks if other locks
appeared in the repository. Depending on the type of the other locks and
-the lock to be created, restic either continues or fails.
+the lock to be created, restic either continues or fails. If the
+``--retry-lock`` option is specified, restic will retry
+creating the lock periodically until it succeeds or the specified
+timeout expires.
Read and Write Ordering
=======================
diff --git a/doc/developer_information.rst b/doc/developer_information.rst
index c05edc9d2..9de517901 100644
--- a/doc/developer_information.rst
+++ b/doc/developer_information.rst
@@ -10,7 +10,7 @@ refer to the documentation for the respective version. The binary produced
depends on the following things:
* The source code for the release
- * The exact version of the official `Go compiler <https://golang.org>`__ used to produce the binaries (running ``restic version`` will print this)
+ * The exact version of the official `Go compiler <https://go.dev>`__ used to produce the binaries (running ``restic version`` will print this)
* The architecture and operating system the Go compiler runs on (Linux, ``amd64``)
* The build tags (for official binaries, it's the tag ``selfupdate``)
* The path where the source code is extracted to (``/restic``)
@@ -127,3 +127,5 @@ required argument is the new version number (in `Semantic Versioning
go run helpers/prepare-release/main.go 0.14.0
Checks can be skipped on demand via flags, please see ``--help`` for details.
+
+The build process requires ``docker``, ``docker-buildx`` and ``qemu-user-static-binfmt``.
diff --git a/doc/fish-completion.fish b/doc/fish-completion.fish
index aa60d536d..f9d7801e1 100644
--- a/doc/fish-completion.fish
+++ b/doc/fish-completion.fish
@@ -55,6 +55,60 @@ function __restic_perform_completion
printf "%s\n" "$directiveLine"
end
+# this function limits calls to __restic_perform_completion, by caching the result behind $__restic_perform_completion_once_result
+function __restic_perform_completion_once
+ __restic_debug "Starting __restic_perform_completion_once"
+
+ if test -n "$__restic_perform_completion_once_result"
+ __restic_debug "Seems like a valid result already exists, skipping __restic_perform_completion"
+ return 0
+ end
+
+ set --global __restic_perform_completion_once_result (__restic_perform_completion)
+ if test -z "$__restic_perform_completion_once_result"
+ __restic_debug "No completions, probably due to a failure"
+ return 1
+ end
+
+ __restic_debug "Performed completions and set __restic_perform_completion_once_result"
+ return 0
+end
+
+# this function is used to clear the $__restic_perform_completion_once_result variable after completions are run
+function __restic_clear_perform_completion_once_result
+ __restic_debug ""
+ __restic_debug "========= clearing previously set __restic_perform_completion_once_result variable =========="
+ set --erase __restic_perform_completion_once_result
+    __restic_debug "Successfully erased the variable __restic_perform_completion_once_result"
+end
+
+function __restic_requires_order_preservation
+ __restic_debug ""
+ __restic_debug "========= checking if order preservation is required =========="
+
+ __restic_perform_completion_once
+ if test -z "$__restic_perform_completion_once_result"
+ __restic_debug "Error determining if order preservation is required"
+ return 1
+ end
+
+ set -l directive (string sub --start 2 $__restic_perform_completion_once_result[-1])
+ __restic_debug "Directive is: $directive"
+
+ set -l shellCompDirectiveKeepOrder 32
+ set -l keeporder (math (math --scale 0 $directive / $shellCompDirectiveKeepOrder) % 2)
+ __restic_debug "Keeporder is: $keeporder"
+
+ if test $keeporder -ne 0
+ __restic_debug "This does require order preservation"
+ return 0
+ end
+
+ __restic_debug "This doesn't require order preservation"
+ return 1
+end
+
+
# This function does two things:
# - Obtain the completions and store them in the global __restic_comp_results
# - Return false if file completion should be performed
@@ -65,17 +119,17 @@ function __restic_prepare_completions
# Start fresh
set --erase __restic_comp_results
- set -l results (__restic_perform_completion)
- __restic_debug "Completion results: $results"
+ __restic_perform_completion_once
+ __restic_debug "Completion results: $__restic_perform_completion_once_result"
- if test -z "$results"
+ if test -z "$__restic_perform_completion_once_result"
__restic_debug "No completion, probably due to a failure"
# Might as well do file completion, in case it helps
return 1
end
- set -l directive (string sub --start 2 $results[-1])
- set --global __restic_comp_results $results[1..-2]
+ set -l directive (string sub --start 2 $__restic_perform_completion_once_result[-1])
+ set --global __restic_comp_results $__restic_perform_completion_once_result[1..-2]
__restic_debug "Completions are: $__restic_comp_results"
__restic_debug "Directive is: $directive"
@@ -171,7 +225,11 @@ end
# Remove any pre-existing completions for the program since we will be handling all of them.
complete -c restic -e
+# this will get called after the two calls below and clear the $__restic_perform_completion_once_result global
+complete -c restic -n '__restic_clear_perform_completion_once_result'
# The call to __restic_prepare_completions will setup __restic_comp_results
# which provides the program's completion choices.
-complete -c restic -n '__restic_prepare_completions' -f -a '$__restic_comp_results'
-
+# If this doesn't require order preservation, we don't use the -k flag
+complete -c restic -n 'not __restic_requires_order_preservation && __restic_prepare_completions' -f -a '$__restic_comp_results'
+# otherwise we use the -k flag
+complete -k -c restic -n '__restic_requires_order_preservation && __restic_prepare_completions' -f -a '$__restic_comp_results'
diff --git a/doc/index.rst b/doc/index.rst
index 034dbda23..8b72dcf58 100644
--- a/doc/index.rst
+++ b/doc/index.rst
@@ -14,6 +14,7 @@ Restic Documentation
060_forget
070_encryption
075_scripting
+ 077_troubleshooting
080_examples
090_participating
100_references
diff --git a/doc/man/restic-backup.1 b/doc/man/restic-backup.1
index 4297c3b8e..a07d19434 100644
--- a/doc/man/restic-backup.1
+++ b/doc/man/restic-backup.1
@@ -66,6 +66,10 @@ Exit status is 3 if some source data could not be read (incomplete snapshot crea
force re-reading the target files/directories (overrides the "parent" flag)
.PP
+\fB-g\fP, \fB--group-by\fP=host,paths
+ \fB\fCgroup\fR snapshots by host, paths and/or tags, separated by comma (disable grouping with '')
+
+.PP
\fB-h\fP, \fB--help\fP[=false]
help for backup
@@ -99,7 +103,7 @@ Exit status is 3 if some source data could not be read (incomplete snapshot crea
.PP
\fB--parent\fP=""
- use this parent \fB\fCsnapshot\fR (default: last snapshot in the repository that has the same target files/directories, and is not newer than the snapshot time)
+ use this parent \fB\fCsnapshot\fR (default: latest snapshot in the group determined by --group-by and not newer than the timestamp determined by --time)
.PP
\fB--read-concurrency\fP=0
@@ -129,7 +133,7 @@ Exit status is 3 if some source data could not be read (incomplete snapshot crea
.SH OPTIONS INHERITED FROM PARENT COMMANDS
.PP
\fB--cacert\fP=[]
- \fB\fCfile\fR to load root certificates from (default: use system certificates)
+ \fB\fCfile\fR to load root certificates from (default: use system certificates or $RESTIC_CACERT)
.PP
\fB--cache-dir\fP=""
@@ -141,7 +145,7 @@ Exit status is 3 if some source data could not be read (incomplete snapshot crea
.PP
\fB--compression\fP=auto
- compression mode (only available for repository format version 2), one of (auto|off|max)
+ compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION)
.PP
\fB--insecure-tls\fP[=false]
@@ -200,8 +204,12 @@ Exit status is 3 if some source data could not be read (incomplete snapshot crea
\fB\fCfile\fR to read the repository location from (default: $RESTIC_REPOSITORY_FILE)
.PP
+\fB--retry-lock\fP=0s
+ retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries)
+
+.PP
\fB--tls-client-cert\fP=""
- path to a \fB\fCfile\fR containing PEM encoded TLS client certificate and private key
+ path to a \fB\fCfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT)
.PP
\fB-v\fP, \fB--verbose\fP[=0]
diff --git a/doc/man/restic-cache.1 b/doc/man/restic-cache.1
index 3552fb1dc..a6ae75e31 100644
--- a/doc/man/restic-cache.1
+++ b/doc/man/restic-cache.1
@@ -42,7 +42,7 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
.SH OPTIONS INHERITED FROM PARENT COMMANDS
.PP
\fB--cacert\fP=[]
- \fB\fCfile\fR to load root certificates from (default: use system certificates)
+ \fB\fCfile\fR to load root certificates from (default: use system certificates or $RESTIC_CACERT)
.PP
\fB--cache-dir\fP=""
@@ -54,7 +54,7 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
.PP
\fB--compression\fP=auto
- compression mode (only available for repository format version 2), one of (auto|off|max)
+ compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION)
.PP
\fB--insecure-tls\fP[=false]
@@ -113,8 +113,12 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
\fB\fCfile\fR to read the repository location from (default: $RESTIC_REPOSITORY_FILE)
.PP
+\fB--retry-lock\fP=0s
+ retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries)
+
+.PP
\fB--tls-client-cert\fP=""
- path to a \fB\fCfile\fR containing PEM encoded TLS client certificate and private key
+ path to a \fB\fCfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT)
.PP
\fB-v\fP, \fB--verbose\fP[=0]
diff --git a/doc/man/restic-cat.1 b/doc/man/restic-cat.1
index 2e787fa06..08170582d 100644
--- a/doc/man/restic-cat.1
+++ b/doc/man/restic-cat.1
@@ -8,7 +8,7 @@ restic-cat - Print internal objects to stdout
.SH SYNOPSIS
.PP
-\fBrestic cat [flags] [pack|blob|snapshot|index|key|masterkey|config|lock] ID\fP
+\fBrestic cat [flags] [masterkey|config|pack ID|blob ID|snapshot ID|index ID|key ID|lock ID|tree snapshot:subfolder]\fP
.SH DESCRIPTION
@@ -30,7 +30,7 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
.SH OPTIONS INHERITED FROM PARENT COMMANDS
.PP
\fB--cacert\fP=[]
- \fB\fCfile\fR to load root certificates from (default: use system certificates)
+ \fB\fCfile\fR to load root certificates from (default: use system certificates or $RESTIC_CACERT)
.PP
\fB--cache-dir\fP=""
@@ -42,7 +42,7 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
.PP
\fB--compression\fP=auto
- compression mode (only available for repository format version 2), one of (auto|off|max)
+ compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION)
.PP
\fB--insecure-tls\fP[=false]
@@ -101,8 +101,12 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
\fB\fCfile\fR to read the repository location from (default: $RESTIC_REPOSITORY_FILE)
.PP
+\fB--retry-lock\fP=0s
+ retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries)
+
+.PP
\fB--tls-client-cert\fP=""
- path to a \fB\fCfile\fR containing PEM encoded TLS client certificate and private key
+ path to a \fB\fCfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT)
.PP
\fB-v\fP, \fB--verbose\fP[=0]
diff --git a/doc/man/restic-check.1 b/doc/man/restic-check.1
index e641fc2b5..0f7a594cd 100644
--- a/doc/man/restic-check.1
+++ b/doc/man/restic-check.1
@@ -41,13 +41,13 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
.PP
\fB--with-cache\fP[=false]
- use the cache
+ use existing cache, only read uncached data from repository
.SH OPTIONS INHERITED FROM PARENT COMMANDS
.PP
\fB--cacert\fP=[]
- \fB\fCfile\fR to load root certificates from (default: use system certificates)
+ \fB\fCfile\fR to load root certificates from (default: use system certificates or $RESTIC_CACERT)
.PP
\fB--cache-dir\fP=""
@@ -59,7 +59,7 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
.PP
\fB--compression\fP=auto
- compression mode (only available for repository format version 2), one of (auto|off|max)
+ compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION)
.PP
\fB--insecure-tls\fP[=false]
@@ -118,8 +118,12 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
\fB\fCfile\fR to read the repository location from (default: $RESTIC_REPOSITORY_FILE)
.PP
+\fB--retry-lock\fP=0s
+ retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries)
+
+.PP
\fB--tls-client-cert\fP=""
- path to a \fB\fCfile\fR containing PEM encoded TLS client certificate and private key
+ path to a \fB\fCfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT)
.PP
\fB-v\fP, \fB--verbose\fP[=0]
diff --git a/doc/man/restic-copy.1 b/doc/man/restic-copy.1
index 53badecc9..1dbfae0f3 100644
--- a/doc/man/restic-copy.1
+++ b/doc/man/restic-copy.1
@@ -71,7 +71,7 @@ new destination repository using the "init" command.
.SH OPTIONS INHERITED FROM PARENT COMMANDS
.PP
\fB--cacert\fP=[]
- \fB\fCfile\fR to load root certificates from (default: use system certificates)
+ \fB\fCfile\fR to load root certificates from (default: use system certificates or $RESTIC_CACERT)
.PP
\fB--cache-dir\fP=""
@@ -83,7 +83,7 @@ new destination repository using the "init" command.
.PP
\fB--compression\fP=auto
- compression mode (only available for repository format version 2), one of (auto|off|max)
+ compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION)
.PP
\fB--insecure-tls\fP[=false]
@@ -142,8 +142,12 @@ new destination repository using the "init" command.
\fB\fCfile\fR to read the repository location from (default: $RESTIC_REPOSITORY_FILE)
.PP
+\fB--retry-lock\fP=0s
+ retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries)
+
+.PP
\fB--tls-client-cert\fP=""
- path to a \fB\fCfile\fR containing PEM encoded TLS client certificate and private key
+ path to a \fB\fCfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT)
.PP
\fB-v\fP, \fB--verbose\fP[=0]
diff --git a/doc/man/restic-diff.1 b/doc/man/restic-diff.1
index 31c34dc8a..15f7c6b9f 100644
--- a/doc/man/restic-diff.1
+++ b/doc/man/restic-diff.1
@@ -50,7 +50,7 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
.SH OPTIONS INHERITED FROM PARENT COMMANDS
.PP
\fB--cacert\fP=[]
- \fB\fCfile\fR to load root certificates from (default: use system certificates)
+ \fB\fCfile\fR to load root certificates from (default: use system certificates or $RESTIC_CACERT)
.PP
\fB--cache-dir\fP=""
@@ -62,7 +62,7 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
.PP
\fB--compression\fP=auto
- compression mode (only available for repository format version 2), one of (auto|off|max)
+ compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION)
.PP
\fB--insecure-tls\fP[=false]
@@ -121,8 +121,12 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
\fB\fCfile\fR to read the repository location from (default: $RESTIC_REPOSITORY_FILE)
.PP
+\fB--retry-lock\fP=0s
+ retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries)
+
+.PP
\fB--tls-client-cert\fP=""
- path to a \fB\fCfile\fR containing PEM encoded TLS client certificate and private key
+ path to a \fB\fCfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT)
.PP
\fB-v\fP, \fB--verbose\fP[=0]
diff --git a/doc/man/restic-dump.1 b/doc/man/restic-dump.1
index 61b3b3ec8..d06800e4b 100644
--- a/doc/man/restic-dump.1
+++ b/doc/man/restic-dump.1
@@ -53,7 +53,7 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
.SH OPTIONS INHERITED FROM PARENT COMMANDS
.PP
\fB--cacert\fP=[]
- \fB\fCfile\fR to load root certificates from (default: use system certificates)
+ \fB\fCfile\fR to load root certificates from (default: use system certificates or $RESTIC_CACERT)
.PP
\fB--cache-dir\fP=""
@@ -65,7 +65,7 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
.PP
\fB--compression\fP=auto
- compression mode (only available for repository format version 2), one of (auto|off|max)
+ compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION)
.PP
\fB--insecure-tls\fP[=false]
@@ -124,8 +124,12 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
\fB\fCfile\fR to read the repository location from (default: $RESTIC_REPOSITORY_FILE)
.PP
+\fB--retry-lock\fP=0s
+ retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries)
+
+.PP
\fB--tls-client-cert\fP=""
- path to a \fB\fCfile\fR containing PEM encoded TLS client certificate and private key
+ path to a \fB\fCfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT)
.PP
\fB-v\fP, \fB--verbose\fP[=0]
diff --git a/doc/man/restic-find.1 b/doc/man/restic-find.1
index 9fa4dd811..5038a72e7 100644
--- a/doc/man/restic-find.1
+++ b/doc/man/restic-find.1
@@ -32,6 +32,10 @@ It can also be used to search for restic blobs or trees for troubleshooting.
only consider snapshots for this \fB\fChost\fR (can be specified multiple times)
.PP
+\fB--human-readable\fP[=false]
+ print sizes in human readable format
+
+.PP
\fB-i\fP, \fB--ignore-case\fP[=false]
ignore case for pattern
@@ -75,7 +79,7 @@ It can also be used to search for restic blobs or trees for troubleshooting.
.SH OPTIONS INHERITED FROM PARENT COMMANDS
.PP
\fB--cacert\fP=[]
- \fB\fCfile\fR to load root certificates from (default: use system certificates)
+ \fB\fCfile\fR to load root certificates from (default: use system certificates or $RESTIC_CACERT)
.PP
\fB--cache-dir\fP=""
@@ -87,7 +91,7 @@ It can also be used to search for restic blobs or trees for troubleshooting.
.PP
\fB--compression\fP=auto
- compression mode (only available for repository format version 2), one of (auto|off|max)
+ compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION)
.PP
\fB--insecure-tls\fP[=false]
@@ -146,8 +150,12 @@ It can also be used to search for restic blobs or trees for troubleshooting.
\fB\fCfile\fR to read the repository location from (default: $RESTIC_REPOSITORY_FILE)
.PP
+\fB--retry-lock\fP=0s
+ retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries)
+
+.PP
\fB--tls-client-cert\fP=""
- path to a \fB\fCfile\fR containing PEM encoded TLS client certificate and private key
+ path to a \fB\fCfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT)
.PP
\fB-v\fP, \fB--verbose\fP[=0]
diff --git a/doc/man/restic-forget.1 b/doc/man/restic-forget.1
index d8a69856e..6920d1bba 100644
--- a/doc/man/restic-forget.1
+++ b/doc/man/restic-forget.1
@@ -35,27 +35,27 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
.SH OPTIONS
.PP
\fB-l\fP, \fB--keep-last\fP=0
- keep the last \fB\fCn\fR snapshots
+ keep the last \fB\fCn\fR snapshots (use 'unlimited' to keep all snapshots)
.PP
\fB-H\fP, \fB--keep-hourly\fP=0
- keep the last \fB\fCn\fR hourly snapshots
+ keep the last \fB\fCn\fR hourly snapshots (use 'unlimited' to keep all hourly snapshots)
.PP
\fB-d\fP, \fB--keep-daily\fP=0
- keep the last \fB\fCn\fR daily snapshots
+ keep the last \fB\fCn\fR daily snapshots (use 'unlimited' to keep all daily snapshots)
.PP
\fB-w\fP, \fB--keep-weekly\fP=0
- keep the last \fB\fCn\fR weekly snapshots
+ keep the last \fB\fCn\fR weekly snapshots (use 'unlimited' to keep all weekly snapshots)
.PP
\fB-m\fP, \fB--keep-monthly\fP=0
- keep the last \fB\fCn\fR monthly snapshots
+ keep the last \fB\fCn\fR monthly snapshots (use 'unlimited' to keep all monthly snapshots)
.PP
\fB-y\fP, \fB--keep-yearly\fP=0
- keep the last \fB\fCn\fR yearly snapshots
+ keep the last \fB\fCn\fR yearly snapshots (use 'unlimited' to keep all yearly snapshots)
.PP
\fB--keep-within\fP=
@@ -102,7 +102,7 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
use compact output format
.PP
-\fB-g\fP, \fB--group-by\fP="host,paths"
+\fB-g\fP, \fB--group-by\fP=host,paths
\fB\fCgroup\fR snapshots by host, paths and/or tags, separated by comma (disable grouping with '')
.PP
@@ -141,7 +141,7 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
.SH OPTIONS INHERITED FROM PARENT COMMANDS
.PP
\fB--cacert\fP=[]
- \fB\fCfile\fR to load root certificates from (default: use system certificates)
+ \fB\fCfile\fR to load root certificates from (default: use system certificates or $RESTIC_CACERT)
.PP
\fB--cache-dir\fP=""
@@ -153,7 +153,7 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
.PP
\fB--compression\fP=auto
- compression mode (only available for repository format version 2), one of (auto|off|max)
+ compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION)
.PP
\fB--insecure-tls\fP[=false]
@@ -212,8 +212,12 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
\fB\fCfile\fR to read the repository location from (default: $RESTIC_REPOSITORY_FILE)
.PP
+\fB--retry-lock\fP=0s
+ retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries)
+
+.PP
\fB--tls-client-cert\fP=""
- path to a \fB\fCfile\fR containing PEM encoded TLS client certificate and private key
+ path to a \fB\fCfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT)
.PP
\fB-v\fP, \fB--verbose\fP[=0]
diff --git a/doc/man/restic-generate.1 b/doc/man/restic-generate.1
index 6b54ebfca..a557ebd9a 100644
--- a/doc/man/restic-generate.1
+++ b/doc/man/restic-generate.1
@@ -51,7 +51,7 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
.SH OPTIONS INHERITED FROM PARENT COMMANDS
.PP
\fB--cacert\fP=[]
- \fB\fCfile\fR to load root certificates from (default: use system certificates)
+ \fB\fCfile\fR to load root certificates from (default: use system certificates or $RESTIC_CACERT)
.PP
\fB--cache-dir\fP=""
@@ -63,7 +63,7 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
.PP
\fB--compression\fP=auto
- compression mode (only available for repository format version 2), one of (auto|off|max)
+ compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION)
.PP
\fB--insecure-tls\fP[=false]
@@ -122,8 +122,12 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
\fB\fCfile\fR to read the repository location from (default: $RESTIC_REPOSITORY_FILE)
.PP
+\fB--retry-lock\fP=0s
+ retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries)
+
+.PP
\fB--tls-client-cert\fP=""
- path to a \fB\fCfile\fR containing PEM encoded TLS client certificate and private key
+ path to a \fB\fCfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT)
.PP
\fB-v\fP, \fB--verbose\fP[=0]
diff --git a/doc/man/restic-init.1 b/doc/man/restic-init.1
index 194f31756..7916b6162 100644
--- a/doc/man/restic-init.1
+++ b/doc/man/restic-init.1
@@ -58,7 +58,7 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
.SH OPTIONS INHERITED FROM PARENT COMMANDS
.PP
\fB--cacert\fP=[]
- \fB\fCfile\fR to load root certificates from (default: use system certificates)
+ \fB\fCfile\fR to load root certificates from (default: use system certificates or $RESTIC_CACERT)
.PP
\fB--cache-dir\fP=""
@@ -70,7 +70,7 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
.PP
\fB--compression\fP=auto
- compression mode (only available for repository format version 2), one of (auto|off|max)
+ compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION)
.PP
\fB--insecure-tls\fP[=false]
@@ -129,8 +129,12 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
\fB\fCfile\fR to read the repository location from (default: $RESTIC_REPOSITORY_FILE)
.PP
+\fB--retry-lock\fP=0s
+ retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries)
+
+.PP
\fB--tls-client-cert\fP=""
- path to a \fB\fCfile\fR containing PEM encoded TLS client certificate and private key
+ path to a \fB\fCfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT)
.PP
\fB-v\fP, \fB--verbose\fP[=0]
diff --git a/doc/man/restic-key.1 b/doc/man/restic-key.1
index 4163cefa5..a5e7a5421 100644
--- a/doc/man/restic-key.1
+++ b/doc/man/restic-key.1
@@ -42,7 +42,7 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
.SH OPTIONS INHERITED FROM PARENT COMMANDS
.PP
\fB--cacert\fP=[]
- \fB\fCfile\fR to load root certificates from (default: use system certificates)
+ \fB\fCfile\fR to load root certificates from (default: use system certificates or $RESTIC_CACERT)
.PP
\fB--cache-dir\fP=""
@@ -54,7 +54,7 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
.PP
\fB--compression\fP=auto
- compression mode (only available for repository format version 2), one of (auto|off|max)
+ compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION)
.PP
\fB--insecure-tls\fP[=false]
@@ -113,8 +113,12 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
\fB\fCfile\fR to read the repository location from (default: $RESTIC_REPOSITORY_FILE)
.PP
+\fB--retry-lock\fP=0s
+ retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries)
+
+.PP
\fB--tls-client-cert\fP=""
- path to a \fB\fCfile\fR containing PEM encoded TLS client certificate and private key
+ path to a \fB\fCfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT)
.PP
\fB-v\fP, \fB--verbose\fP[=0]
diff --git a/doc/man/restic-list.1 b/doc/man/restic-list.1
index 6683e2c47..48ca94274 100644
--- a/doc/man/restic-list.1
+++ b/doc/man/restic-list.1
@@ -30,7 +30,7 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
.SH OPTIONS INHERITED FROM PARENT COMMANDS
.PP
\fB--cacert\fP=[]
- \fB\fCfile\fR to load root certificates from (default: use system certificates)
+ \fB\fCfile\fR to load root certificates from (default: use system certificates or $RESTIC_CACERT)
.PP
\fB--cache-dir\fP=""
@@ -42,7 +42,7 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
.PP
\fB--compression\fP=auto
- compression mode (only available for repository format version 2), one of (auto|off|max)
+ compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION)
.PP
\fB--insecure-tls\fP[=false]
@@ -101,8 +101,12 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
\fB\fCfile\fR to read the repository location from (default: $RESTIC_REPOSITORY_FILE)
.PP
+\fB--retry-lock\fP=0s
+ retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries)
+
+.PP
\fB--tls-client-cert\fP=""
- path to a \fB\fCfile\fR containing PEM encoded TLS client certificate and private key
+ path to a \fB\fCfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT)
.PP
\fB-v\fP, \fB--verbose\fP[=0]
diff --git a/doc/man/restic-ls.1 b/doc/man/restic-ls.1
index a16716434..1df321132 100644
--- a/doc/man/restic-ls.1
+++ b/doc/man/restic-ls.1
@@ -46,6 +46,10 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
only consider snapshots for this \fB\fChost\fR, when snapshot ID "latest" is given (can be specified multiple times)
.PP
+\fB--human-readable\fP[=false]
+ print sizes in human readable format
+
+.PP
\fB-l\fP, \fB--long\fP[=false]
use a long listing format showing size and mode
@@ -65,7 +69,7 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
.SH OPTIONS INHERITED FROM PARENT COMMANDS
.PP
\fB--cacert\fP=[]
- \fB\fCfile\fR to load root certificates from (default: use system certificates)
+ \fB\fCfile\fR to load root certificates from (default: use system certificates or $RESTIC_CACERT)
.PP
\fB--cache-dir\fP=""
@@ -77,7 +81,7 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
.PP
\fB--compression\fP=auto
- compression mode (only available for repository format version 2), one of (auto|off|max)
+ compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION)
.PP
\fB--insecure-tls\fP[=false]
@@ -136,8 +140,12 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
\fB\fCfile\fR to read the repository location from (default: $RESTIC_REPOSITORY_FILE)
.PP
+\fB--retry-lock\fP=0s
+ retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries)
+
+.PP
\fB--tls-client-cert\fP=""
- path to a \fB\fCfile\fR containing PEM encoded TLS client certificate and private key
+ path to a \fB\fCfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT)
.PP
\fB-v\fP, \fB--verbose\fP[=0]
diff --git a/doc/man/restic-migrate.1 b/doc/man/restic-migrate.1
index d8127090e..63aa784ea 100644
--- a/doc/man/restic-migrate.1
+++ b/doc/man/restic-migrate.1
@@ -36,7 +36,7 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
.SH OPTIONS INHERITED FROM PARENT COMMANDS
.PP
\fB--cacert\fP=[]
- \fB\fCfile\fR to load root certificates from (default: use system certificates)
+ \fB\fCfile\fR to load root certificates from (default: use system certificates or $RESTIC_CACERT)
.PP
\fB--cache-dir\fP=""
@@ -48,7 +48,7 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
.PP
\fB--compression\fP=auto
- compression mode (only available for repository format version 2), one of (auto|off|max)
+ compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION)
.PP
\fB--insecure-tls\fP[=false]
@@ -107,8 +107,12 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
\fB\fCfile\fR to read the repository location from (default: $RESTIC_REPOSITORY_FILE)
.PP
+\fB--retry-lock\fP=0s
+ retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries)
+
+.PP
\fB--tls-client-cert\fP=""
- path to a \fB\fCfile\fR containing PEM encoded TLS client certificate and private key
+ path to a \fB\fCfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT)
.PP
\fB-v\fP, \fB--verbose\fP[=0]
diff --git a/doc/man/restic-mount.1 b/doc/man/restic-mount.1
index ce4f893a7..a6ffa2d67 100644
--- a/doc/man/restic-mount.1
+++ b/doc/man/restic-mount.1
@@ -114,7 +114,7 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
.SH OPTIONS INHERITED FROM PARENT COMMANDS
.PP
\fB--cacert\fP=[]
- \fB\fCfile\fR to load root certificates from (default: use system certificates)
+ \fB\fCfile\fR to load root certificates from (default: use system certificates or $RESTIC_CACERT)
.PP
\fB--cache-dir\fP=""
@@ -126,7 +126,7 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
.PP
\fB--compression\fP=auto
- compression mode (only available for repository format version 2), one of (auto|off|max)
+ compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION)
.PP
\fB--insecure-tls\fP[=false]
@@ -185,8 +185,12 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
\fB\fCfile\fR to read the repository location from (default: $RESTIC_REPOSITORY_FILE)
.PP
+\fB--retry-lock\fP=0s
+ retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries)
+
+.PP
\fB--tls-client-cert\fP=""
- path to a \fB\fCfile\fR containing PEM encoded TLS client certificate and private key
+ path to a \fB\fCfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT)
.PP
\fB-v\fP, \fB--verbose\fP[=0]
diff --git a/doc/man/restic-prune.1 b/doc/man/restic-prune.1
index 197cb1130..7f01a1e8f 100644
--- a/doc/man/restic-prune.1
+++ b/doc/man/restic-prune.1
@@ -59,7 +59,7 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
.SH OPTIONS INHERITED FROM PARENT COMMANDS
.PP
\fB--cacert\fP=[]
- \fB\fCfile\fR to load root certificates from (default: use system certificates)
+ \fB\fCfile\fR to load root certificates from (default: use system certificates or $RESTIC_CACERT)
.PP
\fB--cache-dir\fP=""
@@ -71,7 +71,7 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
.PP
\fB--compression\fP=auto
- compression mode (only available for repository format version 2), one of (auto|off|max)
+ compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION)
.PP
\fB--insecure-tls\fP[=false]
@@ -130,8 +130,12 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
\fB\fCfile\fR to read the repository location from (default: $RESTIC_REPOSITORY_FILE)
.PP
+\fB--retry-lock\fP=0s
+ retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries)
+
+.PP
\fB--tls-client-cert\fP=""
- path to a \fB\fCfile\fR containing PEM encoded TLS client certificate and private key
+ path to a \fB\fCfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT)
.PP
\fB-v\fP, \fB--verbose\fP[=0]
diff --git a/doc/man/restic-recover.1 b/doc/man/restic-recover.1
index aa3441156..4e9e1c92f 100644
--- a/doc/man/restic-recover.1
+++ b/doc/man/restic-recover.1
@@ -32,7 +32,7 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
.SH OPTIONS INHERITED FROM PARENT COMMANDS
.PP
\fB--cacert\fP=[]
- \fB\fCfile\fR to load root certificates from (default: use system certificates)
+ \fB\fCfile\fR to load root certificates from (default: use system certificates or $RESTIC_CACERT)
.PP
\fB--cache-dir\fP=""
@@ -44,7 +44,7 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
.PP
\fB--compression\fP=auto
- compression mode (only available for repository format version 2), one of (auto|off|max)
+ compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION)
.PP
\fB--insecure-tls\fP[=false]
@@ -103,8 +103,12 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
\fB\fCfile\fR to read the repository location from (default: $RESTIC_REPOSITORY_FILE)
.PP
+\fB--retry-lock\fP=0s
+ retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries)
+
+.PP
\fB--tls-client-cert\fP=""
- path to a \fB\fCfile\fR containing PEM encoded TLS client certificate and private key
+ path to a \fB\fCfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT)
.PP
\fB-v\fP, \fB--verbose\fP[=0]
diff --git a/doc/man/restic-rebuild-index.1 b/doc/man/restic-repair-index.1
index 18878b66f..773f44a42 100644
--- a/doc/man/restic-rebuild-index.1
+++ b/doc/man/restic-repair-index.1
@@ -3,17 +3,17 @@
.SH NAME
.PP
-restic-rebuild-index - Build a new index
+restic-repair-index - Build a new index
.SH SYNOPSIS
.PP
-\fBrestic rebuild-index [flags]\fP
+\fBrestic repair index [flags]\fP
.SH DESCRIPTION
.PP
-The "rebuild-index" command creates a new index based on the pack files in the
+The "repair index" command creates a new index based on the pack files in the
repository.
@@ -25,7 +25,7 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
.SH OPTIONS
.PP
\fB-h\fP, \fB--help\fP[=false]
- help for rebuild-index
+ help for index
.PP
\fB--read-all-packs\fP[=false]
@@ -35,7 +35,7 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
.SH OPTIONS INHERITED FROM PARENT COMMANDS
.PP
\fB--cacert\fP=[]
- \fB\fCfile\fR to load root certificates from (default: use system certificates)
+ \fB\fCfile\fR to load root certificates from (default: use system certificates or $RESTIC_CACERT)
.PP
\fB--cache-dir\fP=""
@@ -47,7 +47,7 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
.PP
\fB--compression\fP=auto
- compression mode (only available for repository format version 2), one of (auto|off|max)
+ compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION)
.PP
\fB--insecure-tls\fP[=false]
@@ -106,8 +106,12 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
\fB\fCfile\fR to read the repository location from (default: $RESTIC_REPOSITORY_FILE)
.PP
+\fB--retry-lock\fP=0s
+ retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries)
+
+.PP
\fB--tls-client-cert\fP=""
- path to a \fB\fCfile\fR containing PEM encoded TLS client certificate and private key
+ path to a \fB\fCfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT)
.PP
\fB-v\fP, \fB--verbose\fP[=0]
@@ -116,4 +120,4 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
.SH SEE ALSO
.PP
-\fBrestic(1)\fP
+\fBrestic-repair(1)\fP
diff --git a/doc/man/restic-repair-snapshots.1 b/doc/man/restic-repair-snapshots.1
new file mode 100644
index 000000000..45acf8e7b
--- /dev/null
+++ b/doc/man/restic-repair-snapshots.1
@@ -0,0 +1,157 @@
+.nh
+.TH "restic repair snapshots" "1" "Jan 2017" "generated by \fB\fCrestic generate\fR" ""
+
+.SH NAME
+.PP
+restic-repair-snapshots - Repair snapshots
+
+
+.SH SYNOPSIS
+.PP
+\fBrestic repair snapshots [flags] [snapshot ID] [...]\fP
+
+
+.SH DESCRIPTION
+.PP
+The "repair snapshots" command repairs broken snapshots. It scans the given
+snapshots and generates new ones with damaged directories and file contents
+removed. If the broken snapshots are deleted, a prune run will be able to
+clean up the repository.
+
+.PP
+The command depends on a correct index, thus make sure to run "repair index"
+first!
+
+
+.SH WARNING
+.PP
+Repairing and deleting broken snapshots causes data loss! It will remove broken
+directories and modify broken files in the modified snapshots.
+
+.PP
+If the contents of directories and files are still available, the better option
+is to run "backup" which in that case is able to heal existing snapshots. Only
+use the "repair snapshots" command if you need to recover an old and broken
+snapshot!
+
+
+.SH EXIT STATUS
+.PP
+Exit status is 0 if the command was successful, and non-zero if there was any error.
+
+
+.SH OPTIONS
+.PP
+\fB-n\fP, \fB--dry-run\fP[=false]
+ do not do anything, just print what would be done
+
+.PP
+\fB--forget\fP[=false]
+ remove original snapshots after creating new ones
+
+.PP
+\fB-h\fP, \fB--help\fP[=false]
+ help for snapshots
+
+.PP
+\fB-H\fP, \fB--host\fP=[]
+ only consider snapshots for this \fB\fChost\fR (can be specified multiple times)
+
+.PP
+\fB--path\fP=[]
+ only consider snapshots including this (absolute) \fB\fCpath\fR (can be specified multiple times)
+
+.PP
+\fB--tag\fP=[]
+ only consider snapshots including \fB\fCtag[,tag,...]\fR (can be specified multiple times)
+
+
+.SH OPTIONS INHERITED FROM PARENT COMMANDS
+.PP
+\fB--cacert\fP=[]
+ \fB\fCfile\fR to load root certificates from (default: use system certificates or $RESTIC_CACERT)
+
+.PP
+\fB--cache-dir\fP=""
+ set the cache \fB\fCdirectory\fR\&. (default: use system default cache directory)
+
+.PP
+\fB--cleanup-cache\fP[=false]
+ auto remove old cache directories
+
+.PP
+\fB--compression\fP=auto
+ compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION)
+
+.PP
+\fB--insecure-tls\fP[=false]
+ skip TLS certificate verification when connecting to the repository (insecure)
+
+.PP
+\fB--json\fP[=false]
+ set output mode to JSON for commands that support it
+
+.PP
+\fB--key-hint\fP=""
+ \fB\fCkey\fR ID of key to try decrypting first (default: $RESTIC_KEY_HINT)
+
+.PP
+\fB--limit-download\fP=0
+ limits downloads to a maximum \fB\fCrate\fR in KiB/s. (default: unlimited)
+
+.PP
+\fB--limit-upload\fP=0
+ limits uploads to a maximum \fB\fCrate\fR in KiB/s. (default: unlimited)
+
+.PP
+\fB--no-cache\fP[=false]
+ do not use a local cache
+
+.PP
+\fB--no-lock\fP[=false]
+ do not lock the repository, this allows some operations on read-only repositories
+
+.PP
+\fB-o\fP, \fB--option\fP=[]
+ set extended option (\fB\fCkey=value\fR, can be specified multiple times)
+
+.PP
+\fB--pack-size\fP=0
+ set target pack \fB\fCsize\fR in MiB, created pack files may be larger (default: $RESTIC_PACK_SIZE)
+
+.PP
+\fB--password-command\fP=""
+ shell \fB\fCcommand\fR to obtain the repository password from (default: $RESTIC_PASSWORD_COMMAND)
+
+.PP
+\fB-p\fP, \fB--password-file\fP=""
+ \fB\fCfile\fR to read the repository password from (default: $RESTIC_PASSWORD_FILE)
+
+.PP
+\fB-q\fP, \fB--quiet\fP[=false]
+ do not output comprehensive progress report
+
+.PP
+\fB-r\fP, \fB--repo\fP=""
+ \fB\fCrepository\fR to backup to or restore from (default: $RESTIC_REPOSITORY)
+
+.PP
+\fB--repository-file\fP=""
+ \fB\fCfile\fR to read the repository location from (default: $RESTIC_REPOSITORY_FILE)
+
+.PP
+\fB--retry-lock\fP=0s
+ retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries)
+
+.PP
+\fB--tls-client-cert\fP=""
+ path to a \fB\fCfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT)
+
+.PP
+\fB-v\fP, \fB--verbose\fP[=0]
+ be verbose (specify multiple times or a level using --verbose=n, max level/times is 2)
+
+
+.SH SEE ALSO
+.PP
+\fBrestic-repair(1)\fP
diff --git a/doc/man/restic-repair.1 b/doc/man/restic-repair.1
new file mode 100644
index 000000000..e1e0520c6
--- /dev/null
+++ b/doc/man/restic-repair.1
@@ -0,0 +1,113 @@
+.nh
+.TH "restic repair" "1" "Jan 2017" "generated by \fB\fCrestic generate\fR" ""
+
+.SH NAME
+.PP
+restic-repair - Repair the repository
+
+
+.SH SYNOPSIS
+.PP
+\fBrestic repair [flags]\fP
+
+
+.SH DESCRIPTION
+.PP
+Repair the repository
+
+
+.SH OPTIONS
+.PP
+\fB-h\fP, \fB--help\fP[=false]
+ help for repair
+
+
+.SH OPTIONS INHERITED FROM PARENT COMMANDS
+.PP
+\fB--cacert\fP=[]
+ \fB\fCfile\fR to load root certificates from (default: use system certificates or $RESTIC_CACERT)
+
+.PP
+\fB--cache-dir\fP=""
+ set the cache \fB\fCdirectory\fR\&. (default: use system default cache directory)
+
+.PP
+\fB--cleanup-cache\fP[=false]
+ auto remove old cache directories
+
+.PP
+\fB--compression\fP=auto
+ compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION)
+
+.PP
+\fB--insecure-tls\fP[=false]
+ skip TLS certificate verification when connecting to the repository (insecure)
+
+.PP
+\fB--json\fP[=false]
+ set output mode to JSON for commands that support it
+
+.PP
+\fB--key-hint\fP=""
+ \fB\fCkey\fR ID of key to try decrypting first (default: $RESTIC_KEY_HINT)
+
+.PP
+\fB--limit-download\fP=0
+ limits downloads to a maximum \fB\fCrate\fR in KiB/s. (default: unlimited)
+
+.PP
+\fB--limit-upload\fP=0
+ limits uploads to a maximum \fB\fCrate\fR in KiB/s. (default: unlimited)
+
+.PP
+\fB--no-cache\fP[=false]
+ do not use a local cache
+
+.PP
+\fB--no-lock\fP[=false]
+ do not lock the repository, this allows some operations on read-only repositories
+
+.PP
+\fB-o\fP, \fB--option\fP=[]
+ set extended option (\fB\fCkey=value\fR, can be specified multiple times)
+
+.PP
+\fB--pack-size\fP=0
+ set target pack \fB\fCsize\fR in MiB, created pack files may be larger (default: $RESTIC_PACK_SIZE)
+
+.PP
+\fB--password-command\fP=""
+ shell \fB\fCcommand\fR to obtain the repository password from (default: $RESTIC_PASSWORD_COMMAND)
+
+.PP
+\fB-p\fP, \fB--password-file\fP=""
+ \fB\fCfile\fR to read the repository password from (default: $RESTIC_PASSWORD_FILE)
+
+.PP
+\fB-q\fP, \fB--quiet\fP[=false]
+ do not output comprehensive progress report
+
+.PP
+\fB-r\fP, \fB--repo\fP=""
+ \fB\fCrepository\fR to backup to or restore from (default: $RESTIC_REPOSITORY)
+
+.PP
+\fB--repository-file\fP=""
+ \fB\fCfile\fR to read the repository location from (default: $RESTIC_REPOSITORY_FILE)
+
+.PP
+\fB--retry-lock\fP=0s
+ retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries)
+
+.PP
+\fB--tls-client-cert\fP=""
+ path to a \fB\fCfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT)
+
+.PP
+\fB-v\fP, \fB--verbose\fP[=0]
+ be verbose (specify multiple times or a level using --verbose=n, max level/times is 2)
+
+
+.SH SEE ALSO
+.PP
+\fBrestic(1)\fP, \fBrestic-repair-index(1)\fP, \fBrestic-repair-snapshots(1)\fP
diff --git a/doc/man/restic-restore.1 b/doc/man/restic-restore.1
index 39ff62059..6abc8d5aa 100644
--- a/doc/man/restic-restore.1
+++ b/doc/man/restic-restore.1
@@ -75,7 +75,7 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
.SH OPTIONS INHERITED FROM PARENT COMMANDS
.PP
\fB--cacert\fP=[]
- \fB\fCfile\fR to load root certificates from (default: use system certificates)
+ \fB\fCfile\fR to load root certificates from (default: use system certificates or $RESTIC_CACERT)
.PP
\fB--cache-dir\fP=""
@@ -87,7 +87,7 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
.PP
\fB--compression\fP=auto
- compression mode (only available for repository format version 2), one of (auto|off|max)
+ compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION)
.PP
\fB--insecure-tls\fP[=false]
@@ -146,8 +146,12 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
\fB\fCfile\fR to read the repository location from (default: $RESTIC_REPOSITORY_FILE)
.PP
+\fB--retry-lock\fP=0s
+ retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries)
+
+.PP
\fB--tls-client-cert\fP=""
- path to a \fB\fCfile\fR containing PEM encoded TLS client certificate and private key
+ path to a \fB\fCfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT)
.PP
\fB-v\fP, \fB--verbose\fP[=0]
diff --git a/doc/man/restic-rewrite.1 b/doc/man/restic-rewrite.1
index 6edf51b95..30960e577 100644
--- a/doc/man/restic-rewrite.1
+++ b/doc/man/restic-rewrite.1
@@ -83,7 +83,7 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
.SH OPTIONS INHERITED FROM PARENT COMMANDS
.PP
\fB--cacert\fP=[]
- \fB\fCfile\fR to load root certificates from (default: use system certificates)
+ \fB\fCfile\fR to load root certificates from (default: use system certificates or $RESTIC_CACERT)
.PP
\fB--cache-dir\fP=""
@@ -95,7 +95,7 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
.PP
\fB--compression\fP=auto
- compression mode (only available for repository format version 2), one of (auto|off|max)
+ compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION)
.PP
\fB--insecure-tls\fP[=false]
@@ -154,8 +154,12 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
\fB\fCfile\fR to read the repository location from (default: $RESTIC_REPOSITORY_FILE)
.PP
+\fB--retry-lock\fP=0s
+ retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries)
+
+.PP
\fB--tls-client-cert\fP=""
- path to a \fB\fCfile\fR containing PEM encoded TLS client certificate and private key
+ path to a \fB\fCfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT)
.PP
\fB-v\fP, \fB--verbose\fP[=0]
diff --git a/doc/man/restic-self-update.1 b/doc/man/restic-self-update.1
index e311b2277..c981b93fe 100644
--- a/doc/man/restic-self-update.1
+++ b/doc/man/restic-self-update.1
@@ -37,7 +37,7 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
.SH OPTIONS INHERITED FROM PARENT COMMANDS
.PP
\fB--cacert\fP=[]
- \fB\fCfile\fR to load root certificates from (default: use system certificates)
+ \fB\fCfile\fR to load root certificates from (default: use system certificates or $RESTIC_CACERT)
.PP
\fB--cache-dir\fP=""
@@ -49,7 +49,7 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
.PP
\fB--compression\fP=auto
- compression mode (only available for repository format version 2), one of (auto|off|max)
+ compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION)
.PP
\fB--insecure-tls\fP[=false]
@@ -108,8 +108,12 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
\fB\fCfile\fR to read the repository location from (default: $RESTIC_REPOSITORY_FILE)
.PP
+\fB--retry-lock\fP=0s
+ retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries)
+
+.PP
\fB--tls-client-cert\fP=""
- path to a \fB\fCfile\fR containing PEM encoded TLS client certificate and private key
+ path to a \fB\fCfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT)
.PP
\fB-v\fP, \fB--verbose\fP[=0]
diff --git a/doc/man/restic-snapshots.1 b/doc/man/restic-snapshots.1
index d2dbf52ee..9770e42ef 100644
--- a/doc/man/restic-snapshots.1
+++ b/doc/man/restic-snapshots.1
@@ -27,7 +27,7 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
use compact output format
.PP
-\fB-g\fP, \fB--group-by\fP=""
+\fB-g\fP, \fB--group-by\fP=
\fB\fCgroup\fR snapshots by host, paths and/or tags, separated by comma
.PP
@@ -54,7 +54,7 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
.SH OPTIONS INHERITED FROM PARENT COMMANDS
.PP
\fB--cacert\fP=[]
- \fB\fCfile\fR to load root certificates from (default: use system certificates)
+ \fB\fCfile\fR to load root certificates from (default: use system certificates or $RESTIC_CACERT)
.PP
\fB--cache-dir\fP=""
@@ -66,7 +66,7 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
.PP
\fB--compression\fP=auto
- compression mode (only available for repository format version 2), one of (auto|off|max)
+ compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION)
.PP
\fB--insecure-tls\fP[=false]
@@ -125,8 +125,12 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
\fB\fCfile\fR to read the repository location from (default: $RESTIC_REPOSITORY_FILE)
.PP
+\fB--retry-lock\fP=0s
+ retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries)
+
+.PP
\fB--tls-client-cert\fP=""
- path to a \fB\fCfile\fR containing PEM encoded TLS client certificate and private key
+ path to a \fB\fCfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT)
.PP
\fB-v\fP, \fB--verbose\fP[=0]
diff --git a/doc/man/restic-stats.1 b/doc/man/restic-stats.1
index 694bde22d..78b8c94df 100644
--- a/doc/man/restic-stats.1
+++ b/doc/man/restic-stats.1
@@ -76,7 +76,7 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
.SH OPTIONS INHERITED FROM PARENT COMMANDS
.PP
\fB--cacert\fP=[]
- \fB\fCfile\fR to load root certificates from (default: use system certificates)
+ \fB\fCfile\fR to load root certificates from (default: use system certificates or $RESTIC_CACERT)
.PP
\fB--cache-dir\fP=""
@@ -88,7 +88,7 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
.PP
\fB--compression\fP=auto
- compression mode (only available for repository format version 2), one of (auto|off|max)
+ compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION)
.PP
\fB--insecure-tls\fP[=false]
@@ -147,8 +147,12 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
\fB\fCfile\fR to read the repository location from (default: $RESTIC_REPOSITORY_FILE)
.PP
+\fB--retry-lock\fP=0s
+ retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries)
+
+.PP
\fB--tls-client-cert\fP=""
- path to a \fB\fCfile\fR containing PEM encoded TLS client certificate and private key
+ path to a \fB\fCfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT)
.PP
\fB-v\fP, \fB--verbose\fP[=0]
diff --git a/doc/man/restic-tag.1 b/doc/man/restic-tag.1
index 1ff0b4f78..c5cf273b8 100644
--- a/doc/man/restic-tag.1
+++ b/doc/man/restic-tag.1
@@ -61,7 +61,7 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
.SH OPTIONS INHERITED FROM PARENT COMMANDS
.PP
\fB--cacert\fP=[]
- \fB\fCfile\fR to load root certificates from (default: use system certificates)
+ \fB\fCfile\fR to load root certificates from (default: use system certificates or $RESTIC_CACERT)
.PP
\fB--cache-dir\fP=""
@@ -73,7 +73,7 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
.PP
\fB--compression\fP=auto
- compression mode (only available for repository format version 2), one of (auto|off|max)
+ compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION)
.PP
\fB--insecure-tls\fP[=false]
@@ -132,8 +132,12 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
\fB\fCfile\fR to read the repository location from (default: $RESTIC_REPOSITORY_FILE)
.PP
+\fB--retry-lock\fP=0s
+ retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries)
+
+.PP
\fB--tls-client-cert\fP=""
- path to a \fB\fCfile\fR containing PEM encoded TLS client certificate and private key
+ path to a \fB\fCfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT)
.PP
\fB-v\fP, \fB--verbose\fP[=0]
diff --git a/doc/man/restic-unlock.1 b/doc/man/restic-unlock.1
index e5b408915..3823e747f 100644
--- a/doc/man/restic-unlock.1
+++ b/doc/man/restic-unlock.1
@@ -34,7 +34,7 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
.SH OPTIONS INHERITED FROM PARENT COMMANDS
.PP
\fB--cacert\fP=[]
- \fB\fCfile\fR to load root certificates from (default: use system certificates)
+ \fB\fCfile\fR to load root certificates from (default: use system certificates or $RESTIC_CACERT)
.PP
\fB--cache-dir\fP=""
@@ -46,7 +46,7 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
.PP
\fB--compression\fP=auto
- compression mode (only available for repository format version 2), one of (auto|off|max)
+ compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION)
.PP
\fB--insecure-tls\fP[=false]
@@ -105,8 +105,12 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
\fB\fCfile\fR to read the repository location from (default: $RESTIC_REPOSITORY_FILE)
.PP
+\fB--retry-lock\fP=0s
+ retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries)
+
+.PP
\fB--tls-client-cert\fP=""
- path to a \fB\fCfile\fR containing PEM encoded TLS client certificate and private key
+ path to a \fB\fCfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT)
.PP
\fB-v\fP, \fB--verbose\fP[=0]
diff --git a/doc/man/restic-version.1 b/doc/man/restic-version.1
index eca34d60a..db83e6162 100644
--- a/doc/man/restic-version.1
+++ b/doc/man/restic-version.1
@@ -31,7 +31,7 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
.SH OPTIONS INHERITED FROM PARENT COMMANDS
.PP
\fB--cacert\fP=[]
- \fB\fCfile\fR to load root certificates from (default: use system certificates)
+ \fB\fCfile\fR to load root certificates from (default: use system certificates or $RESTIC_CACERT)
.PP
\fB--cache-dir\fP=""
@@ -43,7 +43,7 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
.PP
\fB--compression\fP=auto
- compression mode (only available for repository format version 2), one of (auto|off|max)
+ compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION)
.PP
\fB--insecure-tls\fP[=false]
@@ -102,8 +102,12 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
\fB\fCfile\fR to read the repository location from (default: $RESTIC_REPOSITORY_FILE)
.PP
+\fB--retry-lock\fP=0s
+ retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries)
+
+.PP
\fB--tls-client-cert\fP=""
- path to a \fB\fCfile\fR containing PEM encoded TLS client certificate and private key
+ path to a \fB\fCfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT)
.PP
\fB-v\fP, \fB--verbose\fP[=0]
diff --git a/doc/man/restic.1 b/doc/man/restic.1
index f76d16e38..9a96533fc 100644
--- a/doc/man/restic.1
+++ b/doc/man/restic.1
@@ -16,11 +16,14 @@ restic - Backup and restore files
restic is a backup program which allows saving multiple revisions of files and
directories in an encrypted repository stored on different backends.
+.PP
+The full documentation can be found at https://restic.readthedocs.io/ .
+
.SH OPTIONS
.PP
\fB--cacert\fP=[]
- \fB\fCfile\fR to load root certificates from (default: use system certificates)
+ \fB\fCfile\fR to load root certificates from (default: use system certificates or $RESTIC_CACERT)
.PP
\fB--cache-dir\fP=""
@@ -32,7 +35,7 @@ directories in an encrypted repository stored on different backends.
.PP
\fB--compression\fP=auto
- compression mode (only available for repository format version 2), one of (auto|off|max)
+ compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION)
.PP
\fB-h\fP, \fB--help\fP[=false]
@@ -95,8 +98,12 @@ directories in an encrypted repository stored on different backends.
\fB\fCfile\fR to read the repository location from (default: $RESTIC_REPOSITORY_FILE)
.PP
+\fB--retry-lock\fP=0s
+ retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries)
+
+.PP
\fB--tls-client-cert\fP=""
- path to a \fB\fCfile\fR containing PEM encoded TLS client certificate and private key
+ path to a \fB\fCfile\fR containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT)
.PP
\fB-v\fP, \fB--verbose\fP[=0]
@@ -105,4 +112,4 @@ directories in an encrypted repository stored on different backends.
.SH SEE ALSO
.PP
-\fBrestic-backup(1)\fP, \fBrestic-cache(1)\fP, \fBrestic-cat(1)\fP, \fBrestic-check(1)\fP, \fBrestic-copy(1)\fP, \fBrestic-diff(1)\fP, \fBrestic-dump(1)\fP, \fBrestic-find(1)\fP, \fBrestic-forget(1)\fP, \fBrestic-generate(1)\fP, \fBrestic-init(1)\fP, \fBrestic-key(1)\fP, \fBrestic-list(1)\fP, \fBrestic-ls(1)\fP, \fBrestic-migrate(1)\fP, \fBrestic-mount(1)\fP, \fBrestic-prune(1)\fP, \fBrestic-rebuild-index(1)\fP, \fBrestic-recover(1)\fP, \fBrestic-restore(1)\fP, \fBrestic-rewrite(1)\fP, \fBrestic-self-update(1)\fP, \fBrestic-snapshots(1)\fP, \fBrestic-stats(1)\fP, \fBrestic-tag(1)\fP, \fBrestic-unlock(1)\fP, \fBrestic-version(1)\fP
+\fBrestic-backup(1)\fP, \fBrestic-cache(1)\fP, \fBrestic-cat(1)\fP, \fBrestic-check(1)\fP, \fBrestic-copy(1)\fP, \fBrestic-diff(1)\fP, \fBrestic-dump(1)\fP, \fBrestic-find(1)\fP, \fBrestic-forget(1)\fP, \fBrestic-generate(1)\fP, \fBrestic-init(1)\fP, \fBrestic-key(1)\fP, \fBrestic-list(1)\fP, \fBrestic-ls(1)\fP, \fBrestic-migrate(1)\fP, \fBrestic-mount(1)\fP, \fBrestic-prune(1)\fP, \fBrestic-recover(1)\fP, \fBrestic-repair(1)\fP, \fBrestic-restore(1)\fP, \fBrestic-rewrite(1)\fP, \fBrestic-self-update(1)\fP, \fBrestic-snapshots(1)\fP, \fBrestic-stats(1)\fP, \fBrestic-tag(1)\fP, \fBrestic-unlock(1)\fP, \fBrestic-version(1)\fP
diff --git a/doc/manual_rest.rst b/doc/manual_rest.rst
index 97480db80..71f5e192b 100644
--- a/doc/manual_rest.rst
+++ b/doc/manual_rest.rst
@@ -26,7 +26,7 @@ Usage help is available:
dump Print a backed-up file to stdout
find Find a file, a directory or restic IDs
forget Remove snapshots from the repository
- generate Generate manual pages and auto-completion files (bash, fish, zsh)
+ generate Generate manual pages and auto-completion files (bash, fish, zsh, powershell)
help Help about any command
init Initialize a new repository
key Manage keys (passwords)
@@ -35,8 +35,8 @@ Usage help is available:
migrate Apply migrations
mount Mount the repository
prune Remove unneeded data from the repository
- rebuild-index Build a new index
recover Recover data from the repository not referenced by snapshots
+ repair Repair the repository
restore Extract the data from a snapshot
rewrite Rewrite snapshots to exclude unwanted files
self-update Update the restic binary
@@ -50,7 +50,7 @@ Usage help is available:
--cacert file file to load root certificates from (default: use system certificates)
--cache-dir directory set the cache directory. (default: use system default cache directory)
--cleanup-cache auto remove old cache directories
- --compression mode compression mode (only available for repository format version 2), one of (auto|off|max) (default auto)
+ --compression mode compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION) (default auto)
-h, --help help for restic
--insecure-tls skip TLS certificate verification when connecting to the repository (insecure)
--json set output mode to JSON for commands that support it
@@ -66,6 +66,7 @@ Usage help is available:
-q, --quiet do not output comprehensive progress report
-r, --repo repository repository to backup to or restore from (default: $RESTIC_REPOSITORY)
--repository-file file file to read the repository location from (default: $RESTIC_REPOSITORY_FILE)
+ --retry-lock duration retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries)
--tls-client-cert file path to a file containing PEM encoded TLS client certificate and private key
-v, --verbose be verbose (specify multiple times or a level using --verbose=n, max level/times is 2)
@@ -105,6 +106,7 @@ command:
--files-from-raw file read the files to backup from file (can be combined with file args; can be specified multiple times)
--files-from-verbatim file read the files to backup from file (can be combined with file args; can be specified multiple times)
-f, --force force re-reading the target files/directories (overrides the "parent" flag)
+ -g, --group-by group group snapshots by host, paths and/or tags, separated by comma (disable grouping with '') (default host,paths)
-h, --help help for backup
-H, --host hostname set the hostname for the snapshot manually. To prevent an expensive rescan use the "parent" flag
--iexclude pattern same as --exclude pattern but ignores the casing of filenames
@@ -113,8 +115,8 @@ command:
--ignore-inode ignore inode number changes when checking for modified files
--no-scan do not run scanner to estimate size of backup
-x, --one-file-system exclude other file systems, don't cross filesystem boundaries and subvolumes
- --parent snapshot use this parent snapshot (default: last snapshot in the repository that has the same target files/directories, and is not newer than the snapshot time)
- --read-concurrency n read n file concurrently (default: $RESTIC_READ_CONCURRENCY or 2)
+ --parent snapshot use this parent snapshot (default: latest snapshot in the group determined by --group-by and not newer than the timestamp determined by --time)
+ --read-concurrency n read n files concurrently (default: $RESTIC_READ_CONCURRENCY or 2)
--stdin read backup from stdin
--stdin-filename filename filename to use when reading from stdin (default "stdin")
--tag tags add tags for the new snapshot in the format `tag[,tag,...]` (can be specified multiple times) (default [])
@@ -126,7 +128,7 @@ command:
--cacert file file to load root certificates from (default: use system certificates)
--cache-dir directory set the cache directory. (default: use system default cache directory)
--cleanup-cache auto remove old cache directories
- --compression mode compression mode (only available for repository format version 2), one of (auto|off|max) (default auto)
+ --compression mode compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION) (default auto)
--insecure-tls skip TLS certificate verification when connecting to the repository (insecure)
--json set output mode to JSON for commands that support it
--key-hint key key ID of key to try decrypting first (default: $RESTIC_KEY_HINT)
@@ -141,6 +143,7 @@ command:
-q, --quiet do not output comprehensive progress report
-r, --repo repository repository to backup to or restore from (default: $RESTIC_REPOSITORY)
--repository-file file file to read the repository location from (default: $RESTIC_REPOSITORY_FILE)
+ --retry-lock duration retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries)
--tls-client-cert file path to a file containing PEM encoded TLS client certificate and private key
-v, --verbose be verbose (specify multiple times or a level using --verbose=n, max level/times is 2)
@@ -224,7 +227,7 @@ locks with the following command:
d369ccc7d126594950bf74f0a348d5d98d9e99f3215082eb69bf02dc9b3e464c
The ``find`` command searches for a given
-`pattern <https://golang.org/pkg/path/filepath/#Match>`__ in the
+`pattern <https://pkg.go.dev/path/filepath#Match>`__ in the
repository.
.. code-block:: console
@@ -415,7 +418,6 @@ instead of the default, set the environment variable like this:
$ restic -r /srv/restic-repo backup ~/work
-
.. _caching:
Caching
@@ -439,6 +441,10 @@ The command line parameter ``--cache-dir`` or the environment variable
parameter ``--no-cache`` disables the cache entirely. In this case, all data
is loaded from the repository.
+If a cache location is explicitly specified, then the ``check`` command will use
+that location to store its temporary cache. See :ref:`checking-integrity` for
+more details.
+
The cache is ephemeral: When a file cannot be read from the cache, it is loaded
from the repository.
@@ -448,4 +454,3 @@ time it is used, so by looking at the timestamps of the sub directories of the
cache directory it can decide which sub directories are old and probably not
needed any more. You can either remove these directories manually, or run a
restic command with the ``--cleanup-cache`` flag.
-
diff --git a/doc/powershell-completion.ps1 b/doc/powershell-completion.ps1
index 271809161..d8aa5a1af 100644
--- a/doc/powershell-completion.ps1
+++ b/doc/powershell-completion.ps1
@@ -40,6 +40,7 @@ filter __restic_escapeStringWithSpecialChars {
$ShellCompDirectiveNoFileComp=4
$ShellCompDirectiveFilterFileExt=8
$ShellCompDirectiveFilterDirs=16
+ $ShellCompDirectiveKeepOrder=32
# Prepare the command to request completions for the program.
# Split the command at the first space to separate the program and arguments.
@@ -69,8 +70,17 @@ filter __restic_escapeStringWithSpecialChars {
# If the last parameter is complete (there is a space following it)
# We add an extra empty parameter so we can indicate this to the go method.
__restic_debug "Adding extra empty parameter"
- # We need to use `"`" to pass an empty argument a "" or '' does not work!!!
- $RequestComp="$RequestComp" + ' `"`"'
+ # PowerShell 7.2+ changed the way how the arguments are passed to executables,
+ # so for pre-7.2 or when Legacy argument passing is enabled we need to use
+ # `"`" to pass an empty argument, a "" or '' does not work!!!
+ if ($PSVersionTable.PsVersion -lt [version]'7.2.0' -or
+ ($PSVersionTable.PsVersion -lt [version]'7.3.0' -and -not [ExperimentalFeature]::IsEnabled("PSNativeCommandArgumentPassing")) -or
+ (($PSVersionTable.PsVersion -ge [version]'7.3.0' -or [ExperimentalFeature]::IsEnabled("PSNativeCommandArgumentPassing")) -and
+ $PSNativeCommandArgumentPassing -eq 'Legacy')) {
+ $RequestComp="$RequestComp" + ' `"`"'
+ } else {
+ $RequestComp="$RequestComp" + ' ""'
+ }
}
__restic_debug "Calling $RequestComp"
@@ -100,7 +110,7 @@ filter __restic_escapeStringWithSpecialChars {
}
$Longest = 0
- $Values = $Out | ForEach-Object {
+ [Array]$Values = $Out | ForEach-Object {
#Split the output in name and description
$Name, $Description = $_.Split("`t",2)
__restic_debug "Name: $Name Description: $Description"
@@ -145,6 +155,11 @@ filter __restic_escapeStringWithSpecialChars {
}
}
+ # we sort the values in ascending order by name if keep order isn't passed
+ if (($Directive -band $ShellCompDirectiveKeepOrder) -eq 0 ) {
+ $Values = $Values | Sort-Object -Property Name
+ }
+
if (($Directive -band $ShellCompDirectiveNoFileComp) -ne 0 ) {
__restic_debug "ShellCompDirectiveNoFileComp is called"
diff --git a/doc/zsh-completion.zsh b/doc/zsh-completion.zsh
index cea6abb38..ec1c52a00 100644
--- a/doc/zsh-completion.zsh
+++ b/doc/zsh-completion.zsh
@@ -1,4 +1,5 @@
#compdef restic
+compdef _restic restic
# zsh completion for restic -*- shell-script -*-
@@ -17,8 +18,9 @@ _restic()
local shellCompDirectiveNoFileComp=4
local shellCompDirectiveFilterFileExt=8
local shellCompDirectiveFilterDirs=16
+ local shellCompDirectiveKeepOrder=32
- local lastParam lastChar flagPrefix requestComp out directive comp lastComp noSpace
+ local lastParam lastChar flagPrefix requestComp out directive comp lastComp noSpace keepOrder
local -a completions
__restic_debug "\n========= starting completion logic =========="
@@ -136,6 +138,11 @@ _restic()
noSpace="-S ''"
fi
+ if [ $((directive & shellCompDirectiveKeepOrder)) -ne 0 ]; then
+ __restic_debug "Activating keep order."
+ keepOrder="-V"
+ fi
+
if [ $((directive & shellCompDirectiveFilterFileExt)) -ne 0 ]; then
# File extension filtering
local filteringCmd
@@ -171,7 +178,7 @@ _restic()
return $result
else
__restic_debug "Calling _describe"
- if eval _describe "completions" completions $flagPrefix $noSpace; then
+ if eval _describe $keepOrder "completions" completions $flagPrefix $noSpace; then
__restic_debug "_describe found some completions"
# Return the success of having called _describe
diff --git a/docker/Dockerfile b/docker/Dockerfile
index 9f47fa10f..ecc283f8a 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -1,4 +1,4 @@
-FROM golang:1.19-alpine AS builder
+FROM golang:1.20-alpine AS builder
WORKDIR /go/src/github.com/restic/restic
@@ -11,7 +11,7 @@ RUN go run build.go
FROM alpine:latest AS restic
-RUN apk add --update --no-cache ca-certificates fuse openssh-client tzdata
+RUN apk add --update --no-cache ca-certificates fuse openssh-client tzdata jq
COPY --from=builder /go/src/github.com/restic/restic/restic /usr/bin
diff --git a/docker/Dockerfile.release b/docker/Dockerfile.release
new file mode 100644
index 000000000..ccf80376a
--- /dev/null
+++ b/docker/Dockerfile.release
@@ -0,0 +1,18 @@
+# the official binaries are cross-built from Linux running on an AMD64 host
+# other architectures also seem to generate identical binaries but stay on the safe side
+FROM --platform=linux/amd64 restic/builder:latest as helper
+
+ARG TARGETOS
+ARG TARGETARCH
+
+COPY --chown=build . /restic
+RUN go run helpers/build-release-binaries/main.go --platform $TARGETOS/$TARGETARCH --skip-compress
+RUN mv /output/restic_${TARGETOS}_${TARGETARCH} /output/restic
+
+
+FROM alpine:latest
+
+COPY --from=helper /output/restic /usr/bin
+RUN apk add --update --no-cache ca-certificates fuse openssh-client tzdata jq
+
+ENTRYPOINT ["/usr/bin/restic"]
diff --git a/docker/README.md b/docker/README.md
index 1c2c9205c..444aae3dc 100644
--- a/docker/README.md
+++ b/docker/README.md
@@ -16,9 +16,13 @@ Set environment variable `RESTIC_REPOSITORY` and map volume to directories and
files like:
```
-docker run --rm -ti \
+docker run --rm --hostname my-host -ti \
-v $HOME/.restic/passfile:/pass \
-v $HOME/importantdirectory:/data \
-e RESTIC_REPOSITORY=rest:https://user:pass@hostname/ \
restic/restic -p /pass backup /data
```
+
+Restic relies on the hostname for various operations. Make sure to set a static
+hostname using `--hostname` when creating a Docker container, otherwise Docker
+will assign a random hostname each time.
diff --git a/go.mod b/go.mod
index d0e514220..e3bf39f27 100644
--- a/go.mod
+++ b/go.mod
@@ -1,71 +1,78 @@
module github.com/restic/restic
require (
- cloud.google.com/go/storage v1.30.1
- github.com/Azure/azure-sdk-for-go/sdk/azcore v1.4.0
- github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.5.1
+ cloud.google.com/go/storage v1.31.0
+ github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.0
+ github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0
+ github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.1.0
github.com/anacrolix/fuse v0.2.0
- github.com/cenkalti/backoff/v4 v4.2.0
+ github.com/cenkalti/backoff/v4 v4.2.1
github.com/cespare/xxhash/v2 v2.2.0
github.com/elithrar/simple-scrypt v1.3.0
github.com/go-ole/go-ole v1.2.6
github.com/google/go-cmp v0.5.9
- github.com/hashicorp/golang-lru/v2 v2.0.1
+ github.com/hashicorp/golang-lru/v2 v2.0.4
github.com/juju/ratelimit v1.0.2
- github.com/klauspost/compress v1.16.0
+ github.com/klauspost/compress v1.16.7
github.com/kurin/blazer v0.5.4-0.20230113224640-3887e1ec64b5
- github.com/minio/minio-go/v7 v7.0.50
- github.com/minio/sha256-simd v1.0.0
- github.com/ncw/swift/v2 v2.0.1
+ github.com/minio/minio-go/v7 v7.0.61
+ github.com/minio/sha256-simd v1.0.1
+ github.com/ncw/swift/v2 v2.0.2
github.com/pkg/errors v0.9.1
github.com/pkg/profile v1.7.0
github.com/pkg/sftp v1.13.5
github.com/pkg/xattr v0.4.10-0.20221120235825-35026bbbd013
github.com/restic/chunker v0.4.0
- github.com/spf13/cobra v1.6.1
+ github.com/spf13/cobra v1.7.0
github.com/spf13/pflag v1.0.5
- golang.org/x/crypto v0.7.0
- golang.org/x/net v0.8.0
- golang.org/x/oauth2 v0.6.0
- golang.org/x/sync v0.1.0
- golang.org/x/sys v0.6.0
- golang.org/x/term v0.6.0
- golang.org/x/text v0.8.0
- google.golang.org/api v0.116.0
+ golang.org/x/crypto v0.11.0
+ golang.org/x/net v0.12.0
+ golang.org/x/oauth2 v0.10.0
+ golang.org/x/sync v0.3.0
+ golang.org/x/sys v0.10.0
+ golang.org/x/term v0.10.0
+ golang.org/x/text v0.11.0
+ google.golang.org/api v0.134.0
)
require (
- cloud.google.com/go v0.110.0 // indirect
- cloud.google.com/go/compute v1.19.0 // indirect
+ cloud.google.com/go v0.110.6 // indirect
+ cloud.google.com/go/compute v1.23.0 // indirect
cloud.google.com/go/compute/metadata v0.2.3 // indirect
- cloud.google.com/go/iam v0.13.0 // indirect
- github.com/Azure/azure-sdk-for-go/sdk/internal v1.1.2 // indirect
+ cloud.google.com/go/iam v1.1.1 // indirect
+ github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 // indirect
+ github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
- github.com/dnaeon/go-vcr v1.2.0 // indirect
github.com/dustin/go-humanize v1.0.1 // indirect
github.com/felixge/fgprof v0.9.3 // indirect
+ github.com/golang-jwt/jwt/v4 v4.5.0 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.3 // indirect
- github.com/google/pprof v0.0.0-20230111200839-76d1ae5aea2b // indirect
+ github.com/google/pprof v0.0.0-20230705174524-200ffdc848b8 // indirect
+ github.com/google/s2a-go v0.1.4 // indirect
github.com/google/uuid v1.3.0 // indirect
- github.com/googleapis/enterprise-certificate-proxy v0.2.3 // indirect
- github.com/googleapis/gax-go/v2 v2.8.0 // indirect
+ github.com/googleapis/enterprise-certificate-proxy v0.2.5 // indirect
+ github.com/googleapis/gax-go/v2 v2.12.0 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
- github.com/klauspost/cpuid/v2 v2.2.4 // indirect
+ github.com/klauspost/cpuid/v2 v2.2.5 // indirect
github.com/kr/fs v0.1.0 // indirect
+ github.com/kylelemons/godebug v1.1.0 // indirect
github.com/minio/md5-simd v1.1.2 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
- github.com/rs/xid v1.4.0 // indirect
+ github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect
+ github.com/rs/xid v1.5.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
- github.com/sirupsen/logrus v1.9.0 // indirect
+ github.com/sirupsen/logrus v1.9.3 // indirect
go.opencensus.io v0.24.0 // indirect
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
google.golang.org/appengine v1.6.7 // indirect
- google.golang.org/genproto v0.0.0-20230331144136-dcfb400f0633 // indirect
- google.golang.org/grpc v1.54.0 // indirect
- google.golang.org/protobuf v1.30.0 // indirect
+ google.golang.org/genproto v0.0.0-20230726155614-23370e0ffb3e // indirect
+ google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20230726155614-23370e0ffb3e // indirect
+ google.golang.org/grpc v1.57.0 // indirect
+ google.golang.org/protobuf v1.31.0 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)
diff --git a/go.sum b/go.sum
index ce664c15a..28b1527a2 100644
--- a/go.sum
+++ b/go.sum
@@ -1,30 +1,35 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-cloud.google.com/go v0.110.0 h1:Zc8gqp3+a9/Eyph2KDmcGaPtbKRIoqq4YTlL4NMD0Ys=
-cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY=
-cloud.google.com/go/compute v1.19.0 h1:+9zda3WGgW1ZSTlVppLCYFIr48Pa35q1uG2N1itbCEQ=
-cloud.google.com/go/compute v1.19.0/go.mod h1:rikpw2y+UMidAe9tISo04EHNOIf42RLYF/q8Bs93scU=
+cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.110.6 h1:8uYAkj3YHTP/1iwReuHPxLSbdcyc+dSBbzFMrVwDR6Q=
+cloud.google.com/go v0.110.6/go.mod h1:+EYjdK8e5RME/VY/qLCAtuyALQ9q67dvuum8i+H5xsI=
+cloud.google.com/go/compute v1.23.0 h1:tP41Zoavr8ptEqaW6j+LQOnyBBhO7OkOMAGrgLopTwY=
+cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM=
cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
-cloud.google.com/go/iam v0.13.0 h1:+CmB+K0J/33d0zSQ9SlFWUeCCEn5XJA0ZMZ3pHE9u8k=
-cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCtaLZ/D0=
-cloud.google.com/go/longrunning v0.4.1 h1:v+yFJOfKC3yZdY6ZUI933pIYdhyhV8S3NpWrXWmg7jM=
-cloud.google.com/go/storage v1.30.1 h1:uOdMxAs8HExqBlnLtnQyP0YkvbiDpdGShGKtx6U/oNM=
-cloud.google.com/go/storage v1.30.1/go.mod h1:NfxhC0UJE1aXSx7CIIbCf7y9HKT7BiccwkR7+P7gN8E=
-github.com/Azure/azure-sdk-for-go/sdk/azcore v1.4.0 h1:rTnT/Jrcm+figWlYz4Ixzt0SJVR2cMC8lvZcimipiEY=
-github.com/Azure/azure-sdk-for-go/sdk/azcore v1.4.0/go.mod h1:ON4tFdPTwRcgWEaVDrN3584Ef+b7GgSJaXxe5fW9t4M=
-github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.1.0 h1:QkAcEIAKbNL4KoFr4SathZPhDhF4mVwpBMFlYjyAqy8=
-github.com/Azure/azure-sdk-for-go/sdk/internal v1.1.2 h1:+5VZ72z0Qan5Bog5C+ZkgSqUbeVUd9wgtHOrIKuc5b8=
-github.com/Azure/azure-sdk-for-go/sdk/internal v1.1.2/go.mod h1:eWRD7oawr1Mu1sLCawqVc0CUiF43ia3qQMxLscsKQ9w=
-github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.5.1 h1:BMTdr+ib5ljLa9MxTJK8x/Ds0MbBb4MfuW5BL0zMJnI=
-github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.5.1/go.mod h1:c6WvOhtmjNUWbLfOG1qxM/q0SPvQNSVJvolm+C52dIU=
-github.com/AzureAD/microsoft-authentication-library-for-go v0.5.1 h1:BWe8a+f/t+7KY7zH2mqygeUD0t8hNFXe08p1Pb3/jKE=
+cloud.google.com/go/iam v1.1.1 h1:lW7fzj15aVIXYHREOqjRBV9PsH0Z6u8Y46a1YGvQP4Y=
+cloud.google.com/go/iam v1.1.1/go.mod h1:A5avdyVL2tCppe4unb0951eI9jreack+RJ0/d+KUZOU=
+cloud.google.com/go/storage v1.31.0 h1:+S3LjjEN2zZ+L5hOwj4+1OkGCsLVe0NzpXKQ1pSdTCI=
+cloud.google.com/go/storage v1.31.0/go.mod h1:81ams1PrhW16L4kF7qg+4mTq7SRs5HsbDTM0bWvrwJ0=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.0 h1:8q4SaHjFsClSvuVne0ID/5Ka8u3fcIHyqkLjcFpNRHQ=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.0/go.mod h1:bjGvMhVMb+EEm3VRNQawDMUyMMjo+S5ewNjflkep/0Q=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0 h1:vcYCAze6p19qBW7MhZybIsqD8sMV8js0NyQM8JDnVtg=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0/go.mod h1:OQeznEEkTZ9OrhHJoDD8ZDq51FHgXjqtP9z6bEwBq9U=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 h1:sXr+ck84g/ZlZUOZiNELInmMgOsuGwdjjVkEIde0OtY=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0/go.mod h1:okt5dMMTOFjX/aovMlrjvvXoPMBVSPzk9185BT0+eZM=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.2.0 h1:Ma67P/GGprNwsslzEH6+Kb8nybI8jpDTm4Wmzu2ReK8=
+github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.1.0 h1:nVocQV40OQne5613EeLayJiRAJuKlBGy+m22qWG+WRg=
+github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.1.0/go.mod h1:7QJP7dr2wznCMeqIrhMgWGf7XpAQnVrJqDm9nvV3Cu4=
+github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 h1:OBhqkivkhkMqLPymWEppkm7vgPQY2XsHoEkaMQ0AdZY=
+github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0/go.mod h1:kgDmCTgBzIEPFElEF+FK0SdjAor06dRq2Go927dnQ6o=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/Julusian/godocdown v0.0.0-20170816220326-6d19f8ff2df8/go.mod h1:INZr5t32rG59/5xeltqoCJoNY7e5x/3xoY9WSWVWg74=
github.com/anacrolix/fuse v0.2.0 h1:pc+To78kI2d/WUjIyrsdqeJQAesuwpGxlI3h1nAv3Do=
github.com/anacrolix/fuse v0.2.0/go.mod h1:Kfu02xBwnySDpH3N23BmrP3MDfwAQGRLUCj6XyeOvBQ=
-github.com/cenkalti/backoff/v4 v4.2.0 h1:HN5dHm3WBOgndBH6E8V0q2jIYIR3s9yglV8k/+MN3u4=
-github.com/cenkalti/backoff/v4 v4.2.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
+github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
+github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM=
+github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
@@ -32,13 +37,17 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5P
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
+github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
+github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
-github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ=
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
github.com/dvyukov/go-fuzz v0.0.0-20200318091601-be3528f3a813/go.mod h1:11Gm+ccJnvAhCNLlf5+cS9KjtbaD5I5zaZpFMsTHWTw=
@@ -48,12 +57,16 @@ github.com/elithrar/simple-scrypt v1.3.0/go.mod h1:U2XQRI95XHY0St410VE3UjT7vuKb1
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
+github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/felixge/fgprof v0.9.3 h1:VvyZxILNuCiUCSXtPtYmmtGvb65nqXh2QFWc0Wpf2/g=
github.com/felixge/fgprof v0.9.3/go.mod h1:RdbpDgzqYVh/T9fPELJyV7EYJuHB55UTEULNun8eiPw=
+github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
-github.com/golang-jwt/jwt v3.2.1+incompatible h1:73Z+4BJcrTC+KczS6WvTPvRGOp1WmfEP4Q1lOd9Z/+c=
+github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg=
+github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
@@ -62,14 +75,17 @@ github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
+github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
+github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
@@ -84,51 +100,53 @@ github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian/v3 v3.3.2 h1:IqNFLAmvJOgVlpdEBiQbDc2EwKW77amAycfTuWKdfvw=
github.com/google/pprof v0.0.0-20211214055906-6f57359322fd/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg=
-github.com/google/pprof v0.0.0-20230111200839-76d1ae5aea2b h1:8htHrh2bw9c7Idkb7YNac+ZpTqLMjRpI+FWu51ltaQc=
-github.com/google/pprof v0.0.0-20230111200839-76d1ae5aea2b/go.mod h1:dDKJzRmX4S37WGHujM7tX//fmj1uioxKzKxz3lo4HJo=
+github.com/google/pprof v0.0.0-20230705174524-200ffdc848b8 h1:n6vlPhxsA+BW/XsS5+uqi7GyzaLa5MH7qlSLBZtRdiA=
+github.com/google/pprof v0.0.0-20230705174524-200ffdc848b8/go.mod h1:Jh3hGz2jkYak8qXPD19ryItVnUgpgeqzdkY/D0EaeuA=
+github.com/google/s2a-go v0.1.4 h1:1kZ/sQM3srePvKs3tXAvQzo66XfcReoqFpIpIccE7Oc=
+github.com/google/s2a-go v0.1.4/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/googleapis/enterprise-certificate-proxy v0.2.3 h1:yk9/cqRKtT9wXZSsRH9aurXEpJX+U6FLtpYTdC3R06k=
-github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k=
-github.com/googleapis/gax-go/v2 v2.8.0 h1:UBtEZqx1bjXtOQ5BVTkuYghXrr3N4V123VKJK67vJZc=
-github.com/googleapis/gax-go/v2 v2.8.0/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI=
-github.com/hashicorp/golang-lru/v2 v2.0.1 h1:5pv5N1lT1fjLg2VQ5KWc7kmucp2x/kvFOnxuVTqZ6x4=
-github.com/hashicorp/golang-lru/v2 v2.0.1/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
+github.com/googleapis/enterprise-certificate-proxy v0.2.5 h1:UR4rDjcgpgEnqpIEvkiqTYKBCKLNmlge2eVjoZfySzM=
+github.com/googleapis/enterprise-certificate-proxy v0.2.5/go.mod h1:RxW0N9901Cko1VOCW3SXCpWP+mlIEkk2tP7jnHy9a3w=
+github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas=
+github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU=
+github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
+github.com/hashicorp/golang-lru/v2 v2.0.4 h1:7GHuZcgid37q8o5i3QI9KMT4nCWQQ3Kx3Ov6bb9MfK0=
+github.com/hashicorp/golang-lru/v2 v2.0.4/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w=
-github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/juju/ratelimit v1.0.2 h1:sRxmtRiajbvrcLQT7S+JbqU0ntsb9W2yhSdNN8tWfaI=
github.com/juju/ratelimit v1.0.2/go.mod h1:qapgC/Gy+xNh9UxzV13HGGl/6UXNN+ct+vwSgWNm/qk=
-github.com/klauspost/compress v1.16.0 h1:iULayQNOReoYUe+1qtKOqw9CwJv3aNQu8ivo7lw1HU4=
-github.com/klauspost/compress v1.16.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
+github.com/klauspost/compress v1.16.7 h1:2mk3MPGNzKyxErAw8YaohYh69+pa4sIQSC0fPGCFR9I=
+github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
-github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
-github.com/klauspost/cpuid/v2 v2.2.4 h1:acbojRNwl3o09bUq+yDCtZFc1aiwaAAxtcn8YkZXnvk=
-github.com/klauspost/cpuid/v2 v2.2.4/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY=
+github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg=
+github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8=
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
github.com/kurin/blazer v0.5.4-0.20230113224640-3887e1ec64b5 h1:OUlGa6AAolmjyPtILbMJ8vHayz5wd4wBUloheGcMhfA=
github.com/kurin/blazer v0.5.4-0.20230113224640-3887e1ec64b5/go.mod h1:4FCXMUWo9DllR2Do4TtBd377ezyAJ51vB5uTBjt0pGU=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
+github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34=
github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM=
-github.com/minio/minio-go/v7 v7.0.50 h1:4IL4V8m/kI90ZL6GupCARZVrBv8/XrcKcJhaJ3iz68k=
-github.com/minio/minio-go/v7 v7.0.50/go.mod h1:IbbodHyjUAguneyucUaahv+VMNs/EOTV9du7A7/Z3HU=
-github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g=
-github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM=
+github.com/minio/minio-go/v7 v7.0.61 h1:87c+x8J3jxQ5VUGimV9oHdpjsAvy3fhneEBKuoKEVUI=
+github.com/minio/minio-go/v7 v7.0.61/go.mod h1:BTu8FcrEw+HidY0zd/0eny43QnVNkXRPXrLXFuQBHXg=
+github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM=
+github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
-github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3PzxT8aQXRPkAt8xlV/e7d7w8GM5g0fa5F0D8=
-github.com/ncw/swift/v2 v2.0.1 h1:q1IN8hNViXEv8Zvg3Xdis4a3c4IlIGezkYz09zQL5J0=
-github.com/ncw/swift/v2 v2.0.1/go.mod h1:z0A9RVdYPjNjXVo2pDOPxZ4eu3oarO1P91fTItcb+Kg=
-github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4 h1:Qj1ukM4GlMWXNdMBuXcXfz/Kw9s1qm0CLY32QxuSImI=
+github.com/ncw/swift/v2 v2.0.2 h1:jx282pcAKFhmoZBSdMcCRFn9VWkoBIRsCpe+yZq7vEk=
+github.com/ncw/swift/v2 v2.0.2/go.mod h1:z0A9RVdYPjNjXVo2pDOPxZ4eu3oarO1P91fTItcb+Kg=
+github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU=
+github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/profile v1.7.0 h1:hnbDkaNWPCLMO9wGLdBFTIZvzDrDfBM2072E1S9gJkA=
@@ -143,14 +161,15 @@ github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:
github.com/restic/chunker v0.4.0 h1:YUPYCUn70MYP7VO4yllypp2SjmsRhRJaad3xKu1QFRw=
github.com/restic/chunker v0.4.0/go.mod h1:z0cH2BejpW636LXw0R/BGyv+Ey8+m9QGiOanDHItzyw=
github.com/robertkrimen/godocdown v0.0.0-20130622164427-0bfa04905481/go.mod h1:C9WhFzY47SzYBIvzFqSvHIR6ROgDo4TtdTuRaOMjF/s=
-github.com/rs/xid v1.4.0 h1:qd7wPTDkN6KQx2VmMBLrpHkiyQwgFXRnkOLacUiaSNY=
-github.com/rs/xid v1.4.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
+github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
+github.com/rs/xid v1.5.0 h1:mKX4bl4iPYJtEIxp6CYiUuLQ/8DYMoz0PUdtGgMFRVc=
+github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=
-github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
-github.com/spf13/cobra v1.6.1 h1:o94oiPyS4KD1mPy2fmcYYHHfCxLqYjJOhGsCHFZtEzA=
-github.com/spf13/cobra v1.6.1/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY=
+github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
+github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
+github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I=
+github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stephens2424/writerset v1.0.2/go.mod h1:aS2JhsMn6eA7e82oNmW4rfsgAOp9COBTTl8mzkwADnc=
@@ -158,6 +177,7 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
@@ -166,65 +186,84 @@ github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o
github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c h1:u6SKchux2yDvFQnDHS3lPnIRmfVJ5Sxy3ao2SIdysLQ=
github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c/go.mod h1:hzIxponao9Kjc7aWznkXaL4U4TWaDSs8zcsY4Ka08nM=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
+go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
-golang.org/x/crypto v0.7.0 h1:AvwMYaRytfdeVt3u6mLaxYtErKYjxA2OXjJ1HHq6t3A=
-golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
+golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.11.0 h1:6Ewdq3tDic1mg5xRO4milcWCfMVQhI4NkqWWvqejpuA=
+golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ=
-golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
+golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.12.0 h1:cfawfvKITfUsFCeJIHJrbSxpeu/E81khclypR0GVT50=
+golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/oauth2 v0.6.0 h1:Lh8GPgSKBfWSwFvtuWOfeI3aAAnbXTSutYxJiOJFgIw=
-golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw=
+golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.10.0 h1:zHCpF2Khkwy4mMB4bv0U37YtJdTGW8jI0glAApi0Kh8=
+golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
-golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
+golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220408201424-a24fb2fb8a0f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ=
-golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA=
+golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/term v0.6.0 h1:clScbb1cHjoCkyRbWwBEUZ5H/tIFu5TAXIqaZD0Gcjw=
-golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
+golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.10.0 h1:3R7pNqamzBraeqj/Tj8qt1aQ2HpmlC+Cx/qL/7hn4/c=
+golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68=
-golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
+golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
+golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4=
+golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
@@ -232,29 +271,39 @@ golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200423201157-2723c5de0d66/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk=
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
-google.golang.org/api v0.116.0 h1:09tOPVufPwfm5W4aA8EizGHJ7BcoRDsIareM2a15gO4=
-google.golang.org/api v0.116.0/go.mod h1:9cD4/t6uvd9naoEJFA+M96d0IuB6BqFuyhpw68+mRGg=
+google.golang.org/api v0.134.0 h1:ktL4Goua+UBgoP1eL1/60LwZJqa1sIzkLmvoR3hR6Gw=
+google.golang.org/api v0.134.0/go.mod h1:sjRL3UnjTx5UqNQS9EWr9N8p7xbHpy1k0XGRLCf3Spk=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
-google.golang.org/genproto v0.0.0-20230331144136-dcfb400f0633 h1:0BOZf6qNozI3pkN3fJLwNubheHJYHhMh91GRFOWWK08=
-google.golang.org/genproto v0.0.0-20230331144136-dcfb400f0633/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak=
+google.golang.org/genproto v0.0.0-20230726155614-23370e0ffb3e h1:xIXmWJ303kJCuogpj0bHq+dcjcZHU+XFyc1I0Yl9cRg=
+google.golang.org/genproto v0.0.0-20230726155614-23370e0ffb3e/go.mod h1:0ggbjUrZYpy1q+ANUS30SEoGZ53cdfwtbuG7Ptgy108=
+google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e h1:z3vDksarJxsAKM5dmEGv0GHwE2hKJ096wZra71Vs4sw=
+google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230726155614-23370e0ffb3e h1:S83+ibolgyZ0bqz7KEsUOPErxcv4VzlszxY+31OfB/E=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230726155614-23370e0ffb3e/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
-google.golang.org/grpc v1.54.0 h1:EhTqbhiYeixwWQtAEZAxmV9MGqcjEU2mFx52xCzNyag=
-google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g=
+google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ=
+google.golang.org/grpc v1.57.0 h1:kfzNeI/klCGD2YPMUlaGNT3pxvYfga7smW3Vth8Zsiw=
+google.golang.org/grpc v1.57.0/go.mod h1:Sd+9RMTACXwmub0zcNY2c4arhtrbBYD1AUHI/dt16Mo=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -266,13 +315,14 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng=
-google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
+google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
-gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
diff --git a/helpers/build-release-binaries/main.go b/helpers/build-release-binaries/main.go
index 0c0015f42..f14f60db6 100644
--- a/helpers/build-release-binaries/main.go
+++ b/helpers/build-release-binaries/main.go
@@ -1,11 +1,14 @@
package main
import (
+ "errors"
"fmt"
"os"
"os/exec"
"path/filepath"
"runtime"
+ "sort"
+ "strconv"
"strings"
"time"
@@ -14,16 +17,24 @@ import (
)
var opts = struct {
- Verbose bool
- SourceDir string
- OutputDir string
- Version string
+ Verbose bool
+ SourceDir string
+ OutputDir string
+ Tags string
+ PlatformSubset string
+ Platform string
+ SkipCompress bool
+ Version string
}{}
func init() {
pflag.BoolVarP(&opts.Verbose, "verbose", "v", false, "be verbose")
pflag.StringVarP(&opts.SourceDir, "source", "s", "/restic", "path to the source code `directory`")
pflag.StringVarP(&opts.OutputDir, "output", "o", "/output", "path to the output `directory`")
+ pflag.StringVar(&opts.Tags, "tags", "", "additional build `tags`")
+ pflag.StringVar(&opts.PlatformSubset, "platform-subset", "", "specify `n/t` to only build this subset")
+ pflag.StringVarP(&opts.Platform, "platform", "p", "", "specify `os/arch` to only build this specific platform")
+ pflag.BoolVar(&opts.SkipCompress, "skip-compress", false, "skip binary compression step")
pflag.StringVar(&opts.Version, "version", "", "use `x.y.z` as the version for output files")
pflag.Parse()
}
@@ -95,10 +106,15 @@ func build(sourceDir, outputDir, goos, goarch string) (filename string) {
}
outputFile := filepath.Join(outputDir, filename)
+ tags := "selfupdate"
+ if opts.Tags != "" {
+ tags += "," + opts.Tags
+ }
+
c := exec.Command("go", "build",
"-o", outputFile,
"-ldflags", "-s -w",
- "-tags", "selfupdate",
+ "-tags", tags,
"./cmd/restic",
)
c.Stdout = os.Stdout
@@ -176,7 +192,9 @@ func buildForTarget(sourceDir, outputDir, goos, goarch string) (filename string)
filename = build(sourceDir, outputDir, goos, goarch)
touch(filepath.Join(outputDir, filename), mtime)
chmod(filepath.Join(outputDir, filename), 0755)
- filename = compress(goos, outputDir, filename)
+ if !opts.SkipCompress {
+ filename = compress(goos, outputDir, filename)
+ }
return filename
}
@@ -220,7 +238,6 @@ func buildTargets(sourceDir, outputDir string, targets map[string][]string) {
msg("build finished in %.3fs", time.Since(start).Seconds())
}
-// ATTENTION: the list of architectures must be in sync with .github/workflows/tests.yml!
var defaultBuildTargets = map[string][]string{
"aix": {"ppc64"},
"darwin": {"amd64", "arm64"},
@@ -244,15 +261,71 @@ func downloadModules(sourceDir string) {
}
}
+func selectSubset(subset string, target map[string][]string) (map[string][]string, error) {
+ t, n, _ := strings.Cut(subset, "/")
+ part, err := strconv.ParseInt(t, 10, 8)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse platform subset %q", subset)
+ }
+ total, err := strconv.ParseInt(n, 10, 8)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse platform subset %q", subset)
+ }
+ if total < 0 || part < 0 {
+ return nil, errors.New("platform subset out of range")
+ }
+ if part >= total {
+ return nil, errors.New("t must be in 0 <= t < n")
+ }
+
+ // flatten platform list
+ platforms := []string{}
+ for os, archs := range target {
+ for _, arch := range archs {
+ platforms = append(platforms, os+"/"+arch)
+ }
+ }
+ sort.Strings(platforms)
+
+ // select subset
+ lower := len(platforms) * int(part) / int(total)
+ upper := len(platforms) * int(part+1) / int(total)
+ platforms = platforms[lower:upper]
+
+ return buildPlatformList(platforms), nil
+}
+
+func buildPlatformList(platforms []string) map[string][]string {
+ fmt.Printf("Building for %v\n", platforms)
+
+ targets := make(map[string][]string)
+ for _, platform := range platforms {
+ os, arch, _ := strings.Cut(platform, "/")
+ targets[os] = append(targets[os], arch)
+ }
+ return targets
+}
+
func main() {
if len(pflag.Args()) != 0 {
die("USAGE: build-release-binaries [OPTIONS]")
}
+ targets := defaultBuildTargets
+ if opts.PlatformSubset != "" {
+ var err error
+ targets, err = selectSubset(opts.PlatformSubset, targets)
+ if err != nil {
+ die("%s", err)
+ }
+ } else if opts.Platform != "" {
+ targets = buildPlatformList([]string{opts.Platform})
+ }
+
sourceDir := abs(opts.SourceDir)
outputDir := abs(opts.OutputDir)
mkdir(outputDir)
downloadModules(sourceDir)
- buildTargets(sourceDir, outputDir, defaultBuildTargets)
+ buildTargets(sourceDir, outputDir, targets)
}
diff --git a/helpers/prepare-release/main.go b/helpers/prepare-release/main.go
index 03924b0d9..a6c7bd4f4 100644
--- a/helpers/prepare-release/main.go
+++ b/helpers/prepare-release/main.go
@@ -4,6 +4,7 @@ import (
"bufio"
"bytes"
"fmt"
+ "math/rand"
"os"
"os/exec"
"path/filepath"
@@ -409,13 +410,19 @@ func signFiles(filenames ...string) {
}
}
-func updateDocker(outputDir, version string) {
- cmd := fmt.Sprintf("bzcat %s/restic_%s_linux_amd64.bz2 > restic", outputDir, version)
- run("sh", "-c", cmd)
- run("chmod", "+x", "restic")
- run("docker", "pull", "alpine:latest")
- run("docker", "build", "--rm", "--tag", "restic/restic:latest", "-f", "docker/Dockerfile", ".")
- run("docker", "tag", "restic/restic:latest", "restic/restic:"+version)
+func updateDocker(sourceDir, version string) string {
+ r := rand.New(rand.NewSource(time.Now().UnixNano()))
+ builderName := fmt.Sprintf("restic-release-builder-%d", r.Int())
+ run("docker", "buildx", "create", "--name", builderName, "--driver", "docker-container", "--bootstrap")
+
+ buildCmd := fmt.Sprintf("docker buildx build --builder %s --platform linux/386,linux/amd64,linux/arm,linux/arm64 --pull -f docker/Dockerfile.release %q", builderName, sourceDir)
+ run("sh", "-c", buildCmd+" --no-cache")
+
+ publishCmds := ""
+ for _, tag := range []string{"restic/restic:latest", "restic/restic:" + version} {
+ publishCmds += buildCmd + fmt.Sprintf(" --tag %q --push\n", tag)
+ }
+ return publishCmds + "\ndocker buildx rm " + builderName
}
func tempdir(prefix string) string {
@@ -464,15 +471,14 @@ func main() {
extractTar(tarFilename, sourceDir)
runBuild(sourceDir, opts.OutputDir, opts.Version)
- rmdir(sourceDir)
sha256sums(opts.OutputDir, filepath.Join(opts.OutputDir, "SHA256SUMS"))
signFiles(filepath.Join(opts.OutputDir, "SHA256SUMS"), tarFilename)
- updateDocker(opts.OutputDir, opts.Version)
+ dockerCmds := updateDocker(sourceDir, opts.Version)
msg("done, output dir is %v", opts.OutputDir)
- msg("now run:\n\ngit push --tags origin master\ndocker push restic/restic:latest\ndocker push restic/restic:%s\n", opts.Version)
+ msg("now run:\n\ngit push --tags origin master\n%s\n\nrm -rf %q", dockerCmds, sourceDir)
}
diff --git a/internal/archiver/archiver.go b/internal/archiver/archiver.go
index a56965d63..98819d797 100644
--- a/internal/archiver/archiver.go
+++ b/internal/archiver/archiver.go
@@ -207,7 +207,7 @@ func (arch *Archiver) wrapLoadTreeError(id restic.ID, err error) error {
if arch.Repo.Index().Has(restic.BlobHandle{ID: id, Type: restic.TreeBlob}) {
err = errors.Errorf("tree %v could not be loaded; the repository could be damaged: %v", id, err)
} else {
- err = errors.Errorf("tree %v is not known; the repository could be damaged, run `rebuild-index` to try to repair it", id)
+ err = errors.Errorf("tree %v is not known; the repository could be damaged, run `repair index` to try to repair it", id)
}
return err
}
@@ -680,6 +680,7 @@ type SnapshotOptions struct {
Excludes []string
Time time.Time
ParentSnapshot *restic.Snapshot
+ ProgramVersion string
}
// loadParentTree loads a tree referenced by snapshot id. If id is null, nil is returned.
@@ -796,6 +797,7 @@ func (arch *Archiver) Snapshot(ctx context.Context, targets []string, opts Snaps
return nil, restic.ID{}, err
}
+ sn.ProgramVersion = opts.ProgramVersion
sn.Excludes = opts.Excludes
if opts.ParentSnapshot != nil {
sn.Parent = opts.ParentSnapshot.ID()
diff --git a/internal/archiver/archiver_test.go b/internal/archiver/archiver_test.go
index e3a850591..3c87055d8 100644
--- a/internal/archiver/archiver_test.go
+++ b/internal/archiver/archiver_test.go
@@ -419,7 +419,7 @@ type blobCountingRepo struct {
}
func (repo *blobCountingRepo) SaveBlob(ctx context.Context, t restic.BlobType, buf []byte, id restic.ID, storeDuplicate bool) (restic.ID, bool, int, error) {
- id, exists, size, err := repo.Repository.SaveBlob(ctx, t, buf, id, false)
+ id, exists, size, err := repo.Repository.SaveBlob(ctx, t, buf, id, storeDuplicate)
if exists {
return id, exists, size, err
}
@@ -1845,27 +1845,27 @@ type noCancelBackend struct {
restic.Backend
}
-func (c *noCancelBackend) Remove(ctx context.Context, h restic.Handle) error {
+func (c *noCancelBackend) Remove(_ context.Context, h restic.Handle) error {
return c.Backend.Remove(context.Background(), h)
}
-func (c *noCancelBackend) Save(ctx context.Context, h restic.Handle, rd restic.RewindReader) error {
+func (c *noCancelBackend) Save(_ context.Context, h restic.Handle, rd restic.RewindReader) error {
return c.Backend.Save(context.Background(), h, rd)
}
-func (c *noCancelBackend) Load(ctx context.Context, h restic.Handle, length int, offset int64, fn func(rd io.Reader) error) error {
+func (c *noCancelBackend) Load(_ context.Context, h restic.Handle, length int, offset int64, fn func(rd io.Reader) error) error {
return c.Backend.Load(context.Background(), h, length, offset, fn)
}
-func (c *noCancelBackend) Stat(ctx context.Context, h restic.Handle) (restic.FileInfo, error) {
+func (c *noCancelBackend) Stat(_ context.Context, h restic.Handle) (restic.FileInfo, error) {
return c.Backend.Stat(context.Background(), h)
}
-func (c *noCancelBackend) List(ctx context.Context, t restic.FileType, fn func(restic.FileInfo) error) error {
+func (c *noCancelBackend) List(_ context.Context, t restic.FileType, fn func(restic.FileInfo) error) error {
return c.Backend.List(context.Background(), t, fn)
}
-func (c *noCancelBackend) Delete(ctx context.Context) error {
+func (c *noCancelBackend) Delete(_ context.Context) error {
return c.Backend.Delete(context.Background())
}
@@ -2166,7 +2166,7 @@ func TestMetadataChanged(t *testing.T) {
}
// modify the mode by wrapping it in a new struct, uses the consts defined above
- fs.OverrideLstat["testfile"] = wrapFileInfo(t, fi)
+ fs.OverrideLstat["testfile"] = wrapFileInfo(fi)
// set the override values in the 'want' node which
want.Mode = 0400
diff --git a/internal/archiver/archiver_unix_test.go b/internal/archiver/archiver_unix_test.go
index 1167f6852..7523f0749 100644
--- a/internal/archiver/archiver_unix_test.go
+++ b/internal/archiver/archiver_unix_test.go
@@ -6,7 +6,6 @@ package archiver
import (
"os"
"syscall"
- "testing"
)
type wrappedFileInfo struct {
@@ -24,7 +23,7 @@ func (fi wrappedFileInfo) Mode() os.FileMode {
}
// wrapFileInfo returns a new os.FileInfo with the mode, owner, and group fields changed.
-func wrapFileInfo(t testing.TB, fi os.FileInfo) os.FileInfo {
+func wrapFileInfo(fi os.FileInfo) os.FileInfo {
// get the underlying stat_t and modify the values
stat := fi.Sys().(*syscall.Stat_t)
stat.Mode = mockFileInfoMode
diff --git a/internal/archiver/archiver_windows_test.go b/internal/archiver/archiver_windows_test.go
index 1254e64ee..e1195030f 100644
--- a/internal/archiver/archiver_windows_test.go
+++ b/internal/archiver/archiver_windows_test.go
@@ -5,7 +5,6 @@ package archiver
import (
"os"
- "testing"
)
type wrappedFileInfo struct {
@@ -18,7 +17,7 @@ func (fi wrappedFileInfo) Mode() os.FileMode {
}
// wrapFileInfo returns a new os.FileInfo with the mode, owner, and group fields changed.
-func wrapFileInfo(t testing.TB, fi os.FileInfo) os.FileInfo {
+func wrapFileInfo(fi os.FileInfo) os.FileInfo {
// wrap the os.FileInfo and return the modified mode, uid and gid are ignored on Windows
res := wrappedFileInfo{
FileInfo: fi,
diff --git a/internal/archiver/blob_saver_test.go b/internal/archiver/blob_saver_test.go
index 367b7be8b..1996c35b8 100644
--- a/internal/archiver/blob_saver_test.go
+++ b/internal/archiver/blob_saver_test.go
@@ -22,7 +22,7 @@ type saveFail struct {
failAt int32
}
-func (b *saveFail) SaveBlob(ctx context.Context, t restic.BlobType, buf []byte, id restic.ID, storeDuplicates bool) (restic.ID, bool, int, error) {
+func (b *saveFail) SaveBlob(_ context.Context, _ restic.BlobType, _ []byte, id restic.ID, _ bool) (restic.ID, bool, int, error) {
val := atomic.AddInt32(&b.cnt, 1)
if val == b.failAt {
return restic.ID{}, false, 0, errTest
diff --git a/internal/archiver/file_saver_test.go b/internal/archiver/file_saver_test.go
index 5c4472c62..b088eeeed 100644
--- a/internal/archiver/file_saver_test.go
+++ b/internal/archiver/file_saver_test.go
@@ -18,7 +18,7 @@ import (
func createTestFiles(t testing.TB, num int) (files []string) {
tempdir := test.TempDir(t)
- for i := 0; i < 15; i++ {
+ for i := 0; i < num; i++ {
filename := fmt.Sprintf("testfile-%d", i)
err := os.WriteFile(filepath.Join(tempdir, filename), []byte(filename), 0600)
if err != nil {
diff --git a/internal/archiver/tree_saver.go b/internal/archiver/tree_saver.go
index d25781b03..a7dae3873 100644
--- a/internal/archiver/tree_saver.go
+++ b/internal/archiver/tree_saver.go
@@ -105,14 +105,15 @@ func (s *TreeSaver) save(ctx context.Context, job *saveTreeJob) (*restic.Node, I
continue
}
- debug.Log("insert %v", fnr.node.Name)
err := builder.AddNode(fnr.node)
if err != nil && errors.Is(err, restic.ErrTreeNotOrdered) && lastNode != nil && fnr.node.Equals(*lastNode) {
+ debug.Log("insert %v failed: %v", fnr.node.Name, err)
// ignore error if an _identical_ node already exists, but nevertheless issue a warning
_ = s.errFn(fnr.target, err)
err = nil
}
if err != nil {
+ debug.Log("insert %v failed: %v", fnr.node.Name, err)
return nil, stats, err
}
lastNode = fnr.node
diff --git a/internal/archiver/tree_saver_test.go b/internal/archiver/tree_saver_test.go
index 7cc53346c..5de4375d6 100644
--- a/internal/archiver/tree_saver_test.go
+++ b/internal/archiver/tree_saver_test.go
@@ -12,7 +12,7 @@ import (
"golang.org/x/sync/errgroup"
)
-func treeSaveHelper(ctx context.Context, t restic.BlobType, buf *Buffer, cb func(res SaveBlobResponse)) {
+func treeSaveHelper(_ context.Context, _ restic.BlobType, buf *Buffer, cb func(res SaveBlobResponse)) {
cb(SaveBlobResponse{
id: restic.NewRandomID(),
known: false,
diff --git a/internal/backend/azure/azure.go b/internal/backend/azure/azure.go
index 02433795b..50be63d5a 100644
--- a/internal/backend/azure/azure.go
+++ b/internal/backend/azure/azure.go
@@ -14,19 +14,19 @@ import (
"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/backend/layout"
- "github.com/restic/restic/internal/backend/sema"
+ "github.com/restic/restic/internal/backend/location"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/restic"
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming"
+ "github.com/Azure/azure-sdk-for-go/sdk/azidentity"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob"
azContainer "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container"
- "github.com/cenkalti/backoff/v4"
)
// Backend stores data on an azure endpoint.
@@ -34,7 +34,6 @@ type Backend struct {
cfg Config
container *azContainer.Client
connections uint
- sem sema.Semaphore
prefix string
listMaxItems int
layout.Layout
@@ -46,15 +45,25 @@ const defaultListMaxItems = 5000
// make sure that *Backend implements backend.Backend
var _ restic.Backend = &Backend{}
+func NewFactory() location.Factory {
+ return location.NewHTTPBackendFactory("azure", ParseConfig, location.NoPassword, Create, Open)
+}
+
func open(cfg Config, rt http.RoundTripper) (*Backend, error) {
debug.Log("open, config %#v", cfg)
var client *azContainer.Client
var err error
- url := fmt.Sprintf("https://%s.blob.core.windows.net/%s", cfg.AccountName, cfg.Container)
+ var endpointSuffix string
+ if cfg.EndpointSuffix != "" {
+ endpointSuffix = cfg.EndpointSuffix
+ } else {
+ endpointSuffix = "core.windows.net"
+ }
+ url := fmt.Sprintf("https://%s.blob.%s/%s", cfg.AccountName, endpointSuffix, cfg.Container)
opts := &azContainer.ClientOptions{
ClientOptions: azcore.ClientOptions{
- Transport: http.DefaultClient,
+ Transport: &http.Client{Transport: rt},
},
}
@@ -93,19 +102,22 @@ func open(cfg Config, rt http.RoundTripper) (*Backend, error) {
return nil, errors.Wrap(err, "NewAccountSASClientFromEndpointToken")
}
} else {
- return nil, errors.New("no azure authentication information found")
- }
+ debug.Log(" - using DefaultAzureCredential")
+ cred, err := azidentity.NewDefaultAzureCredential(nil)
+ if err != nil {
+ return nil, errors.Wrap(err, "NewDefaultAzureCredential")
+ }
- sem, err := sema.New(cfg.Connections)
- if err != nil {
- return nil, err
+ client, err = azContainer.NewClient(url, cred, opts)
+ if err != nil {
+ return nil, errors.Wrap(err, "NewClient")
+ }
}
be := &Backend{
container: client,
cfg: cfg,
connections: cfg.Connections,
- sem: sem,
Layout: &layout.DefaultLayout{
Path: cfg.Prefix,
Join: path.Join,
@@ -117,7 +129,7 @@ func open(cfg Config, rt http.RoundTripper) (*Backend, error) {
}
// Open opens the Azure backend at specified container.
-func Open(ctx context.Context, cfg Config, rt http.RoundTripper) (*Backend, error) {
+func Open(_ context.Context, cfg Config, rt http.RoundTripper) (*Backend, error) {
return open(cfg, rt)
}
@@ -152,7 +164,6 @@ func (be *Backend) SetListMaxItems(i int) {
// IsNotExist returns true if the error is caused by a not existing file.
func (be *Backend) IsNotExist(err error) bool {
- debug.Log("IsNotExist(%T, %#v)", err, err)
return bloberror.HasCode(err, bloberror.BlobNotFound)
}
@@ -187,16 +198,8 @@ func (be *Backend) Path() string {
// Save stores data in the backend at the handle.
func (be *Backend) Save(ctx context.Context, h restic.Handle, rd restic.RewindReader) error {
- if err := h.Valid(); err != nil {
- return backoff.Permanent(err)
- }
-
objName := be.Filename(h)
- debug.Log("Save %v at %v", h, objName)
-
- be.sem.GetToken()
-
debug.Log("InsertObject(%v, %v)", be.cfg.AccountName, objName)
var err error
@@ -208,9 +211,6 @@ func (be *Backend) Save(ctx context.Context, h restic.Handle, rd restic.RewindRe
err = be.saveLarge(ctx, objName, rd)
}
- be.sem.ReleaseToken()
- debug.Log("%v, err %#v", objName, err)
-
return err
}
@@ -228,7 +228,7 @@ func (be *Backend) saveSmall(ctx context.Context, objName string, rd restic.Rewi
reader := bytes.NewReader(buf)
_, err = blockBlobClient.StageBlock(ctx, id, streaming.NopCloser(reader), &blockblob.StageBlockOptions{
- TransactionalContentMD5: rd.Hash(),
+ TransactionalValidation: blob.TransferValidationTypeMD5(rd.Hash()),
})
if err != nil {
return errors.Wrap(err, "StageBlock")
@@ -271,7 +271,7 @@ func (be *Backend) saveLarge(ctx context.Context, objName string, rd restic.Rewi
reader := bytes.NewReader(buf)
debug.Log("StageBlock %v with %d bytes", id, len(buf))
_, err = blockBlobClient.StageBlock(ctx, id, streaming.NopCloser(reader), &blockblob.StageBlockOptions{
- TransactionalContentMD5: h[:],
+ TransactionalValidation: blob.TransferValidationTypeMD5(h[:]),
})
if err != nil {
@@ -299,23 +299,9 @@ func (be *Backend) Load(ctx context.Context, h restic.Handle, length int, offset
}
func (be *Backend) openReader(ctx context.Context, h restic.Handle, length int, offset int64) (io.ReadCloser, error) {
- debug.Log("Load %v, length %v, offset %v from %v", h, length, offset, be.Filename(h))
- if err := h.Valid(); err != nil {
- return nil, backoff.Permanent(err)
- }
-
- if offset < 0 {
- return nil, errors.New("offset is negative")
- }
-
- if length < 0 {
- return nil, errors.Errorf("invalid length %d", length)
- }
-
objName := be.Filename(h)
blockBlobClient := be.container.NewBlobClient(objName)
- be.sem.GetToken()
resp, err := blockBlobClient.DownloadStream(ctx, &blob.DownloadStreamOptions{
Range: azblob.HTTPRange{
Offset: offset,
@@ -324,26 +310,20 @@ func (be *Backend) openReader(ctx context.Context, h restic.Handle, length int,
})
if err != nil {
- be.sem.ReleaseToken()
return nil, err
}
- return be.sem.ReleaseTokenOnClose(resp.Body, nil), err
+ return resp.Body, err
}
// Stat returns information about a blob.
func (be *Backend) Stat(ctx context.Context, h restic.Handle) (restic.FileInfo, error) {
- debug.Log("%v", h)
-
objName := be.Filename(h)
blobClient := be.container.NewBlobClient(objName)
- be.sem.GetToken()
props, err := blobClient.GetProperties(ctx, nil)
- be.sem.ReleaseToken()
if err != nil {
- debug.Log("blob.GetProperties err %v", err)
return restic.FileInfo{}, errors.Wrap(err, "blob.GetProperties")
}
@@ -359,11 +339,7 @@ func (be *Backend) Remove(ctx context.Context, h restic.Handle) error {
objName := be.Filename(h)
blob := be.container.NewBlobClient(objName)
- be.sem.GetToken()
_, err := blob.Delete(ctx, &azblob.DeleteBlobOptions{})
- be.sem.ReleaseToken()
-
- debug.Log("Remove(%v) at %v -> err %v", h, objName, err)
if be.IsNotExist(err) {
return nil
@@ -375,8 +351,6 @@ func (be *Backend) Remove(ctx context.Context, h restic.Handle) error {
// List runs fn for each file in the backend which has the type t. When an
// error occurs (or fn returns an error), List stops and returns it.
func (be *Backend) List(ctx context.Context, t restic.FileType, fn func(restic.FileInfo) error) error {
- debug.Log("listing %v", t)
-
prefix, _ := be.Basedir(t)
// make sure prefix ends with a slash
@@ -393,9 +367,7 @@ func (be *Backend) List(ctx context.Context, t restic.FileType, fn func(restic.F
lister := be.container.NewListBlobsFlatPager(opts)
for lister.More() {
- be.sem.GetToken()
resp, err := lister.NextPage(ctx)
- be.sem.ReleaseToken()
if err != nil {
return err
@@ -433,30 +405,9 @@ func (be *Backend) List(ctx context.Context, t restic.FileType, fn func(restic.F
return ctx.Err()
}
-// Remove keys for a specified backend type.
-func (be *Backend) removeKeys(ctx context.Context, t restic.FileType) error {
- return be.List(ctx, t, func(fi restic.FileInfo) error {
- return be.Remove(ctx, restic.Handle{Type: t, Name: fi.Name})
- })
-}
-
// Delete removes all restic keys in the bucket. It will not remove the bucket itself.
func (be *Backend) Delete(ctx context.Context) error {
- alltypes := []restic.FileType{
- restic.PackFile,
- restic.KeyFile,
- restic.LockFile,
- restic.SnapshotFile,
- restic.IndexFile}
-
- for _, t := range alltypes {
- err := be.removeKeys(ctx, t)
- if err != nil {
- return nil
- }
- }
-
- return be.Remove(ctx, restic.Handle{Type: restic.ConfigFile})
+ return backend.DefaultDelete(ctx, be)
}
// Close does nothing
diff --git a/internal/backend/azure/azure_test.go b/internal/backend/azure/azure_test.go
index ada6ec2ca..33f65bd52 100644
--- a/internal/backend/azure/azure_test.go
+++ b/internal/backend/azure/azure_test.go
@@ -12,76 +12,29 @@ import (
"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/backend/azure"
"github.com/restic/restic/internal/backend/test"
- "github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/options"
"github.com/restic/restic/internal/restic"
rtest "github.com/restic/restic/internal/test"
)
-func newAzureTestSuite(t testing.TB) *test.Suite {
- tr, err := backend.Transport(backend.TransportOptions{})
- if err != nil {
- t.Fatalf("cannot create transport for tests: %v", err)
- }
-
- return &test.Suite{
+func newAzureTestSuite() *test.Suite[azure.Config] {
+ return &test.Suite[azure.Config]{
// do not use excessive data
MinimalData: true,
// NewConfig returns a config for a new temporary backend that will be used in tests.
- NewConfig: func() (interface{}, error) {
- azcfg, err := azure.ParseConfig(os.Getenv("RESTIC_TEST_AZURE_REPOSITORY"))
+ NewConfig: func() (*azure.Config, error) {
+ cfg, err := azure.ParseConfig(os.Getenv("RESTIC_TEST_AZURE_REPOSITORY"))
if err != nil {
return nil, err
}
- cfg := azcfg.(azure.Config)
- cfg.AccountName = os.Getenv("RESTIC_TEST_AZURE_ACCOUNT_NAME")
- cfg.AccountKey = options.NewSecretString(os.Getenv("RESTIC_TEST_AZURE_ACCOUNT_KEY"))
+ cfg.ApplyEnvironment("RESTIC_TEST_")
cfg.Prefix = fmt.Sprintf("test-%d", time.Now().UnixNano())
return cfg, nil
},
- // CreateFn is a function that creates a temporary repository for the tests.
- Create: func(config interface{}) (restic.Backend, error) {
- cfg := config.(azure.Config)
-
- ctx := context.TODO()
- be, err := azure.Create(ctx, cfg, tr)
- if err != nil {
- return nil, err
- }
-
- _, err = be.Stat(context.TODO(), restic.Handle{Type: restic.ConfigFile})
- if err != nil && !be.IsNotExist(err) {
- return nil, err
- }
-
- if err == nil {
- return nil, errors.New("config already exists")
- }
-
- return be, nil
- },
-
- // OpenFn is a function that opens a previously created temporary repository.
- Open: func(config interface{}) (restic.Backend, error) {
- cfg := config.(azure.Config)
- ctx := context.TODO()
- return azure.Open(ctx, cfg, tr)
- },
-
- // CleanupFn removes data created during the tests.
- Cleanup: func(config interface{}) error {
- cfg := config.(azure.Config)
- ctx := context.TODO()
- be, err := azure.Open(ctx, cfg, tr)
- if err != nil {
- return err
- }
-
- return be.Delete(context.TODO())
- },
+ Factory: azure.NewFactory(),
}
}
@@ -106,7 +59,7 @@ func TestBackendAzure(t *testing.T) {
}
t.Logf("run tests")
- newAzureTestSuite(t).RunTests(t)
+ newAzureTestSuite().RunTests(t)
}
func BenchmarkBackendAzure(t *testing.B) {
@@ -124,7 +77,7 @@ func BenchmarkBackendAzure(t *testing.B) {
}
t.Logf("run tests")
- newAzureTestSuite(t).RunBenchmarks(t)
+ newAzureTestSuite().RunBenchmarks(t)
}
func TestUploadLargeFile(t *testing.T) {
@@ -141,12 +94,11 @@ func TestUploadLargeFile(t *testing.T) {
return
}
- azcfg, err := azure.ParseConfig(os.Getenv("RESTIC_TEST_AZURE_REPOSITORY"))
+ cfg, err := azure.ParseConfig(os.Getenv("RESTIC_TEST_AZURE_REPOSITORY"))
if err != nil {
t.Fatal(err)
}
- cfg := azcfg.(azure.Config)
cfg.AccountName = os.Getenv("RESTIC_TEST_AZURE_ACCOUNT_NAME")
cfg.AccountKey = options.NewSecretString(os.Getenv("RESTIC_TEST_AZURE_ACCOUNT_KEY"))
cfg.Prefix = fmt.Sprintf("test-upload-large-%d", time.Now().UnixNano())
@@ -156,7 +108,7 @@ func TestUploadLargeFile(t *testing.T) {
t.Fatal(err)
}
- be, err := azure.Create(ctx, cfg, tr)
+ be, err := azure.Create(ctx, *cfg, tr)
if err != nil {
t.Fatal(err)
}
diff --git a/internal/backend/azure/config.go b/internal/backend/azure/config.go
index 55b26d4f1..d819b35aa 100644
--- a/internal/backend/azure/config.go
+++ b/internal/backend/azure/config.go
@@ -1,21 +1,24 @@
package azure
import (
+ "os"
"path"
"strings"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/options"
+ "github.com/restic/restic/internal/restic"
)
// Config contains all configuration necessary to connect to an azure compatible
// server.
type Config struct {
- AccountName string
- AccountSAS options.SecretString
- AccountKey options.SecretString
- Container string
- Prefix string
+ AccountName string
+ AccountSAS options.SecretString
+ AccountKey options.SecretString
+ EndpointSuffix string
+ Container string
+ Prefix string
Connections uint `option:"connections" help:"set a limit for the number of concurrent connections (default: 5)"`
}
@@ -33,7 +36,7 @@ func init() {
// ParseConfig parses the string s and extracts the azure config. The
// configuration format is azure:containerName:/[prefix].
-func ParseConfig(s string) (interface{}, error) {
+func ParseConfig(s string) (*Config, error) {
if !strings.HasPrefix(s, "azure:") {
return nil, errors.New("azure: invalid format")
}
@@ -51,5 +54,26 @@ func ParseConfig(s string) (interface{}, error) {
cfg := NewConfig()
cfg.Container = container
cfg.Prefix = prefix
- return cfg, nil
+ return &cfg, nil
+}
+
+var _ restic.ApplyEnvironmenter = &Config{}
+
+// ApplyEnvironment saves values from the environment to the config.
+func (cfg *Config) ApplyEnvironment(prefix string) {
+ if cfg.AccountName == "" {
+ cfg.AccountName = os.Getenv(prefix + "AZURE_ACCOUNT_NAME")
+ }
+
+ if cfg.AccountKey.String() == "" {
+ cfg.AccountKey = options.NewSecretString(os.Getenv(prefix + "AZURE_ACCOUNT_KEY"))
+ }
+
+ if cfg.AccountSAS.String() == "" {
+ cfg.AccountSAS = options.NewSecretString(os.Getenv(prefix + "AZURE_ACCOUNT_SAS"))
+ }
+
+ if cfg.EndpointSuffix == "" {
+ cfg.EndpointSuffix = os.Getenv(prefix + "AZURE_ENDPOINT_SUFFIX")
+ }
}
diff --git a/internal/backend/azure/config_test.go b/internal/backend/azure/config_test.go
index a57542e77..49cda6571 100644
--- a/internal/backend/azure/config_test.go
+++ b/internal/backend/azure/config_test.go
@@ -1,22 +1,23 @@
package azure
-import "testing"
+import (
+ "testing"
-var configTests = []struct {
- s string
- cfg Config
-}{
- {"azure:container-name:/", Config{
+ "github.com/restic/restic/internal/backend/test"
+)
+
+var configTests = []test.ConfigTestData[Config]{
+ {S: "azure:container-name:/", Cfg: Config{
Container: "container-name",
Prefix: "",
Connections: 5,
}},
- {"azure:container-name:/prefix/directory", Config{
+ {S: "azure:container-name:/prefix/directory", Cfg: Config{
Container: "container-name",
Prefix: "prefix/directory",
Connections: 5,
}},
- {"azure:container-name:/prefix/directory/", Config{
+ {S: "azure:container-name:/prefix/directory/", Cfg: Config{
Container: "container-name",
Prefix: "prefix/directory",
Connections: 5,
@@ -24,17 +25,5 @@ var configTests = []struct {
}
func TestParseConfig(t *testing.T) {
- for i, test := range configTests {
- cfg, err := ParseConfig(test.s)
- if err != nil {
- t.Errorf("test %d:%s failed: %v", i, test.s, err)
- continue
- }
-
- if cfg != test.cfg {
- t.Errorf("test %d:\ninput:\n %s\n wrong config, want:\n %v\ngot:\n %v",
- i, test.s, test.cfg, cfg)
- continue
- }
- }
+ test.ParseConfigTester(t, ParseConfig, configTests)
}
diff --git a/internal/backend/b2/b2.go b/internal/backend/b2/b2.go
index 40dbbf893..0bd3b994c 100644
--- a/internal/backend/b2/b2.go
+++ b/internal/backend/b2/b2.go
@@ -11,12 +11,11 @@ import (
"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/backend/layout"
- "github.com/restic/restic/internal/backend/sema"
+ "github.com/restic/restic/internal/backend/location"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/restic"
- "github.com/cenkalti/backoff/v4"
"github.com/kurin/blazer/b2"
"github.com/kurin/blazer/base"
)
@@ -28,7 +27,6 @@ type b2Backend struct {
cfg Config
listMaxItems int
layout.Layout
- sem sema.Semaphore
canDelete bool
}
@@ -39,6 +37,10 @@ const defaultListMaxItems = 10 * 1000
// ensure statically that *b2Backend implements restic.Backend.
var _ restic.Backend = &b2Backend{}
+func NewFactory() location.Factory {
+ return location.NewHTTPBackendFactory("b2", ParseConfig, location.NoPassword, Create, Open)
+}
+
type sniffingRoundTripper struct {
sync.Mutex
lastErr error
@@ -56,6 +58,13 @@ func (s *sniffingRoundTripper) RoundTrip(req *http.Request) (*http.Response, err
}
func newClient(ctx context.Context, cfg Config, rt http.RoundTripper) (*b2.Client, error) {
+ if cfg.AccountID == "" {
+ return nil, errors.Fatalf("unable to open B2 backend: Account ID ($B2_ACCOUNT_ID) is empty")
+ }
+ if cfg.Key.String() == "" {
+ return nil, errors.Fatalf("unable to open B2 backend: Key ($B2_ACCOUNT_KEY) is empty")
+ }
+
sniffer := &sniffingRoundTripper{RoundTripper: rt}
opts := []b2.ClientOption{b2.Transport(sniffer)}
@@ -92,11 +101,6 @@ func Open(ctx context.Context, cfg Config, rt http.RoundTripper) (restic.Backend
return nil, errors.Wrap(err, "Bucket")
}
- sem, err := sema.New(cfg.Connections)
- if err != nil {
- return nil, err
- }
-
be := &b2Backend{
client: client,
bucket: bucket,
@@ -106,7 +110,6 @@ func Open(ctx context.Context, cfg Config, rt http.RoundTripper) (restic.Backend
Path: cfg.Prefix,
},
listMaxItems: defaultListMaxItems,
- sem: sem,
canDelete: true,
}
@@ -134,11 +137,6 @@ func Create(ctx context.Context, cfg Config, rt http.RoundTripper) (restic.Backe
return nil, errors.Wrap(err, "NewBucket")
}
- sem, err := sema.New(cfg.Connections)
- if err != nil {
- return nil, err
- }
-
be := &b2Backend{
client: client,
bucket: bucket,
@@ -148,18 +146,7 @@ func Create(ctx context.Context, cfg Config, rt http.RoundTripper) (restic.Backe
Path: cfg.Prefix,
},
listMaxItems: defaultListMaxItems,
- sem: sem,
}
-
- _, err = be.Stat(ctx, restic.Handle{Type: restic.ConfigFile})
- if err != nil && !be.IsNotExist(err) {
- return nil, err
- }
-
- if err == nil {
- return nil, errors.New("config already exists")
- }
-
return be, nil
}
@@ -202,33 +189,18 @@ func (be *b2Backend) IsNotExist(err error) bool {
// Load runs fn with a reader that yields the contents of the file at h at the
// given offset.
func (be *b2Backend) Load(ctx context.Context, h restic.Handle, length int, offset int64, fn func(rd io.Reader) error) error {
+ ctx, cancel := context.WithCancel(ctx)
+ defer cancel()
+
return backend.DefaultLoad(ctx, h, length, offset, be.openReader, fn)
}
func (be *b2Backend) openReader(ctx context.Context, h restic.Handle, length int, offset int64) (io.ReadCloser, error) {
- debug.Log("Load %v, length %v, offset %v from %v", h, length, offset, be.Filename(h))
- if err := h.Valid(); err != nil {
- return nil, backoff.Permanent(err)
- }
-
- if offset < 0 {
- return nil, errors.New("offset is negative")
- }
-
- if length < 0 {
- return nil, errors.Errorf("invalid length %d", length)
- }
-
- ctx, cancel := context.WithCancel(ctx)
-
- be.sem.GetToken()
-
name := be.Layout.Filename(h)
obj := be.bucket.Object(name)
if offset == 0 && length == 0 {
- rd := obj.NewReader(ctx)
- return be.sem.ReleaseTokenOnClose(rd, cancel), nil
+ return obj.NewReader(ctx), nil
}
// pass a negative length to NewRangeReader so that the remainder of the
@@ -237,8 +209,7 @@ func (be *b2Backend) openReader(ctx context.Context, h restic.Handle, length int
length = -1
}
- rd := obj.NewRangeReader(ctx, offset, int64(length))
- return be.sem.ReleaseTokenOnClose(rd, cancel), nil
+ return obj.NewRangeReader(ctx, offset, int64(length)), nil
}
// Save stores data in the backend at the handle.
@@ -246,21 +217,12 @@ func (be *b2Backend) Save(ctx context.Context, h restic.Handle, rd restic.Rewind
ctx, cancel := context.WithCancel(ctx)
defer cancel()
- if err := h.Valid(); err != nil {
- return backoff.Permanent(err)
- }
-
- be.sem.GetToken()
- defer be.sem.ReleaseToken()
-
name := be.Filename(h)
- debug.Log("Save %v, name %v", h, name)
obj := be.bucket.Object(name)
// b2 always requires sha1 checksums for uploaded file parts
w := obj.NewWriter(ctx)
n, err := io.Copy(w, rd)
- debug.Log(" saved %d bytes, err %v", n, err)
if err != nil {
_ = w.Close()
@@ -276,16 +238,10 @@ func (be *b2Backend) Save(ctx context.Context, h restic.Handle, rd restic.Rewind
// Stat returns information about a blob.
func (be *b2Backend) Stat(ctx context.Context, h restic.Handle) (bi restic.FileInfo, err error) {
- debug.Log("Stat %v", h)
-
- be.sem.GetToken()
- defer be.sem.ReleaseToken()
-
name := be.Filename(h)
obj := be.bucket.Object(name)
info, err := obj.Attrs(ctx)
if err != nil {
- debug.Log("Attrs() err %v", err)
return restic.FileInfo{}, errors.Wrap(err, "Stat")
}
return restic.FileInfo{Size: info.Size, Name: h.Name}, nil
@@ -293,11 +249,6 @@ func (be *b2Backend) Stat(ctx context.Context, h restic.Handle) (bi restic.FileI
// Remove removes the blob with the given name and type.
func (be *b2Backend) Remove(ctx context.Context, h restic.Handle) error {
- debug.Log("Remove %v", h)
-
- be.sem.GetToken()
- defer be.sem.ReleaseToken()
-
// the retry backend will also repeat the remove method up to 10 times
for i := 0; i < 3; i++ {
obj := be.bucket.Object(be.Filename(h))
@@ -332,22 +283,13 @@ func (be *b2Backend) Remove(ctx context.Context, h restic.Handle) error {
return errors.New("failed to delete all file versions")
}
-type semLocker struct {
- sema.Semaphore
-}
-
-func (sm *semLocker) Lock() { sm.GetToken() }
-func (sm *semLocker) Unlock() { sm.ReleaseToken() }
-
// List returns a channel that yields all names of blobs of type t.
func (be *b2Backend) List(ctx context.Context, t restic.FileType, fn func(restic.FileInfo) error) error {
- debug.Log("List %v", t)
-
ctx, cancel := context.WithCancel(ctx)
defer cancel()
prefix, _ := be.Basedir(t)
- iter := be.bucket.List(ctx, b2.ListPrefix(prefix), b2.ListPageSize(be.listMaxItems), b2.ListLocker(&semLocker{be.sem}))
+ iter := be.bucket.List(ctx, b2.ListPrefix(prefix), b2.ListPageSize(be.listMaxItems))
for iter.Next() {
obj := iter.Object()
@@ -366,42 +308,12 @@ func (be *b2Backend) List(ctx context.Context, t restic.FileType, fn func(restic
return err
}
}
- if err := iter.Err(); err != nil {
- debug.Log("List: %v", err)
- return err
- }
- return nil
-}
-
-// Remove keys for a specified backend type.
-func (be *b2Backend) removeKeys(ctx context.Context, t restic.FileType) error {
- debug.Log("removeKeys %v", t)
- return be.List(ctx, t, func(fi restic.FileInfo) error {
- return be.Remove(ctx, restic.Handle{Type: t, Name: fi.Name})
- })
+ return iter.Err()
}
// Delete removes all restic keys in the bucket. It will not remove the bucket itself.
func (be *b2Backend) Delete(ctx context.Context) error {
- alltypes := []restic.FileType{
- restic.PackFile,
- restic.KeyFile,
- restic.LockFile,
- restic.SnapshotFile,
- restic.IndexFile}
-
- for _, t := range alltypes {
- err := be.removeKeys(ctx, t)
- if err != nil {
- return nil
- }
- }
- err := be.Remove(ctx, restic.Handle{Type: restic.ConfigFile})
- if err != nil && be.IsNotExist(err) {
- err = nil
- }
-
- return err
+ return backend.DefaultDelete(ctx, be)
}
// Close does nothing
diff --git a/internal/backend/b2/b2_test.go b/internal/backend/b2/b2_test.go
index 123a61d7c..ab1dcd37b 100644
--- a/internal/backend/b2/b2_test.go
+++ b/internal/backend/b2/b2_test.go
@@ -1,28 +1,19 @@
package b2_test
import (
- "context"
"fmt"
"os"
"testing"
"time"
- "github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/backend/b2"
"github.com/restic/restic/internal/backend/test"
- "github.com/restic/restic/internal/options"
- "github.com/restic/restic/internal/restic"
rtest "github.com/restic/restic/internal/test"
)
-func newB2TestSuite(t testing.TB) *test.Suite {
- tr, err := backend.Transport(backend.TransportOptions{})
- if err != nil {
- t.Fatalf("cannot create transport for tests: %v", err)
- }
-
- return &test.Suite{
+func newB2TestSuite() *test.Suite[b2.Config] {
+ return &test.Suite[b2.Config]{
// do not use excessive data
MinimalData: true,
@@ -30,41 +21,18 @@ func newB2TestSuite(t testing.TB) *test.Suite {
WaitForDelayedRemoval: 10 * time.Second,
// NewConfig returns a config for a new temporary backend that will be used in tests.
- NewConfig: func() (interface{}, error) {
- b2cfg, err := b2.ParseConfig(os.Getenv("RESTIC_TEST_B2_REPOSITORY"))
+ NewConfig: func() (*b2.Config, error) {
+ cfg, err := b2.ParseConfig(os.Getenv("RESTIC_TEST_B2_REPOSITORY"))
if err != nil {
return nil, err
}
- cfg := b2cfg.(b2.Config)
- cfg.AccountID = os.Getenv("RESTIC_TEST_B2_ACCOUNT_ID")
- cfg.Key = options.NewSecretString(os.Getenv("RESTIC_TEST_B2_ACCOUNT_KEY"))
+ cfg.ApplyEnvironment("RESTIC_TEST_")
cfg.Prefix = fmt.Sprintf("test-%d", time.Now().UnixNano())
return cfg, nil
},
- // CreateFn is a function that creates a temporary repository for the tests.
- Create: func(config interface{}) (restic.Backend, error) {
- cfg := config.(b2.Config)
- return b2.Create(context.Background(), cfg, tr)
- },
-
- // OpenFn is a function that opens a previously created temporary repository.
- Open: func(config interface{}) (restic.Backend, error) {
- cfg := config.(b2.Config)
- return b2.Open(context.Background(), cfg, tr)
- },
-
- // CleanupFn removes data created during the tests.
- Cleanup: func(config interface{}) error {
- cfg := config.(b2.Config)
- be, err := b2.Open(context.Background(), cfg, tr)
- if err != nil {
- return err
- }
-
- return be.Delete(context.TODO())
- },
+ Factory: b2.NewFactory(),
}
}
@@ -91,10 +59,10 @@ func TestBackendB2(t *testing.T) {
}()
testVars(t)
- newB2TestSuite(t).RunTests(t)
+ newB2TestSuite().RunTests(t)
}
func BenchmarkBackendb2(t *testing.B) {
testVars(t)
- newB2TestSuite(t).RunBenchmarks(t)
+ newB2TestSuite().RunBenchmarks(t)
}
diff --git a/internal/backend/b2/config.go b/internal/backend/b2/config.go
index ba5141834..94614e44f 100644
--- a/internal/backend/b2/config.go
+++ b/internal/backend/b2/config.go
@@ -1,12 +1,14 @@
package b2
import (
+ "os"
"path"
"regexp"
"strings"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/options"
+ "github.com/restic/restic/internal/restic"
)
// Config contains all configuration necessary to connect to an b2 compatible
@@ -58,7 +60,7 @@ func checkBucketName(name string) error {
// ParseConfig parses the string s and extracts the b2 config. The supported
// configuration format is b2:bucketname/prefix. If no prefix is given the
// prefix "restic" will be used.
-func ParseConfig(s string) (interface{}, error) {
+func ParseConfig(s string) (*Config, error) {
if !strings.HasPrefix(s, "b2:") {
return nil, errors.New("invalid format, want: b2:bucket-name[:path]")
}
@@ -77,5 +79,17 @@ func ParseConfig(s string) (interface{}, error) {
cfg.Bucket = bucket
cfg.Prefix = prefix
- return cfg, nil
+ return &cfg, nil
+}
+
+var _ restic.ApplyEnvironmenter = &Config{}
+
+// ApplyEnvironment saves values from the environment to the config.
+func (cfg *Config) ApplyEnvironment(prefix string) {
+ if cfg.AccountID == "" {
+ cfg.AccountID = os.Getenv(prefix + "B2_ACCOUNT_ID")
+ }
+ if cfg.Key.String() == "" {
+ cfg.Key = options.NewSecretString(os.Getenv(prefix + "B2_ACCOUNT_KEY"))
+ }
}
diff --git a/internal/backend/b2/config_test.go b/internal/backend/b2/config_test.go
index 4194cb62c..f62972005 100644
--- a/internal/backend/b2/config_test.go
+++ b/internal/backend/b2/config_test.go
@@ -1,37 +1,38 @@
package b2
-import "testing"
+import (
+ "testing"
-var configTests = []struct {
- s string
- cfg Config
-}{
- {"b2:bucketname", Config{
+ "github.com/restic/restic/internal/backend/test"
+)
+
+var configTests = []test.ConfigTestData[Config]{
+ {S: "b2:bucketname", Cfg: Config{
Bucket: "bucketname",
Prefix: "",
Connections: 5,
}},
- {"b2:bucketname:", Config{
+ {S: "b2:bucketname:", Cfg: Config{
Bucket: "bucketname",
Prefix: "",
Connections: 5,
}},
- {"b2:bucketname:/prefix/directory", Config{
+ {S: "b2:bucketname:/prefix/directory", Cfg: Config{
Bucket: "bucketname",
Prefix: "prefix/directory",
Connections: 5,
}},
- {"b2:foobar", Config{
+ {S: "b2:foobar", Cfg: Config{
Bucket: "foobar",
Prefix: "",
Connections: 5,
}},
- {"b2:foobar:", Config{
+ {S: "b2:foobar:", Cfg: Config{
Bucket: "foobar",
Prefix: "",
Connections: 5,
}},
- {"b2:foobar:/", Config{
+ {S: "b2:foobar:/", Cfg: Config{
Bucket: "foobar",
Prefix: "",
Connections: 5,
@@ -39,19 +40,7 @@ var configTests = []struct {
}
func TestParseConfig(t *testing.T) {
- for _, test := range configTests {
- t.Run("", func(t *testing.T) {
- cfg, err := ParseConfig(test.s)
- if err != nil {
- t.Fatalf("%s failed: %v", test.s, err)
- }
-
- if cfg != test.cfg {
- t.Fatalf("input: %s\n wrong config, want:\n %#v\ngot:\n %#v",
- test.s, test.cfg, cfg)
- }
- })
- }
+ test.ParseConfigTester(t, ParseConfig, configTests)
}
var invalidConfigTests = []struct {
diff --git a/internal/backend/dryrun/dry_backend.go b/internal/backend/dryrun/dry_backend.go
index 37569c320..f7acb10dd 100644
--- a/internal/backend/dryrun/dry_backend.go
+++ b/internal/backend/dryrun/dry_backend.go
@@ -18,10 +18,9 @@ type Backend struct {
b restic.Backend
}
-// statically ensure that RetryBackend implements restic.Backend.
+// statically ensure that Backend implements restic.Backend.
var _ restic.Backend = &Backend{}
-// New returns a new backend that saves all data in a map in memory.
func New(be restic.Backend) *Backend {
b := &Backend{b: be}
debug.Log("created new dry backend")
@@ -29,19 +28,17 @@ func New(be restic.Backend) *Backend {
}
// Save adds new Data to the backend.
-func (be *Backend) Save(ctx context.Context, h restic.Handle, rd restic.RewindReader) error {
+func (be *Backend) Save(_ context.Context, h restic.Handle, _ restic.RewindReader) error {
if err := h.Valid(); err != nil {
return err
}
- debug.Log("faked saving %v bytes at %v", rd.Length(), h)
-
// don't save anything, just return ok
return nil
}
// Remove deletes a file from the backend.
-func (be *Backend) Remove(ctx context.Context, h restic.Handle) error {
+func (be *Backend) Remove(_ context.Context, _ restic.Handle) error {
return nil
}
@@ -55,7 +52,7 @@ func (be *Backend) Location() string {
}
// Delete removes all data in the backend.
-func (be *Backend) Delete(ctx context.Context) error {
+func (be *Backend) Delete(_ context.Context) error {
return nil
}
diff --git a/internal/backend/dryrun/dry_backend_test.go b/internal/backend/dryrun/dry_backend_test.go
index 6b8f74e0f..69716c340 100644
--- a/internal/backend/dryrun/dry_backend_test.go
+++ b/internal/backend/dryrun/dry_backend_test.go
@@ -40,11 +40,9 @@ func TestDry(t *testing.T) {
{d, "delete", "", "", ""},
{d, "stat", "a", "", "not found"},
{d, "list", "", "", ""},
- {d, "save", "", "", "invalid"},
{m, "save", "a", "baz", ""}, // save a directly to the mem backend
{d, "save", "b", "foob", ""}, // b is not saved
{d, "save", "b", "xxx", ""}, // no error as b is not saved
- {d, "stat", "", "", "invalid"},
{d, "stat", "a", "a 3", ""},
{d, "load", "a", "baz", ""},
{d, "load", "b", "", "not found"},
diff --git a/internal/backend/foreground_unix.go b/internal/backend/foreground_unix.go
index 2b59bdf6c..fcc0dfe78 100644
--- a/internal/backend/foreground_unix.go
+++ b/internal/backend/foreground_unix.go
@@ -7,21 +7,17 @@ import (
"os"
"os/exec"
"os/signal"
- "syscall"
- "unsafe"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/errors"
+
+ "golang.org/x/sys/unix"
)
func tcsetpgrp(fd int, pid int) error {
- _, _, errno := syscall.RawSyscall(syscall.SYS_IOCTL, uintptr(fd),
- uintptr(syscall.TIOCSPGRP), uintptr(unsafe.Pointer(&pid)))
- if errno == 0 {
- return nil
- }
-
- return errno
+ // IoctlSetPointerInt silently casts to int32 internally,
+ // so this assumes pid fits in 31 bits.
+ return unix.IoctlSetPointerInt(fd, unix.TIOCSPGRP, pid)
}
func startForeground(cmd *exec.Cmd) (bg func() error, err error) {
@@ -35,11 +31,11 @@ func startForeground(cmd *exec.Cmd) (bg func() error, err error) {
return bg, cmd.Start()
}
- signal.Ignore(syscall.SIGTTIN)
- signal.Ignore(syscall.SIGTTOU)
+ signal.Ignore(unix.SIGTTIN)
+ signal.Ignore(unix.SIGTTOU)
// run the command in its own process group
- cmd.SysProcAttr = &syscall.SysProcAttr{
+ cmd.SysProcAttr = &unix.SysProcAttr{
Setpgid: true,
}
@@ -51,7 +47,7 @@ func startForeground(cmd *exec.Cmd) (bg func() error, err error) {
}
// move the command's process group into the foreground
- prev := syscall.Getpgrp()
+ prev := unix.Getpgrp()
err = tcsetpgrp(int(tty.Fd()), cmd.Process.Pid)
if err != nil {
_ = tty.Close()
@@ -59,8 +55,8 @@ func startForeground(cmd *exec.Cmd) (bg func() error, err error) {
}
bg = func() error {
- signal.Reset(syscall.SIGTTIN)
- signal.Reset(syscall.SIGTTOU)
+ signal.Reset(unix.SIGTTIN)
+ signal.Reset(unix.SIGTTOU)
// reset the foreground process group
err = tcsetpgrp(int(tty.Fd()), prev)
diff --git a/internal/backend/gs/config.go b/internal/backend/gs/config.go
index 33aec4c99..61a31113f 100644
--- a/internal/backend/gs/config.go
+++ b/internal/backend/gs/config.go
@@ -1,11 +1,13 @@
package gs
import (
+ "os"
"path"
"strings"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/options"
+ "github.com/restic/restic/internal/restic"
)
// Config contains all configuration necessary to connect to a Google Cloud Storage
@@ -16,13 +18,15 @@ type Config struct {
Bucket string
Prefix string
- Connections uint `option:"connections" help:"set a limit for the number of concurrent connections (default: 5)"`
+ Connections uint `option:"connections" help:"set a limit for the number of concurrent connections (default: 5)"`
+ Region string `option:"region" help:"region to create the bucket in (default: us)"`
}
// NewConfig returns a new Config with the default values filled in.
func NewConfig() Config {
return Config{
Connections: 5,
+ Region: "us",
}
}
@@ -32,7 +36,7 @@ func init() {
// ParseConfig parses the string s and extracts the gcs config. The
// supported configuration format is gs:bucketName:/[prefix].
-func ParseConfig(s string) (interface{}, error) {
+func ParseConfig(s string) (*Config, error) {
if !strings.HasPrefix(s, "gs:") {
return nil, errors.New("gs: invalid format")
}
@@ -52,5 +56,14 @@ func ParseConfig(s string) (interface{}, error) {
cfg := NewConfig()
cfg.Bucket = bucket
cfg.Prefix = prefix
- return cfg, nil
+ return &cfg, nil
+}
+
+var _ restic.ApplyEnvironmenter = &Config{}
+
+// ApplyEnvironment saves values from the environment to the config.
+func (cfg *Config) ApplyEnvironment(prefix string) {
+ if cfg.ProjectID == "" {
+ cfg.ProjectID = os.Getenv(prefix + "GOOGLE_PROJECT_ID")
+ }
}
diff --git a/internal/backend/gs/config_test.go b/internal/backend/gs/config_test.go
index fb03e3a20..890de577f 100644
--- a/internal/backend/gs/config_test.go
+++ b/internal/backend/gs/config_test.go
@@ -1,40 +1,32 @@
package gs
-import "testing"
+import (
+ "testing"
-var configTests = []struct {
- s string
- cfg Config
-}{
- {"gs:bucketname:/", Config{
+ "github.com/restic/restic/internal/backend/test"
+)
+
+var configTests = []test.ConfigTestData[Config]{
+ {S: "gs:bucketname:/", Cfg: Config{
Bucket: "bucketname",
Prefix: "",
Connections: 5,
+ Region: "us",
}},
- {"gs:bucketname:/prefix/directory", Config{
+ {S: "gs:bucketname:/prefix/directory", Cfg: Config{
Bucket: "bucketname",
Prefix: "prefix/directory",
Connections: 5,
+ Region: "us",
}},
- {"gs:bucketname:/prefix/directory/", Config{
+ {S: "gs:bucketname:/prefix/directory/", Cfg: Config{
Bucket: "bucketname",
Prefix: "prefix/directory",
Connections: 5,
+ Region: "us",
}},
}
func TestParseConfig(t *testing.T) {
- for i, test := range configTests {
- cfg, err := ParseConfig(test.s)
- if err != nil {
- t.Errorf("test %d:%s failed: %v", i, test.s, err)
- continue
- }
-
- if cfg != test.cfg {
- t.Errorf("test %d:\ninput:\n %s\n wrong config, want:\n %v\ngot:\n %v",
- i, test.s, test.cfg, cfg)
- continue
- }
- }
+ test.ParseConfigTester(t, ParseConfig, configTests)
}
diff --git a/internal/backend/gs/gs.go b/internal/backend/gs/gs.go
index 77cbcda97..5c12654d6 100644
--- a/internal/backend/gs/gs.go
+++ b/internal/backend/gs/gs.go
@@ -15,7 +15,7 @@ import (
"github.com/pkg/errors"
"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/backend/layout"
- "github.com/restic/restic/internal/backend/sema"
+ "github.com/restic/restic/internal/backend/location"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/restic"
@@ -37,8 +37,8 @@ type Backend struct {
gcsClient *storage.Client
projectID string
connections uint
- sem sema.Semaphore
bucketName string
+ region string
bucket *storage.BucketHandle
prefix string
listMaxItems int
@@ -48,6 +48,10 @@ type Backend struct {
// Ensure that *Backend implements restic.Backend.
var _ restic.Backend = &Backend{}
+func NewFactory() location.Factory {
+ return location.NewHTTPBackendFactory("gs", ParseConfig, location.NoPassword, Create, Open)
+}
+
func getStorageClient(rt http.RoundTripper) (*storage.Client, error) {
// create a new HTTP client
httpClient := &http.Client{
@@ -99,17 +103,12 @@ func open(cfg Config, rt http.RoundTripper) (*Backend, error) {
return nil, errors.Wrap(err, "getStorageClient")
}
- sem, err := sema.New(cfg.Connections)
- if err != nil {
- return nil, err
- }
-
be := &Backend{
gcsClient: gcsClient,
projectID: cfg.ProjectID,
connections: cfg.Connections,
- sem: sem,
bucketName: cfg.Bucket,
+ region: cfg.Region,
bucket: gcsClient.Bucket(cfg.Bucket),
prefix: cfg.Prefix,
Layout: &layout.DefaultLayout{
@@ -123,7 +122,7 @@ func open(cfg Config, rt http.RoundTripper) (*Backend, error) {
}
// Open opens the gs backend at the specified bucket.
-func Open(cfg Config, rt http.RoundTripper) (restic.Backend, error) {
+func Open(_ context.Context, cfg Config, rt http.RoundTripper) (restic.Backend, error) {
return open(cfg, rt)
}
@@ -132,14 +131,13 @@ func Open(cfg Config, rt http.RoundTripper) (restic.Backend, error) {
//
// The service account must have the "storage.buckets.create" permission to
// create a bucket the does not yet exist.
-func Create(cfg Config, rt http.RoundTripper) (restic.Backend, error) {
+func Create(ctx context.Context, cfg Config, rt http.RoundTripper) (restic.Backend, error) {
be, err := open(cfg, rt)
if err != nil {
return nil, errors.Wrap(err, "open")
}
// Try to determine if the bucket exists. If it does not, try to create it.
- ctx := context.Background()
exists, err := be.bucketExists(ctx, be.bucket)
if err != nil {
if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusForbidden {
@@ -151,8 +149,11 @@ func Create(cfg Config, rt http.RoundTripper) (restic.Backend, error) {
}
if !exists {
+ bucketAttrs := &storage.BucketAttrs{
+ Location: cfg.Region,
+ }
// Bucket doesn't exist, try to create it.
- if err := be.bucket.Create(ctx, be.projectID, nil); err != nil {
+ if err := be.bucket.Create(ctx, be.projectID, bucketAttrs); err != nil {
// Always an error, as the bucket definitely doesn't exist.
return nil, errors.Wrap(err, "service.Buckets.Insert")
}
@@ -169,7 +170,6 @@ func (be *Backend) SetListMaxItems(i int) {
// IsNotExist returns true if the error is caused by a not existing file.
func (be *Backend) IsNotExist(err error) bool {
- debug.Log("IsNotExist(%T, %#v)", err, err)
return errors.Is(err, storage.ErrObjectNotExist)
}
@@ -204,18 +204,8 @@ func (be *Backend) Path() string {
// Save stores data in the backend at the handle.
func (be *Backend) Save(ctx context.Context, h restic.Handle, rd restic.RewindReader) error {
- if err := h.Valid(); err != nil {
- return err
- }
-
objName := be.Filename(h)
- debug.Log("Save %v at %v", h, objName)
-
- be.sem.GetToken()
-
- debug.Log("InsertObject(%v, %v)", be.bucketName, objName)
-
// Set chunk size to zero to disable resumable uploads.
//
// With a non-zero chunk size (the default is
@@ -250,14 +240,10 @@ func (be *Backend) Save(ctx context.Context, h restic.Handle, rd restic.RewindRe
err = cerr
}
- be.sem.ReleaseToken()
-
if err != nil {
- debug.Log("%v: err %#v: %v", objName, err, err)
return errors.Wrap(err, "service.Objects.Insert")
}
- debug.Log("%v -> %v bytes", objName, wbytes)
// sanity check
if wbytes != rd.Length() {
return errors.Errorf("wrote %d bytes instead of the expected %d bytes", wbytes, rd.Length())
@@ -268,22 +254,13 @@ func (be *Backend) Save(ctx context.Context, h restic.Handle, rd restic.RewindRe
// Load runs fn with a reader that yields the contents of the file at h at the
// given offset.
func (be *Backend) Load(ctx context.Context, h restic.Handle, length int, offset int64, fn func(rd io.Reader) error) error {
+ ctx, cancel := context.WithCancel(ctx)
+ defer cancel()
+
return backend.DefaultLoad(ctx, h, length, offset, be.openReader, fn)
}
func (be *Backend) openReader(ctx context.Context, h restic.Handle, length int, offset int64) (io.ReadCloser, error) {
- debug.Log("Load %v, length %v, offset %v from %v", h, length, offset, be.Filename(h))
- if err := h.Valid(); err != nil {
- return nil, err
- }
-
- if offset < 0 {
- return nil, errors.New("offset is negative")
- }
-
- if length < 0 {
- return nil, errors.Errorf("invalid length %d", length)
- }
if length == 0 {
// negative length indicates read till end to GCS lib
length = -1
@@ -291,32 +268,21 @@ func (be *Backend) openReader(ctx context.Context, h restic.Handle, length int,
objName := be.Filename(h)
- be.sem.GetToken()
-
- ctx, cancel := context.WithCancel(ctx)
-
r, err := be.bucket.Object(objName).NewRangeReader(ctx, offset, int64(length))
if err != nil {
- cancel()
- be.sem.ReleaseToken()
return nil, err
}
- return be.sem.ReleaseTokenOnClose(r, cancel), err
+ return r, err
}
// Stat returns information about a blob.
func (be *Backend) Stat(ctx context.Context, h restic.Handle) (bi restic.FileInfo, err error) {
- debug.Log("%v", h)
-
objName := be.Filename(h)
- be.sem.GetToken()
attr, err := be.bucket.Object(objName).Attrs(ctx)
- be.sem.ReleaseToken()
if err != nil {
- debug.Log("GetObjectAttributes() err %v", err)
return restic.FileInfo{}, errors.Wrap(err, "service.Objects.Get")
}
@@ -327,23 +293,18 @@ func (be *Backend) Stat(ctx context.Context, h restic.Handle) (bi restic.FileInf
func (be *Backend) Remove(ctx context.Context, h restic.Handle) error {
objName := be.Filename(h)
- be.sem.GetToken()
err := be.bucket.Object(objName).Delete(ctx)
- be.sem.ReleaseToken()
- if err == storage.ErrObjectNotExist {
+ if be.IsNotExist(err) {
err = nil
}
- debug.Log("Remove(%v) at %v -> err %v", h, objName, err)
return errors.Wrap(err, "client.RemoveObject")
}
// List runs fn for each file in the backend which has the type t. When an
// error occurs (or fn returns an error), List stops and returns it.
func (be *Backend) List(ctx context.Context, t restic.FileType, fn func(restic.FileInfo) error) error {
- debug.Log("listing %v", t)
-
prefix, _ := be.Basedir(t)
// make sure prefix ends with a slash
@@ -357,9 +318,7 @@ func (be *Backend) List(ctx context.Context, t restic.FileType, fn func(restic.F
itr := be.bucket.Objects(ctx, &storage.Query{Prefix: prefix})
for {
- be.sem.GetToken()
attrs, err := itr.Next()
- be.sem.ReleaseToken()
if err == iterator.Done {
break
}
@@ -389,30 +348,9 @@ func (be *Backend) List(ctx context.Context, t restic.FileType, fn func(restic.F
return ctx.Err()
}
-// Remove keys for a specified backend type.
-func (be *Backend) removeKeys(ctx context.Context, t restic.FileType) error {
- return be.List(ctx, t, func(fi restic.FileInfo) error {
- return be.Remove(ctx, restic.Handle{Type: t, Name: fi.Name})
- })
-}
-
// Delete removes all restic keys in the bucket. It will not remove the bucket itself.
func (be *Backend) Delete(ctx context.Context) error {
- alltypes := []restic.FileType{
- restic.PackFile,
- restic.KeyFile,
- restic.LockFile,
- restic.SnapshotFile,
- restic.IndexFile}
-
- for _, t := range alltypes {
- err := be.removeKeys(ctx, t)
- if err != nil {
- return nil
- }
- }
-
- return be.Remove(ctx, restic.Handle{Type: restic.ConfigFile})
+ return backend.DefaultDelete(ctx, be)
}
// Close does nothing.
diff --git a/internal/backend/gs/gs_test.go b/internal/backend/gs/gs_test.go
index 77f8986f1..22953cad3 100644
--- a/internal/backend/gs/gs_test.go
+++ b/internal/backend/gs/gs_test.go
@@ -1,81 +1,34 @@
package gs_test
import (
- "context"
"fmt"
"os"
"testing"
"time"
- "github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/backend/gs"
"github.com/restic/restic/internal/backend/test"
- "github.com/restic/restic/internal/errors"
- "github.com/restic/restic/internal/restic"
rtest "github.com/restic/restic/internal/test"
)
-func newGSTestSuite(t testing.TB) *test.Suite {
- tr, err := backend.Transport(backend.TransportOptions{})
- if err != nil {
- t.Fatalf("cannot create transport for tests: %v", err)
- }
-
- return &test.Suite{
+func newGSTestSuite() *test.Suite[gs.Config] {
+ return &test.Suite[gs.Config]{
// do not use excessive data
MinimalData: true,
// NewConfig returns a config for a new temporary backend that will be used in tests.
- NewConfig: func() (interface{}, error) {
- gscfg, err := gs.ParseConfig(os.Getenv("RESTIC_TEST_GS_REPOSITORY"))
+ NewConfig: func() (*gs.Config, error) {
+ cfg, err := gs.ParseConfig(os.Getenv("RESTIC_TEST_GS_REPOSITORY"))
if err != nil {
return nil, err
}
- cfg := gscfg.(gs.Config)
cfg.ProjectID = os.Getenv("RESTIC_TEST_GS_PROJECT_ID")
cfg.Prefix = fmt.Sprintf("test-%d", time.Now().UnixNano())
return cfg, nil
},
- // CreateFn is a function that creates a temporary repository for the tests.
- Create: func(config interface{}) (restic.Backend, error) {
- cfg := config.(gs.Config)
-
- be, err := gs.Create(cfg, tr)
- if err != nil {
- return nil, err
- }
-
- _, err = be.Stat(context.TODO(), restic.Handle{Type: restic.ConfigFile})
- if err != nil && !be.IsNotExist(err) {
- return nil, err
- }
-
- if err == nil {
- return nil, errors.New("config already exists")
- }
-
- return be, nil
- },
-
- // OpenFn is a function that opens a previously created temporary repository.
- Open: func(config interface{}) (restic.Backend, error) {
- cfg := config.(gs.Config)
- return gs.Open(cfg, tr)
- },
-
- // CleanupFn removes data created during the tests.
- Cleanup: func(config interface{}) error {
- cfg := config.(gs.Config)
-
- be, err := gs.Open(cfg, tr)
- if err != nil {
- return err
- }
-
- return be.Delete(context.TODO())
- },
+ Factory: gs.NewFactory(),
}
}
@@ -103,7 +56,7 @@ func TestBackendGS(t *testing.T) {
}
t.Logf("run tests")
- newGSTestSuite(t).RunTests(t)
+ newGSTestSuite().RunTests(t)
}
func BenchmarkBackendGS(t *testing.B) {
@@ -124,5 +77,5 @@ func BenchmarkBackendGS(t *testing.B) {
}
t.Logf("run tests")
- newGSTestSuite(t).RunBenchmarks(t)
+ newGSTestSuite().RunBenchmarks(t)
}
diff --git a/internal/backend/layout/layout.go b/internal/backend/layout/layout.go
index 14fb8dcdc..b83f4c05b 100644
--- a/internal/backend/layout/layout.go
+++ b/internal/backend/layout/layout.go
@@ -37,7 +37,7 @@ type LocalFilesystem struct {
}
// ReadDir returns all entries of a directory.
-func (l *LocalFilesystem) ReadDir(ctx context.Context, dir string) ([]os.FileInfo, error) {
+func (l *LocalFilesystem) ReadDir(_ context.Context, dir string) ([]os.FileInfo, error) {
f, err := fs.Open(dir)
if err != nil {
return nil, err
diff --git a/internal/backend/limiter/limiter_backend.go b/internal/backend/limiter/limiter_backend.go
index f1b508327..a91794037 100644
--- a/internal/backend/limiter/limiter_backend.go
+++ b/internal/backend/limiter/limiter_backend.go
@@ -7,6 +7,21 @@ import (
"github.com/restic/restic/internal/restic"
)
+func WrapBackendConstructor[B restic.Backend, C any](constructor func(ctx context.Context, cfg C) (B, error)) func(ctx context.Context, cfg C, lim Limiter) (restic.Backend, error) {
+ return func(ctx context.Context, cfg C, lim Limiter) (restic.Backend, error) {
+ var be restic.Backend
+ be, err := constructor(ctx, cfg)
+ if err != nil {
+ return nil, err
+ }
+
+ if lim != nil {
+ be = LimitBackend(be, lim)
+ }
+ return be, nil
+ }
+}
+
// LimitBackend wraps a Backend and applies rate limiting to Load() and Save()
// calls on the backend.
func LimitBackend(be restic.Backend, l Limiter) restic.Backend {
@@ -46,6 +61,8 @@ func (r rateLimitedBackend) Load(ctx context.Context, h restic.Handle, length in
})
}
+func (r rateLimitedBackend) Unwrap() restic.Backend { return r.Backend }
+
type limitedReader struct {
io.Reader
writerTo io.WriterTo
diff --git a/internal/backend/local/config.go b/internal/backend/local/config.go
index e59d1f693..dc5e7948c 100644
--- a/internal/backend/local/config.go
+++ b/internal/backend/local/config.go
@@ -27,12 +27,12 @@ func init() {
}
// ParseConfig parses a local backend config.
-func ParseConfig(s string) (interface{}, error) {
+func ParseConfig(s string) (*Config, error) {
if !strings.HasPrefix(s, "local:") {
return nil, errors.New(`invalid format, prefix "local" not found`)
}
cfg := NewConfig()
cfg.Path = s[6:]
- return cfg, nil
+ return &cfg, nil
}
diff --git a/internal/backend/local/config_test.go b/internal/backend/local/config_test.go
new file mode 100644
index 000000000..4c2ebc7bc
--- /dev/null
+++ b/internal/backend/local/config_test.go
@@ -0,0 +1,46 @@
+package local
+
+import (
+ "testing"
+
+ "github.com/restic/restic/internal/backend/test"
+)
+
+var configTests = []test.ConfigTestData[Config]{
+ {S: "local:/some/path", Cfg: Config{
+ Path: "/some/path",
+ Connections: 2,
+ }},
+ {S: "local:dir1/dir2", Cfg: Config{
+ Path: "dir1/dir2",
+ Connections: 2,
+ }},
+ {S: "local:../dir1/dir2", Cfg: Config{
+ Path: "../dir1/dir2",
+ Connections: 2,
+ }},
+ {S: "local:/dir1:foobar/dir2", Cfg: Config{
+ Path: "/dir1:foobar/dir2",
+ Connections: 2,
+ }},
+ {S: `local:\dir1\foobar\dir2`, Cfg: Config{
+ Path: `\dir1\foobar\dir2`,
+ Connections: 2,
+ }},
+ {S: `local:c:\dir1\foobar\dir2`, Cfg: Config{
+ Path: `c:\dir1\foobar\dir2`,
+ Connections: 2,
+ }},
+ {S: `local:C:\Users\appveyor\AppData\Local\Temp\1\restic-test-879453535\repo`, Cfg: Config{
+ Path: `C:\Users\appveyor\AppData\Local\Temp\1\restic-test-879453535\repo`,
+ Connections: 2,
+ }},
+ {S: `local:c:/dir1/foobar/dir2`, Cfg: Config{
+ Path: `c:/dir1/foobar/dir2`,
+ Connections: 2,
+ }},
+}
+
+func TestParseConfig(t *testing.T) {
+ test.ParseConfigTester(t, ParseConfig, configTests)
+}
diff --git a/internal/backend/local/local.go b/internal/backend/local/local.go
index 1716e0f07..4198102c2 100644
--- a/internal/backend/local/local.go
+++ b/internal/backend/local/local.go
@@ -10,7 +10,8 @@ import (
"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/backend/layout"
- "github.com/restic/restic/internal/backend/sema"
+ "github.com/restic/restic/internal/backend/limiter"
+ "github.com/restic/restic/internal/backend/location"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/fs"
@@ -22,7 +23,6 @@ import (
// Local is a backend in a local directory.
type Local struct {
Config
- sem sema.Semaphore
layout.Layout
backend.Modes
}
@@ -30,6 +30,10 @@ type Local struct {
// ensure statically that *Local implements restic.Backend.
var _ restic.Backend = &Local{}
+func NewFactory() location.Factory {
+ return location.NewLimitedBackendFactory("local", ParseConfig, location.NoPassword, limiter.WrapBackendConstructor(Create), limiter.WrapBackendConstructor(Open))
+}
+
const defaultLayout = "default"
func open(ctx context.Context, cfg Config) (*Local, error) {
@@ -38,11 +42,6 @@ func open(ctx context.Context, cfg Config) (*Local, error) {
return nil, err
}
- sem, err := sema.New(cfg.Connections)
- if err != nil {
- return nil, err
- }
-
fi, err := fs.Stat(l.Filename(restic.Handle{Type: restic.ConfigFile}))
m := backend.DeriveModesFromFileInfo(fi, err)
debug.Log("using (%03O file, %03O dir) permissions", m.File, m.Dir)
@@ -50,7 +49,6 @@ func open(ctx context.Context, cfg Config) (*Local, error) {
return &Local{
Config: cfg,
Layout: l,
- sem: sem,
Modes: m,
}, nil
}
@@ -113,12 +111,7 @@ func (b *Local) IsNotExist(err error) bool {
}
// Save stores data in the backend at the handle.
-func (b *Local) Save(ctx context.Context, h restic.Handle, rd restic.RewindReader) (err error) {
- debug.Log("Save %v", h)
- if err := h.Valid(); err != nil {
- return backoff.Permanent(err)
- }
-
+func (b *Local) Save(_ context.Context, h restic.Handle, rd restic.RewindReader) (err error) {
finalname := b.Filename(h)
dir := filepath.Dir(finalname)
@@ -129,9 +122,6 @@ func (b *Local) Save(ctx context.Context, h restic.Handle, rd restic.RewindReade
}
}()
- b.sem.GetToken()
- defer b.sem.ReleaseToken()
-
// Create new file with a temporary name.
tmpname := filepath.Base(finalname) + "-tmp-"
f, err := tempFile(dir, tmpname)
@@ -164,6 +154,13 @@ func (b *Local) Save(ctx context.Context, h restic.Handle, rd restic.RewindReade
}
}(f)
+ // preallocate disk space
+ if size := rd.Length(); size > 0 {
+ if err := fs.PreallocateFile(f, size); err != nil {
+ debug.Log("Failed to preallocate %v with size %v: %v", finalname, size, err)
+ }
+ }
+
// save data, then sync
wbytes, err := io.Copy(f, rd)
if err != nil {
@@ -216,51 +213,29 @@ func (b *Local) Load(ctx context.Context, h restic.Handle, length int, offset in
return backend.DefaultLoad(ctx, h, length, offset, b.openReader, fn)
}
-func (b *Local) openReader(ctx context.Context, h restic.Handle, length int, offset int64) (io.ReadCloser, error) {
- debug.Log("Load %v, length %v, offset %v", h, length, offset)
- if err := h.Valid(); err != nil {
- return nil, backoff.Permanent(err)
- }
-
- if offset < 0 {
- return nil, errors.New("offset is negative")
- }
-
- b.sem.GetToken()
+func (b *Local) openReader(_ context.Context, h restic.Handle, length int, offset int64) (io.ReadCloser, error) {
f, err := fs.Open(b.Filename(h))
if err != nil {
- b.sem.ReleaseToken()
return nil, err
}
if offset > 0 {
_, err = f.Seek(offset, 0)
if err != nil {
- b.sem.ReleaseToken()
_ = f.Close()
return nil, err
}
}
- r := b.sem.ReleaseTokenOnClose(f, nil)
-
if length > 0 {
- return backend.LimitReadCloser(r, int64(length)), nil
+ return backend.LimitReadCloser(f, int64(length)), nil
}
- return r, nil
+ return f, nil
}
// Stat returns information about a blob.
-func (b *Local) Stat(ctx context.Context, h restic.Handle) (restic.FileInfo, error) {
- debug.Log("Stat %v", h)
- if err := h.Valid(); err != nil {
- return restic.FileInfo{}, backoff.Permanent(err)
- }
-
- b.sem.GetToken()
- defer b.sem.ReleaseToken()
-
+func (b *Local) Stat(_ context.Context, h restic.Handle) (restic.FileInfo, error) {
fi, err := fs.Stat(b.Filename(h))
if err != nil {
return restic.FileInfo{}, errors.WithStack(err)
@@ -270,13 +245,9 @@ func (b *Local) Stat(ctx context.Context, h restic.Handle) (restic.FileInfo, err
}
// Remove removes the blob with the given name and type.
-func (b *Local) Remove(ctx context.Context, h restic.Handle) error {
- debug.Log("Remove %v", h)
+func (b *Local) Remove(_ context.Context, h restic.Handle) error {
fn := b.Filename(h)
- b.sem.GetToken()
- defer b.sem.ReleaseToken()
-
// reset read-only flag
err := fs.Chmod(fn, 0666)
if err != nil && !os.IsPermission(err) {
@@ -289,8 +260,6 @@ func (b *Local) Remove(ctx context.Context, h restic.Handle) error {
// List runs fn for each file in the backend which has the type t. When an
// error occurs (or fn returns an error), List stops and returns it.
func (b *Local) List(ctx context.Context, t restic.FileType, fn func(restic.FileInfo) error) (err error) {
- debug.Log("List %v", t)
-
basedir, subdirs := b.Basedir(t)
if subdirs {
err = visitDirs(ctx, basedir, fn)
@@ -383,14 +352,12 @@ func visitFiles(ctx context.Context, dir string, fn func(restic.FileInfo) error,
}
// Delete removes the repository and all files.
-func (b *Local) Delete(ctx context.Context) error {
- debug.Log("Delete()")
+func (b *Local) Delete(_ context.Context) error {
return fs.RemoveAll(b.Path)
}
// Close closes all open files.
func (b *Local) Close() error {
- debug.Log("Close()")
// this does not need to do anything, all open files are closed within the
// same function.
return nil
diff --git a/internal/backend/local/local_test.go b/internal/backend/local/local_test.go
index 495f220a0..2a8b626d4 100644
--- a/internal/backend/local/local_test.go
+++ b/internal/backend/local/local_test.go
@@ -8,50 +8,24 @@ import (
"github.com/restic/restic/internal/backend/local"
"github.com/restic/restic/internal/backend/test"
- "github.com/restic/restic/internal/restic"
rtest "github.com/restic/restic/internal/test"
)
-func newTestSuite(t testing.TB) *test.Suite {
- return &test.Suite{
+func newTestSuite(t testing.TB) *test.Suite[local.Config] {
+ return &test.Suite[local.Config]{
// NewConfig returns a config for a new temporary backend that will be used in tests.
- NewConfig: func() (interface{}, error) {
- dir, err := os.MkdirTemp(rtest.TestTempDir, "restic-test-local-")
- if err != nil {
- t.Fatal(err)
- }
-
+ NewConfig: func() (*local.Config, error) {
+ dir := rtest.TempDir(t)
t.Logf("create new backend at %v", dir)
- cfg := local.Config{
+ cfg := &local.Config{
Path: dir,
Connections: 2,
}
return cfg, nil
},
- // CreateFn is a function that creates a temporary repository for the tests.
- Create: func(config interface{}) (restic.Backend, error) {
- cfg := config.(local.Config)
- return local.Create(context.TODO(), cfg)
- },
-
- // OpenFn is a function that opens a previously created temporary repository.
- Open: func(config interface{}) (restic.Backend, error) {
- cfg := config.(local.Config)
- return local.Open(context.TODO(), cfg)
- },
-
- // CleanupFn removes data created during the tests.
- Cleanup: func(config interface{}) error {
- cfg := config.(local.Config)
- if !rtest.TestCleanupTempDirs {
- t.Logf("leaving test backend dir at %v", cfg.Path)
- }
-
- rtest.RemoveAll(t, cfg.Path)
- return nil
- },
+ Factory: local.NewFactory(),
}
}
diff --git a/internal/backend/location/display_location_test.go b/internal/backend/location/display_location_test.go
index 30d3cc286..19502d85b 100644
--- a/internal/backend/location/display_location_test.go
+++ b/internal/backend/location/display_location_test.go
@@ -1,96 +1,29 @@
-package location
+package location_test
-import "testing"
+import (
+ "testing"
-var passwordTests = []struct {
- input string
- expected string
-}{
- {
- "local:/srv/repo",
- "local:/srv/repo",
- },
- {
- "/dir1/dir2",
- "/dir1/dir2",
- },
- {
- `c:\dir1\foobar\dir2`,
- `c:\dir1\foobar\dir2`,
- },
- {
- "sftp:user@host:/srv/repo",
- "sftp:user@host:/srv/repo",
- },
- {
- "s3://eu-central-1/bucketname",
- "s3://eu-central-1/bucketname",
- },
- {
- "swift:container17:/prefix97",
- "swift:container17:/prefix97",
- },
- {
- "b2:bucketname:/prefix",
- "b2:bucketname:/prefix",
- },
- {
- "rest:",
- "rest:/",
- },
- {
- "rest:localhost/",
- "rest:localhost/",
- },
- {
- "rest::123/",
- "rest::123/",
- },
- {
- "rest:http://",
- "rest:http://",
- },
- {
- "rest:http://hostname.foo:1234/",
- "rest:http://hostname.foo:1234/",
- },
- {
- "rest:http://user@hostname.foo:1234/",
- "rest:http://user@hostname.foo:1234/",
- },
- {
- "rest:http://user:@hostname.foo:1234/",
- "rest:http://user:***@hostname.foo:1234/",
- },
- {
- "rest:http://user:p@hostname.foo:1234/",
- "rest:http://user:***@hostname.foo:1234/",
- },
- {
- "rest:http://user:pppppaaafhhfuuwiiehhthhghhdkjaoowpprooghjjjdhhwuuhgjsjhhfdjhruuhsjsdhhfhshhsppwufhhsjjsjs@hostname.foo:1234/",
- "rest:http://user:***@hostname.foo:1234/",
- },
- {
- "rest:http://user:password@hostname",
- "rest:http://user:***@hostname/",
- },
- {
- "rest:http://user:password@:123",
- "rest:http://user:***@:123/",
- },
- {
- "rest:http://user:password@",
- "rest:http://user:***@/",
- },
-}
+ "github.com/restic/restic/internal/backend/location"
+ "github.com/restic/restic/internal/restic"
+ "github.com/restic/restic/internal/test"
+)
func TestStripPassword(t *testing.T) {
- for i, test := range passwordTests {
- t.Run(test.input, func(t *testing.T) {
- result := StripPassword(test.input)
- if result != test.expected {
- t.Errorf("test %d: expected '%s' but got '%s'", i, test.expected, result)
- }
- })
- }
+ registry := location.NewRegistry()
+ registry.Register(
+ location.NewHTTPBackendFactory[any, restic.Backend]("test", nil,
+ func(s string) string {
+ return "cleaned"
+ }, nil, nil,
+ ),
+ )
+
+ t.Run("valid", func(t *testing.T) {
+ clean := location.StripPassword(registry, "test:secret")
+ test.Equals(t, "cleaned", clean)
+ })
+ t.Run("unknown", func(t *testing.T) {
+ clean := location.StripPassword(registry, "invalid:secret")
+ test.Equals(t, "invalid:secret", clean)
+ })
}
diff --git a/internal/backend/location/location.go b/internal/backend/location/location.go
index a732233cc..947ca17c3 100644
--- a/internal/backend/location/location.go
+++ b/internal/backend/location/location.go
@@ -4,15 +4,6 @@ package location
import (
"strings"
- "github.com/restic/restic/internal/backend/azure"
- "github.com/restic/restic/internal/backend/b2"
- "github.com/restic/restic/internal/backend/gs"
- "github.com/restic/restic/internal/backend/local"
- "github.com/restic/restic/internal/backend/rclone"
- "github.com/restic/restic/internal/backend/rest"
- "github.com/restic/restic/internal/backend/s3"
- "github.com/restic/restic/internal/backend/sftp"
- "github.com/restic/restic/internal/backend/swift"
"github.com/restic/restic/internal/errors"
)
@@ -23,28 +14,8 @@ type Location struct {
Config interface{}
}
-type parser struct {
- scheme string
- parse func(string) (interface{}, error)
- stripPassword func(string) string
-}
-
-// parsers is a list of valid config parsers for the backends. The first parser
-// is the fallback and should always be set to the local backend.
-var parsers = []parser{
- {"b2", b2.ParseConfig, noPassword},
- {"local", local.ParseConfig, noPassword},
- {"sftp", sftp.ParseConfig, noPassword},
- {"s3", s3.ParseConfig, noPassword},
- {"gs", gs.ParseConfig, noPassword},
- {"azure", azure.ParseConfig, noPassword},
- {"swift", swift.ParseConfig, noPassword},
- {"rest", rest.ParseConfig, rest.StripPassword},
- {"rclone", rclone.ParseConfig, noPassword},
-}
-
-// noPassword returns the repository location unchanged (there's no sensitive information there)
-func noPassword(s string) string {
+// NoPassword returns the repository location unchanged (there's no sensitive information there)
+func NoPassword(s string) string {
return s
}
@@ -82,16 +53,13 @@ func isPath(s string) bool {
// starts with a backend name followed by a colon, that backend's Parse()
// function is called. Otherwise, the local backend is used which interprets s
// as the name of a directory.
-func Parse(s string) (u Location, err error) {
+func Parse(registry *Registry, s string) (u Location, err error) {
scheme := extractScheme(s)
u.Scheme = scheme
- for _, parser := range parsers {
- if parser.scheme != scheme {
- continue
- }
-
- u.Config, err = parser.parse(s)
+ factory := registry.Lookup(scheme)
+ if factory != nil {
+ u.Config, err = factory.ParseConfig(s)
if err != nil {
return Location{}, err
}
@@ -105,7 +73,12 @@ func Parse(s string) (u Location, err error) {
}
u.Scheme = "local"
- u.Config, err = local.ParseConfig("local:" + s)
+ factory = registry.Lookup(u.Scheme)
+ if factory == nil {
+ return Location{}, errors.New("local backend not available")
+ }
+
+ u.Config, err = factory.ParseConfig("local:" + s)
if err != nil {
return Location{}, err
}
@@ -114,14 +87,12 @@ func Parse(s string) (u Location, err error) {
}
// StripPassword returns a displayable version of a repository location (with any sensitive information removed)
-func StripPassword(s string) string {
+func StripPassword(registry *Registry, s string) string {
scheme := extractScheme(s)
- for _, parser := range parsers {
- if parser.scheme != scheme {
- continue
- }
- return parser.stripPassword(s)
+ factory := registry.Lookup(scheme)
+ if factory != nil {
+ return factory.StripPassword(s)
}
return s
}
diff --git a/internal/backend/location/location_test.go b/internal/backend/location/location_test.go
index 809379850..b2623032e 100644
--- a/internal/backend/location/location_test.go
+++ b/internal/backend/location/location_test.go
@@ -1,344 +1,65 @@
-package location
+package location_test
import (
- "net/url"
- "reflect"
"testing"
- "github.com/restic/restic/internal/backend/b2"
- "github.com/restic/restic/internal/backend/local"
- "github.com/restic/restic/internal/backend/rest"
- "github.com/restic/restic/internal/backend/s3"
- "github.com/restic/restic/internal/backend/sftp"
- "github.com/restic/restic/internal/backend/swift"
+ "github.com/restic/restic/internal/backend/location"
+ "github.com/restic/restic/internal/restic"
+ "github.com/restic/restic/internal/test"
)
-func parseURL(s string) *url.URL {
- u, err := url.Parse(s)
- if err != nil {
- panic(err)
- }
+type testConfig struct {
+ loc string
+}
- return u
+func testFactory() location.Factory {
+ return location.NewHTTPBackendFactory[testConfig, restic.Backend](
+ "local",
+ func(s string) (*testConfig, error) {
+ return &testConfig{loc: s}, nil
+ }, nil, nil, nil,
+ )
}
-var parseTests = []struct {
- s string
- u Location
-}{
- {
- "local:/srv/repo",
- Location{Scheme: "local",
- Config: local.Config{
- Path: "/srv/repo",
- Connections: 2,
- },
- },
- },
- {
- "local:dir1/dir2",
- Location{Scheme: "local",
- Config: local.Config{
- Path: "dir1/dir2",
- Connections: 2,
- },
- },
- },
- {
- "local:dir1/dir2",
- Location{Scheme: "local",
- Config: local.Config{
- Path: "dir1/dir2",
- Connections: 2,
- },
- },
- },
- {
+func TestParse(t *testing.T) {
+ registry := location.NewRegistry()
+ registry.Register(testFactory())
+
+ path := "local:example"
+ u, err := location.Parse(registry, path)
+ test.OK(t, err)
+ test.Equals(t, "local", u.Scheme)
+ test.Equals(t, &testConfig{loc: path}, u.Config)
+}
+
+func TestParseFallback(t *testing.T) {
+ fallbackTests := []string{
"dir1/dir2",
- Location{Scheme: "local",
- Config: local.Config{
- Path: "dir1/dir2",
- Connections: 2,
- },
- },
- },
- {
- "/dir1/dir2",
- Location{Scheme: "local",
- Config: local.Config{
- Path: "/dir1/dir2",
- Connections: 2,
- },
- },
- },
- {
- "local:../dir1/dir2",
- Location{Scheme: "local",
- Config: local.Config{
- Path: "../dir1/dir2",
- Connections: 2,
- },
- },
- },
- {
"/dir1/dir2",
- Location{Scheme: "local",
- Config: local.Config{
- Path: "/dir1/dir2",
- Connections: 2,
- },
- },
- },
- {
"/dir1:foobar/dir2",
- Location{Scheme: "local",
- Config: local.Config{
- Path: "/dir1:foobar/dir2",
- Connections: 2,
- },
- },
- },
- {
`\dir1\foobar\dir2`,
- Location{Scheme: "local",
- Config: local.Config{
- Path: `\dir1\foobar\dir2`,
- Connections: 2,
- },
- },
- },
- {
`c:\dir1\foobar\dir2`,
- Location{Scheme: "local",
- Config: local.Config{
- Path: `c:\dir1\foobar\dir2`,
- Connections: 2,
- },
- },
- },
- {
`C:\Users\appveyor\AppData\Local\Temp\1\restic-test-879453535\repo`,
- Location{Scheme: "local",
- Config: local.Config{
- Path: `C:\Users\appveyor\AppData\Local\Temp\1\restic-test-879453535\repo`,
- Connections: 2,
- },
- },
- },
- {
`c:/dir1/foobar/dir2`,
- Location{Scheme: "local",
- Config: local.Config{
- Path: `c:/dir1/foobar/dir2`,
- Connections: 2,
- },
- },
- },
- {
- "sftp:user@host:/srv/repo",
- Location{Scheme: "sftp",
- Config: sftp.Config{
- User: "user",
- Host: "host",
- Path: "/srv/repo",
- Connections: 5,
- },
- },
- },
- {
- "sftp:host:/srv/repo",
- Location{Scheme: "sftp",
- Config: sftp.Config{
- User: "",
- Host: "host",
- Path: "/srv/repo",
- Connections: 5,
- },
- },
- },
- {
- "sftp://user@host/srv/repo",
- Location{Scheme: "sftp",
- Config: sftp.Config{
- User: "user",
- Host: "host",
- Path: "srv/repo",
- Connections: 5,
- },
- },
- },
- {
- "sftp://user@host//srv/repo",
- Location{Scheme: "sftp",
- Config: sftp.Config{
- User: "user",
- Host: "host",
- Path: "/srv/repo",
- Connections: 5,
- },
- },
- },
+ }
- {
- "s3://eu-central-1/bucketname",
- Location{Scheme: "s3",
- Config: s3.Config{
- Endpoint: "eu-central-1",
- Bucket: "bucketname",
- Prefix: "",
- Connections: 5,
- },
- },
- },
- {
- "s3://hostname.foo/bucketname",
- Location{Scheme: "s3",
- Config: s3.Config{
- Endpoint: "hostname.foo",
- Bucket: "bucketname",
- Prefix: "",
- Connections: 5,
- },
- },
- },
- {
- "s3://hostname.foo/bucketname/prefix/directory",
- Location{Scheme: "s3",
- Config: s3.Config{
- Endpoint: "hostname.foo",
- Bucket: "bucketname",
- Prefix: "prefix/directory",
- Connections: 5,
- },
- },
- },
- {
- "s3:eu-central-1/repo",
- Location{Scheme: "s3",
- Config: s3.Config{
- Endpoint: "eu-central-1",
- Bucket: "repo",
- Prefix: "",
- Connections: 5,
- },
- },
- },
- {
- "s3:eu-central-1/repo/prefix/directory",
- Location{Scheme: "s3",
- Config: s3.Config{
- Endpoint: "eu-central-1",
- Bucket: "repo",
- Prefix: "prefix/directory",
- Connections: 5,
- },
- },
- },
- {
- "s3:https://hostname.foo/repo",
- Location{Scheme: "s3",
- Config: s3.Config{
- Endpoint: "hostname.foo",
- Bucket: "repo",
- Prefix: "",
- Connections: 5,
- },
- },
- },
- {
- "s3:https://hostname.foo/repo/prefix/directory",
- Location{Scheme: "s3",
- Config: s3.Config{
- Endpoint: "hostname.foo",
- Bucket: "repo",
- Prefix: "prefix/directory",
- Connections: 5,
- },
- },
- },
- {
- "s3:http://hostname.foo/repo",
- Location{Scheme: "s3",
- Config: s3.Config{
- Endpoint: "hostname.foo",
- Bucket: "repo",
- Prefix: "",
- UseHTTP: true,
- Connections: 5,
- },
- },
- },
- {
- "swift:container17:/",
- Location{Scheme: "swift",
- Config: swift.Config{
- Container: "container17",
- Prefix: "",
- Connections: 5,
- },
- },
- },
- {
- "swift:container17:/prefix97",
- Location{Scheme: "swift",
- Config: swift.Config{
- Container: "container17",
- Prefix: "prefix97",
- Connections: 5,
- },
- },
- },
- {
- "rest:http://hostname.foo:1234/",
- Location{Scheme: "rest",
- Config: rest.Config{
- URL: parseURL("http://hostname.foo:1234/"),
- Connections: 5,
- },
- },
- },
- {
- "b2:bucketname:/prefix", Location{Scheme: "b2",
- Config: b2.Config{
- Bucket: "bucketname",
- Prefix: "prefix",
- Connections: 5,
- },
- },
- },
- {
- "b2:bucketname", Location{Scheme: "b2",
- Config: b2.Config{
- Bucket: "bucketname",
- Prefix: "",
- Connections: 5,
- },
- },
- },
-}
+ registry := location.NewRegistry()
+ registry.Register(testFactory())
-func TestParse(t *testing.T) {
- for i, test := range parseTests {
- t.Run(test.s, func(t *testing.T) {
- u, err := Parse(test.s)
+ for _, path := range fallbackTests {
+ t.Run(path, func(t *testing.T) {
+ u, err := location.Parse(registry, path)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
-
- if test.u.Scheme != u.Scheme {
- t.Errorf("test %d: scheme does not match, want %q, got %q",
- i, test.u.Scheme, u.Scheme)
- }
-
- if !reflect.DeepEqual(test.u.Config, u.Config) {
- t.Errorf("test %d: cfg map does not match, want:\n %#v\ngot: \n %#v",
- i, test.u.Config, u.Config)
- }
+ test.Equals(t, "local", u.Scheme)
+ test.Equals(t, "local:"+path, u.Config.(*testConfig).loc)
})
}
}
func TestInvalidScheme(t *testing.T) {
+ registry := location.NewRegistry()
var invalidSchemes = []string{
"foobar:xxx",
"foobar:/dir/dir2",
@@ -346,7 +67,7 @@ func TestInvalidScheme(t *testing.T) {
for _, s := range invalidSchemes {
t.Run(s, func(t *testing.T) {
- _, err := Parse(s)
+ _, err := location.Parse(registry, s)
if err == nil {
t.Fatalf("error for invalid location %q not found", s)
}
diff --git a/internal/backend/location/registry.go b/internal/backend/location/registry.go
new file mode 100644
index 000000000..a8818bd73
--- /dev/null
+++ b/internal/backend/location/registry.go
@@ -0,0 +1,106 @@
+package location
+
+import (
+ "context"
+ "net/http"
+
+ "github.com/restic/restic/internal/backend/limiter"
+ "github.com/restic/restic/internal/restic"
+)
+
+type Registry struct {
+ factories map[string]Factory
+}
+
+func NewRegistry() *Registry {
+ return &Registry{
+ factories: make(map[string]Factory),
+ }
+}
+
+func (r *Registry) Register(factory Factory) {
+ if r.factories[factory.Scheme()] != nil {
+ panic("duplicate backend")
+ }
+ r.factories[factory.Scheme()] = factory
+}
+
+func (r *Registry) Lookup(scheme string) Factory {
+ return r.factories[scheme]
+}
+
+type Factory interface {
+ Scheme() string
+ ParseConfig(s string) (interface{}, error)
+ StripPassword(s string) string
+ Create(ctx context.Context, cfg interface{}, rt http.RoundTripper, lim limiter.Limiter) (restic.Backend, error)
+ Open(ctx context.Context, cfg interface{}, rt http.RoundTripper, lim limiter.Limiter) (restic.Backend, error)
+}
+
+type genericBackendFactory[C any, T restic.Backend] struct {
+ scheme string
+ parseConfigFn func(s string) (*C, error)
+ stripPasswordFn func(s string) string
+ createFn func(ctx context.Context, cfg C, rt http.RoundTripper, lim limiter.Limiter) (T, error)
+ openFn func(ctx context.Context, cfg C, rt http.RoundTripper, lim limiter.Limiter) (T, error)
+}
+
+func (f *genericBackendFactory[C, T]) Scheme() string {
+ return f.scheme
+}
+
+func (f *genericBackendFactory[C, T]) ParseConfig(s string) (interface{}, error) {
+ return f.parseConfigFn(s)
+}
+func (f *genericBackendFactory[C, T]) StripPassword(s string) string {
+ if f.stripPasswordFn != nil {
+ return f.stripPasswordFn(s)
+ }
+ return s
+}
+func (f *genericBackendFactory[C, T]) Create(ctx context.Context, cfg interface{}, rt http.RoundTripper, lim limiter.Limiter) (restic.Backend, error) {
+ return f.createFn(ctx, *cfg.(*C), rt, lim)
+}
+func (f *genericBackendFactory[C, T]) Open(ctx context.Context, cfg interface{}, rt http.RoundTripper, lim limiter.Limiter) (restic.Backend, error) {
+ return f.openFn(ctx, *cfg.(*C), rt, lim)
+}
+
+func NewHTTPBackendFactory[C any, T restic.Backend](
+ scheme string,
+ parseConfigFn func(s string) (*C, error),
+ stripPasswordFn func(s string) string,
+ createFn func(ctx context.Context, cfg C, rt http.RoundTripper) (T, error),
+ openFn func(ctx context.Context, cfg C, rt http.RoundTripper) (T, error)) Factory {
+
+ return &genericBackendFactory[C, T]{
+ scheme: scheme,
+ parseConfigFn: parseConfigFn,
+ stripPasswordFn: stripPasswordFn,
+ createFn: func(ctx context.Context, cfg C, rt http.RoundTripper, _ limiter.Limiter) (T, error) {
+ return createFn(ctx, cfg, rt)
+ },
+ openFn: func(ctx context.Context, cfg C, rt http.RoundTripper, _ limiter.Limiter) (T, error) {
+ return openFn(ctx, cfg, rt)
+ },
+ }
+}
+
+func NewLimitedBackendFactory[C any, T restic.Backend](
+ scheme string,
+ parseConfigFn func(s string) (*C, error),
+ stripPasswordFn func(s string) string,
+ createFn func(ctx context.Context, cfg C, lim limiter.Limiter) (T, error),
+ openFn func(ctx context.Context, cfg C, lim limiter.Limiter) (T, error)) Factory {
+
+ return &genericBackendFactory[C, T]{
+ scheme: scheme,
+ parseConfigFn: parseConfigFn,
+ stripPasswordFn: stripPasswordFn,
+ createFn: func(ctx context.Context, cfg C, _ http.RoundTripper, lim limiter.Limiter) (T, error) {
+ return createFn(ctx, cfg, lim)
+ },
+ openFn: func(ctx context.Context, cfg C, _ http.RoundTripper, lim limiter.Limiter) (T, error) {
+ return openFn(ctx, cfg, lim)
+ },
+ }
+}
diff --git a/internal/backend/logger/log.go b/internal/backend/logger/log.go
new file mode 100644
index 000000000..6c860cfae
--- /dev/null
+++ b/internal/backend/logger/log.go
@@ -0,0 +1,79 @@
+package logger
+
+import (
+ "context"
+ "io"
+
+ "github.com/restic/restic/internal/debug"
+ "github.com/restic/restic/internal/restic"
+)
+
+type Backend struct {
+ restic.Backend
+}
+
+// statically ensure that Backend implements restic.Backend.
+var _ restic.Backend = &Backend{}
+
+func New(be restic.Backend) *Backend {
+ return &Backend{Backend: be}
+}
+
+func (be *Backend) IsNotExist(err error) bool {
+ isNotExist := be.Backend.IsNotExist(err)
+ debug.Log("IsNotExist(%T, %#v, %v)", err, err, isNotExist)
+ return isNotExist
+}
+
+// Save writes the data to the wrapped backend, logging the request and any resulting error.
+func (be *Backend) Save(ctx context.Context, h restic.Handle, rd restic.RewindReader) error {
+ debug.Log("Save(%v, %v)", h, rd.Length())
+ err := be.Backend.Save(ctx, h, rd)
+ debug.Log(" save err %v", err)
+ return err
+}
+
+// Remove deletes a file from the backend.
+func (be *Backend) Remove(ctx context.Context, h restic.Handle) error {
+ debug.Log("Remove(%v)", h)
+ err := be.Backend.Remove(ctx, h)
+ debug.Log(" remove err %v", err)
+ return err
+}
+
+func (be *Backend) Load(ctx context.Context, h restic.Handle, length int, offset int64, fn func(io.Reader) error) error {
+ debug.Log("Load(%v, length %v, offset %v)", h, length, offset)
+ err := be.Backend.Load(ctx, h, length, offset, fn)
+ debug.Log(" load err %v", err)
+ return err
+}
+
+func (be *Backend) Stat(ctx context.Context, h restic.Handle) (restic.FileInfo, error) {
+ debug.Log("Stat(%v)", h)
+ fi, err := be.Backend.Stat(ctx, h)
+ debug.Log(" stat err %v", err)
+ return fi, err
+}
+
+func (be *Backend) List(ctx context.Context, t restic.FileType, fn func(restic.FileInfo) error) error {
+ debug.Log("List(%v)", t)
+ err := be.Backend.List(ctx, t, fn)
+ debug.Log(" list err %v", err)
+ return err
+}
+
+func (be *Backend) Delete(ctx context.Context) error {
+ debug.Log("Delete()")
+ err := be.Backend.Delete(ctx)
+ debug.Log(" delete err %v", err)
+ return err
+}
+
+func (be *Backend) Close() error {
+ debug.Log("Close()")
+ err := be.Backend.Close()
+ debug.Log(" close err %v", err)
+ return err
+}
+
+func (be *Backend) Unwrap() restic.Backend { return be.Backend }
diff --git a/internal/backend/mem/mem_backend.go b/internal/backend/mem/mem_backend.go
index 0c46dcd6e..86ec48756 100644
--- a/internal/backend/mem/mem_backend.go
+++ b/internal/backend/mem/mem_backend.go
@@ -6,16 +6,15 @@ import (
"encoding/base64"
"hash"
"io"
+ "net/http"
"sync"
"github.com/cespare/xxhash/v2"
"github.com/restic/restic/internal/backend"
- "github.com/restic/restic/internal/backend/sema"
+ "github.com/restic/restic/internal/backend/location"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/restic"
-
- "github.com/cenkalti/backoff/v4"
)
type memMap map[restic.Handle][]byte
@@ -23,6 +22,25 @@ type memMap map[restic.Handle][]byte
// make sure that MemoryBackend implements backend.Backend
var _ restic.Backend = &MemoryBackend{}
+// NewFactory returns a Factory for a single shared in-memory backend instance.
+func NewFactory() location.Factory {
+ be := New()
+
+ return location.NewHTTPBackendFactory[struct{}, *MemoryBackend](
+ "mem",
+ func(s string) (*struct{}, error) {
+ return &struct{}{}, nil
+ },
+ location.NoPassword,
+ func(_ context.Context, _ struct{}, _ http.RoundTripper) (*MemoryBackend, error) {
+ return be, nil
+ },
+ func(_ context.Context, _ struct{}, _ http.RoundTripper) (*MemoryBackend, error) {
+ return be, nil
+ },
+ )
+}
+
var errNotFound = errors.New("not found")
const connectionCount = 2
@@ -32,19 +50,12 @@ const connectionCount = 2
type MemoryBackend struct {
data memMap
m sync.Mutex
- sem sema.Semaphore
}
// New returns a new backend that saves all data in a map in memory.
func New() *MemoryBackend {
- sem, err := sema.New(connectionCount)
- if err != nil {
- panic(err)
- }
-
be := &MemoryBackend{
data: make(memMap),
- sem: sem,
}
debug.Log("created new memory backend")
@@ -59,13 +70,6 @@ func (be *MemoryBackend) IsNotExist(err error) bool {
// Save adds new Data to the backend.
func (be *MemoryBackend) Save(ctx context.Context, h restic.Handle, rd restic.RewindReader) error {
- if err := h.Valid(); err != nil {
- return backoff.Permanent(err)
- }
-
- be.sem.GetToken()
- defer be.sem.ReleaseToken()
-
be.m.Lock()
defer be.m.Unlock()
@@ -102,7 +106,6 @@ func (be *MemoryBackend) Save(ctx context.Context, h restic.Handle, rd restic.Re
}
be.data[h] = buf
- debug.Log("saved %v bytes at %v", len(buf), h)
return ctx.Err()
}
@@ -114,11 +117,6 @@ func (be *MemoryBackend) Load(ctx context.Context, h restic.Handle, length int,
}
func (be *MemoryBackend) openReader(ctx context.Context, h restic.Handle, length int, offset int64) (io.ReadCloser, error) {
- if err := h.Valid(); err != nil {
- return nil, backoff.Permanent(err)
- }
-
- be.sem.GetToken()
be.m.Lock()
defer be.m.Unlock()
@@ -127,21 +125,12 @@ func (be *MemoryBackend) openReader(ctx context.Context, h restic.Handle, length
h.Name = ""
}
- debug.Log("Load %v offset %v len %v", h, offset, length)
-
- if offset < 0 {
- be.sem.ReleaseToken()
- return nil, errors.New("offset is negative")
- }
-
if _, ok := be.data[h]; !ok {
- be.sem.ReleaseToken()
return nil, errNotFound
}
buf := be.data[h]
if offset > int64(len(buf)) {
- be.sem.ReleaseToken()
return nil, errors.New("offset beyond end of file")
}
@@ -150,18 +139,11 @@ func (be *MemoryBackend) openReader(ctx context.Context, h restic.Handle, length
buf = buf[:length]
}
- return be.sem.ReleaseTokenOnClose(io.NopCloser(bytes.NewReader(buf)), nil), ctx.Err()
+ return io.NopCloser(bytes.NewReader(buf)), ctx.Err()
}
// Stat returns information about a file in the backend.
func (be *MemoryBackend) Stat(ctx context.Context, h restic.Handle) (restic.FileInfo, error) {
- if err := h.Valid(); err != nil {
- return restic.FileInfo{}, backoff.Permanent(err)
- }
-
- be.sem.GetToken()
- defer be.sem.ReleaseToken()
-
be.m.Lock()
defer be.m.Unlock()
@@ -170,8 +152,6 @@ func (be *MemoryBackend) Stat(ctx context.Context, h restic.Handle) (restic.File
h.Name = ""
}
- debug.Log("stat %v", h)
-
e, ok := be.data[h]
if !ok {
return restic.FileInfo{}, errNotFound
@@ -182,14 +162,9 @@ func (be *MemoryBackend) Stat(ctx context.Context, h restic.Handle) (restic.File
// Remove deletes a file from the backend.
func (be *MemoryBackend) Remove(ctx context.Context, h restic.Handle) error {
- be.sem.GetToken()
- defer be.sem.ReleaseToken()
-
be.m.Lock()
defer be.m.Unlock()
- debug.Log("Remove %v", h)
-
h.ContainedBlobType = restic.InvalidBlob
if _, ok := be.data[h]; !ok {
return errNotFound
diff --git a/internal/backend/mem/mem_backend_test.go b/internal/backend/mem/mem_backend_test.go
index 819c6a2b6..c4dad0fb2 100644
--- a/internal/backend/mem/mem_backend_test.go
+++ b/internal/backend/mem/mem_backend_test.go
@@ -1,59 +1,20 @@
package mem_test
import (
- "context"
"testing"
- "github.com/restic/restic/internal/errors"
- "github.com/restic/restic/internal/restic"
-
"github.com/restic/restic/internal/backend/mem"
"github.com/restic/restic/internal/backend/test"
)
-type memConfig struct {
- be restic.Backend
-}
-
-func newTestSuite() *test.Suite {
- return &test.Suite{
+func newTestSuite() *test.Suite[struct{}] {
+ return &test.Suite[struct{}]{
// NewConfig returns a config for a new temporary backend that will be used in tests.
- NewConfig: func() (interface{}, error) {
- return &memConfig{}, nil
- },
-
- // CreateFn is a function that creates a temporary repository for the tests.
- Create: func(cfg interface{}) (restic.Backend, error) {
- c := cfg.(*memConfig)
- if c.be != nil {
- _, err := c.be.Stat(context.TODO(), restic.Handle{Type: restic.ConfigFile})
- if err != nil && !c.be.IsNotExist(err) {
- return nil, err
- }
-
- if err == nil {
- return nil, errors.New("config already exists")
- }
- }
-
- c.be = mem.New()
- return c.be, nil
+ NewConfig: func() (*struct{}, error) {
+ return &struct{}{}, nil
},
- // OpenFn is a function that opens a previously created temporary repository.
- Open: func(cfg interface{}) (restic.Backend, error) {
- c := cfg.(*memConfig)
- if c.be == nil {
- c.be = mem.New()
- }
- return c.be, nil
- },
-
- // CleanupFn removes data created during the tests.
- Cleanup: func(cfg interface{}) error {
- // no cleanup needed
- return nil
- },
+ Factory: mem.NewFactory(),
}
}
diff --git a/internal/backend/rclone/backend.go b/internal/backend/rclone/backend.go
index 085c89945..fd6f5b262 100644
--- a/internal/backend/rclone/backend.go
+++ b/internal/backend/rclone/backend.go
@@ -19,6 +19,7 @@ import (
"github.com/cenkalti/backoff/v4"
"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/backend/limiter"
+ "github.com/restic/restic/internal/backend/location"
"github.com/restic/restic/internal/backend/rest"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/errors"
@@ -36,6 +37,10 @@ type Backend struct {
conn *StdioConn
}
+func NewFactory() location.Factory {
+ return location.NewLimitedBackendFactory("rclone", ParseConfig, location.NoPassword, Create, Open)
+}
+
// run starts command with args and initializes the StdioConn.
func run(command string, args ...string) (*StdioConn, *sync.WaitGroup, chan struct{}, func() error, error) {
cmd := exec.Command(command, args...)
@@ -134,7 +139,7 @@ func wrapConn(c *StdioConn, lim limiter.Limiter) *wrappedConn {
}
// New initializes a Backend and starts the process.
-func newBackend(cfg Config, lim limiter.Limiter) (*Backend, error) {
+func newBackend(ctx context.Context, cfg Config, lim limiter.Limiter) (*Backend, error) {
var (
args []string
err error
@@ -197,7 +202,7 @@ func newBackend(cfg Config, lim limiter.Limiter) (*Backend, error) {
wg: wg,
}
- ctx, cancel := context.WithCancel(context.Background())
+ ctx, cancel := context.WithCancel(ctx)
defer cancel()
wg.Add(1)
@@ -256,8 +261,8 @@ func newBackend(cfg Config, lim limiter.Limiter) (*Backend, error) {
}
// Open starts an rclone process with the given config.
-func Open(cfg Config, lim limiter.Limiter) (*Backend, error) {
- be, err := newBackend(cfg, lim)
+func Open(ctx context.Context, cfg Config, lim limiter.Limiter) (*Backend, error) {
+ be, err := newBackend(ctx, cfg, lim)
if err != nil {
return nil, err
}
@@ -272,7 +277,7 @@ func Open(cfg Config, lim limiter.Limiter) (*Backend, error) {
URL: url,
}
- restBackend, err := rest.Open(restConfig, debug.RoundTripper(be.tr))
+ restBackend, err := rest.Open(ctx, restConfig, debug.RoundTripper(be.tr))
if err != nil {
_ = be.Close()
return nil, err
@@ -283,8 +288,8 @@ func Open(cfg Config, lim limiter.Limiter) (*Backend, error) {
}
// Create initializes a new restic repo with rclone.
-func Create(ctx context.Context, cfg Config) (*Backend, error) {
- be, err := newBackend(cfg, nil)
+func Create(ctx context.Context, cfg Config, lim limiter.Limiter) (*Backend, error) {
+ be, err := newBackend(ctx, cfg, lim)
if err != nil {
return nil, err
}
diff --git a/internal/backend/rclone/backend_test.go b/internal/backend/rclone/backend_test.go
index 12fed6274..742031585 100644
--- a/internal/backend/rclone/backend_test.go
+++ b/internal/backend/rclone/backend_test.go
@@ -1,48 +1,35 @@
package rclone_test
import (
- "context"
"os/exec"
"testing"
"github.com/restic/restic/internal/backend/rclone"
"github.com/restic/restic/internal/backend/test"
- "github.com/restic/restic/internal/errors"
- "github.com/restic/restic/internal/restic"
rtest "github.com/restic/restic/internal/test"
)
-func newTestSuite(t testing.TB) *test.Suite {
+func newTestSuite(t testing.TB) *test.Suite[rclone.Config] {
dir := rtest.TempDir(t)
- return &test.Suite{
+ return &test.Suite[rclone.Config]{
// NewConfig returns a config for a new temporary backend that will be used in tests.
- NewConfig: func() (interface{}, error) {
+ NewConfig: func() (*rclone.Config, error) {
t.Logf("use backend at %v", dir)
cfg := rclone.NewConfig()
cfg.Remote = dir
- return cfg, nil
+ return &cfg, nil
},
- // CreateFn is a function that creates a temporary repository for the tests.
- Create: func(config interface{}) (restic.Backend, error) {
- t.Logf("Create()")
- cfg := config.(rclone.Config)
- be, err := rclone.Create(context.TODO(), cfg)
- var e *exec.Error
- if errors.As(err, &e) && e.Err == exec.ErrNotFound {
- t.Skipf("program %q not found", e.Name)
- return nil, nil
- }
- return be, err
- },
+ Factory: rclone.NewFactory(),
+ }
+}
- // OpenFn is a function that opens a previously created temporary repository.
- Open: func(config interface{}) (restic.Backend, error) {
- t.Logf("Open()")
- cfg := config.(rclone.Config)
- return rclone.Open(cfg, nil)
- },
+func findRclone(t testing.TB) {
+	// Look for an rclone binary in PATH and skip the test if none is available.
+ _, err := exec.LookPath("rclone")
+ if err != nil {
+ t.Skip(err)
}
}
@@ -53,9 +40,11 @@ func TestBackendRclone(t *testing.T) {
}
}()
+ findRclone(t)
newTestSuite(t).RunTests(t)
}
func BenchmarkBackendREST(t *testing.B) {
+ findRclone(t)
newTestSuite(t).RunBenchmarks(t)
}
diff --git a/internal/backend/rclone/config.go b/internal/backend/rclone/config.go
index f8dc0d84d..2071d84e2 100644
--- a/internal/backend/rclone/config.go
+++ b/internal/backend/rclone/config.go
@@ -34,7 +34,7 @@ func NewConfig() Config {
}
// ParseConfig parses the string s and extracts the remote server URL.
-func ParseConfig(s string) (interface{}, error) {
+func ParseConfig(s string) (*Config, error) {
if !strings.HasPrefix(s, "rclone:") {
return nil, errors.New("invalid rclone backend specification")
}
@@ -42,5 +42,5 @@ func ParseConfig(s string) (interface{}, error) {
s = s[7:]
cfg := NewConfig()
cfg.Remote = s
- return cfg, nil
+ return &cfg, nil
}
diff --git a/internal/backend/rclone/config_test.go b/internal/backend/rclone/config_test.go
index 923555136..67b983f66 100644
--- a/internal/backend/rclone/config_test.go
+++ b/internal/backend/rclone/config_test.go
@@ -1,37 +1,24 @@
package rclone
import (
- "reflect"
"testing"
+
+ "github.com/restic/restic/internal/backend/test"
)
-func TestParseConfig(t *testing.T) {
- var tests = []struct {
- s string
- cfg Config
- }{
- {
- "rclone:local:foo:/bar",
- Config{
- Remote: "local:foo:/bar",
- Program: defaultConfig.Program,
- Args: defaultConfig.Args,
- Connections: defaultConfig.Connections,
- Timeout: defaultConfig.Timeout,
- },
+var configTests = []test.ConfigTestData[Config]{
+ {
+ S: "rclone:local:foo:/bar",
+ Cfg: Config{
+ Remote: "local:foo:/bar",
+ Program: defaultConfig.Program,
+ Args: defaultConfig.Args,
+ Connections: defaultConfig.Connections,
+ Timeout: defaultConfig.Timeout,
},
- }
-
- for _, test := range tests {
- t.Run("", func(t *testing.T) {
- cfg, err := ParseConfig(test.s)
- if err != nil {
- t.Fatal(err)
- }
+ },
+}
- if !reflect.DeepEqual(cfg, test.cfg) {
- t.Fatalf("wrong config, want:\n %v\ngot:\n %v", test.cfg, cfg)
- }
- })
- }
+func TestParseConfig(t *testing.T) {
+ test.ParseConfigTester(t, ParseConfig, configTests)
}
diff --git a/internal/backend/rclone/internal_test.go b/internal/backend/rclone/internal_test.go
index bfec2b98c..32fe850a0 100644
--- a/internal/backend/rclone/internal_test.go
+++ b/internal/backend/rclone/internal_test.go
@@ -15,7 +15,7 @@ func TestRcloneExit(t *testing.T) {
dir := rtest.TempDir(t)
cfg := NewConfig()
cfg.Remote = dir
- be, err := Open(cfg, nil)
+ be, err := Open(context.TODO(), cfg, nil)
var e *exec.Error
if errors.As(err, &e) && e.Err == exec.ErrNotFound {
t.Skipf("program %q not found", e.Name)
@@ -45,7 +45,7 @@ func TestRcloneFailedStart(t *testing.T) {
cfg := NewConfig()
// exits with exit code 1
cfg.Program = "false"
- _, err := Open(cfg, nil)
+ _, err := Open(context.TODO(), cfg, nil)
var e *exec.ExitError
if !errors.As(err, &e) {
// unexpected error
diff --git a/internal/backend/rest/config.go b/internal/backend/rest/config.go
index 388153fce..ba42a0220 100644
--- a/internal/backend/rest/config.go
+++ b/internal/backend/rest/config.go
@@ -26,7 +26,7 @@ func NewConfig() Config {
}
// ParseConfig parses the string s and extracts the REST server URL.
-func ParseConfig(s string) (interface{}, error) {
+func ParseConfig(s string) (*Config, error) {
if !strings.HasPrefix(s, "rest:") {
return nil, errors.New("invalid REST backend specification")
}
@@ -40,7 +40,7 @@ func ParseConfig(s string) (interface{}, error) {
cfg := NewConfig()
cfg.URL = u
- return cfg, nil
+ return &cfg, nil
}
// StripPassword removes the password from the URL
diff --git a/internal/backend/rest/config_test.go b/internal/backend/rest/config_test.go
index 2d8e32a73..23ea9095b 100644
--- a/internal/backend/rest/config_test.go
+++ b/internal/backend/rest/config_test.go
@@ -2,8 +2,9 @@ package rest
import (
"net/url"
- "reflect"
"testing"
+
+ "github.com/restic/restic/internal/backend/test"
)
func parseURL(s string) *url.URL {
@@ -15,20 +16,17 @@ func parseURL(s string) *url.URL {
return u
}
-var configTests = []struct {
- s string
- cfg Config
-}{
+var configTests = []test.ConfigTestData[Config]{
{
- s: "rest:http://localhost:1234",
- cfg: Config{
+ S: "rest:http://localhost:1234",
+ Cfg: Config{
URL: parseURL("http://localhost:1234/"),
Connections: 5,
},
},
{
- s: "rest:http://localhost:1234/",
- cfg: Config{
+ S: "rest:http://localhost:1234/",
+ Cfg: Config{
URL: parseURL("http://localhost:1234/"),
Connections: 5,
},
@@ -36,16 +34,72 @@ var configTests = []struct {
}
func TestParseConfig(t *testing.T) {
- for _, test := range configTests {
- t.Run("", func(t *testing.T) {
- cfg, err := ParseConfig(test.s)
- if err != nil {
- t.Fatalf("%s failed: %v", test.s, err)
- }
+ test.ParseConfigTester(t, ParseConfig, configTests)
+}
+
+var passwordTests = []struct {
+ input string
+ expected string
+}{
+ {
+ "rest:",
+ "rest:/",
+ },
+ {
+ "rest:localhost/",
+ "rest:localhost/",
+ },
+ {
+ "rest::123/",
+ "rest::123/",
+ },
+ {
+ "rest:http://",
+ "rest:http://",
+ },
+ {
+ "rest:http://hostname.foo:1234/",
+ "rest:http://hostname.foo:1234/",
+ },
+ {
+ "rest:http://user@hostname.foo:1234/",
+ "rest:http://user@hostname.foo:1234/",
+ },
+ {
+ "rest:http://user:@hostname.foo:1234/",
+ "rest:http://user:***@hostname.foo:1234/",
+ },
+ {
+ "rest:http://user:p@hostname.foo:1234/",
+ "rest:http://user:***@hostname.foo:1234/",
+ },
+ {
+ "rest:http://user:pppppaaafhhfuuwiiehhthhghhdkjaoowpprooghjjjdhhwuuhgjsjhhfdjhruuhsjsdhhfhshhsppwufhhsjjsjs@hostname.foo:1234/",
+ "rest:http://user:***@hostname.foo:1234/",
+ },
+ {
+ "rest:http://user:password@hostname",
+ "rest:http://user:***@hostname/",
+ },
+ {
+ "rest:http://user:password@:123",
+ "rest:http://user:***@:123/",
+ },
+ {
+ "rest:http://user:password@",
+ "rest:http://user:***@/",
+ },
+}
+
+func TestStripPassword(t *testing.T) {
+	// Resolve StripPassword through the factory to verify it is wired to the real implementation.
+ StripPassword := NewFactory().StripPassword
- if !reflect.DeepEqual(cfg, test.cfg) {
- t.Fatalf("\ninput: %s\n wrong config, want:\n %v\ngot:\n %v",
- test.s, test.cfg, cfg)
+ for i, test := range passwordTests {
+ t.Run(test.input, func(t *testing.T) {
+ result := StripPassword(test.input)
+ if result != test.expected {
+ t.Errorf("test %d: expected '%s' but got '%s'", i, test.expected, result)
}
})
}
diff --git a/internal/backend/rest/rest.go b/internal/backend/rest/rest.go
index f4c2897b9..f8670280d 100644
--- a/internal/backend/rest/rest.go
+++ b/internal/backend/rest/rest.go
@@ -11,13 +11,12 @@ import (
"path"
"strings"
+ "github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/backend/layout"
- "github.com/restic/restic/internal/backend/sema"
+ "github.com/restic/restic/internal/backend/location"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/restic"
-
- "github.com/cenkalti/backoff/v4"
)
// make sure the rest backend implements restic.Backend
@@ -27,11 +26,14 @@ var _ restic.Backend = &Backend{}
type Backend struct {
url *url.URL
connections uint
- sem sema.Semaphore
client http.Client
layout.Layout
}
+func NewFactory() location.Factory {
+ return location.NewHTTPBackendFactory("rest", ParseConfig, StripPassword, Create, Open)
+}
+
// the REST API protocol version is decided by HTTP request headers, these are the constants.
const (
ContentTypeV1 = "application/vnd.x.restic.rest.v1"
@@ -39,12 +41,7 @@ const (
)
// Open opens the REST backend with the given config.
-func Open(cfg Config, rt http.RoundTripper) (*Backend, error) {
- sem, err := sema.New(cfg.Connections)
- if err != nil {
- return nil, err
- }
-
+func Open(_ context.Context, cfg Config, rt http.RoundTripper) (*Backend, error) {
// use url without trailing slash for layout
url := cfg.URL.String()
if url[len(url)-1] == '/' {
@@ -56,7 +53,6 @@ func Open(cfg Config, rt http.RoundTripper) (*Backend, error) {
client: http.Client{Transport: rt},
Layout: &layout.RESTLayout{URL: url, Join: path.Join},
connections: cfg.Connections,
- sem: sem,
}
return be, nil
@@ -64,14 +60,14 @@ func Open(cfg Config, rt http.RoundTripper) (*Backend, error) {
// Create creates a new REST on server configured in config.
func Create(ctx context.Context, cfg Config, rt http.RoundTripper) (*Backend, error) {
- be, err := Open(cfg, rt)
+ be, err := Open(ctx, cfg, rt)
if err != nil {
return nil, err
}
_, err = be.Stat(ctx, restic.Handle{Type: restic.ConfigFile})
if err == nil {
- return nil, errors.Fatal("config file already exists")
+ return nil, errors.New("config file already exists")
}
url := *cfg.URL
@@ -85,7 +81,7 @@ func Create(ctx context.Context, cfg Config, rt http.RoundTripper) (*Backend, er
}
if resp.StatusCode != http.StatusOK {
- return nil, errors.Fatalf("server response unexpected: %v (%v)", resp.Status, resp.StatusCode)
+ return nil, fmt.Errorf("server response unexpected: %v (%v)", resp.Status, resp.StatusCode)
}
_, err = io.Copy(io.Discard, resp.Body)
@@ -123,10 +119,6 @@ func (b *Backend) HasAtomicReplace() bool {
// Save stores data in the backend at the handle.
func (b *Backend) Save(ctx context.Context, h restic.Handle, rd restic.RewindReader) error {
- if err := h.Valid(); err != nil {
- return backoff.Permanent(err)
- }
-
ctx, cancel := context.WithCancel(ctx)
defer cancel()
@@ -143,9 +135,7 @@ func (b *Backend) Save(ctx context.Context, h restic.Handle, rd restic.RewindRea
// let's the server know what's coming.
req.ContentLength = rd.Length()
- b.sem.GetToken()
resp, err := b.client.Do(req)
- b.sem.ReleaseToken()
var cerr error
if resp != nil {
@@ -157,7 +147,7 @@ func (b *Backend) Save(ctx context.Context, h restic.Handle, rd restic.RewindRea
return errors.WithStack(err)
}
- if resp.StatusCode != 200 {
+ if resp.StatusCode != http.StatusOK {
return errors.Errorf("server response unexpected: %v (%v)", resp.Status, resp.StatusCode)
}
@@ -212,19 +202,6 @@ func (b *Backend) Load(ctx context.Context, h restic.Handle, length int, offset
}
func (b *Backend) openReader(ctx context.Context, h restic.Handle, length int, offset int64) (io.ReadCloser, error) {
- debug.Log("Load %v, length %v, offset %v", h, length, offset)
- if err := h.Valid(); err != nil {
- return nil, backoff.Permanent(err)
- }
-
- if offset < 0 {
- return nil, errors.New("offset is negative")
- }
-
- if length < 0 {
- return nil, errors.Errorf("invalid length %d", length)
- }
-
req, err := http.NewRequestWithContext(ctx, "GET", b.Filename(h), nil)
if err != nil {
return nil, errors.WithStack(err)
@@ -236,11 +213,8 @@ func (b *Backend) openReader(ctx context.Context, h restic.Handle, length int, o
}
req.Header.Set("Range", byteRange)
req.Header.Set("Accept", ContentTypeV2)
- debug.Log("Load(%v) send range %v", h, byteRange)
- b.sem.GetToken()
resp, err := b.client.Do(req)
- b.sem.ReleaseToken()
if err != nil {
if resp != nil {
@@ -255,7 +229,7 @@ func (b *Backend) openReader(ctx context.Context, h restic.Handle, length int, o
return nil, &notExistError{h}
}
- if resp.StatusCode != 200 && resp.StatusCode != 206 {
+ if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent {
_ = resp.Body.Close()
return nil, errors.Errorf("unexpected HTTP response (%v): %v", resp.StatusCode, resp.Status)
}
@@ -265,19 +239,13 @@ func (b *Backend) openReader(ctx context.Context, h restic.Handle, length int, o
// Stat returns information about a blob.
func (b *Backend) Stat(ctx context.Context, h restic.Handle) (restic.FileInfo, error) {
- if err := h.Valid(); err != nil {
- return restic.FileInfo{}, backoff.Permanent(err)
- }
-
req, err := http.NewRequestWithContext(ctx, http.MethodHead, b.Filename(h), nil)
if err != nil {
return restic.FileInfo{}, errors.WithStack(err)
}
req.Header.Set("Accept", ContentTypeV2)
- b.sem.GetToken()
resp, err := b.client.Do(req)
- b.sem.ReleaseToken()
if err != nil {
return restic.FileInfo{}, errors.WithStack(err)
}
@@ -292,7 +260,7 @@ func (b *Backend) Stat(ctx context.Context, h restic.Handle) (restic.FileInfo, e
return restic.FileInfo{}, &notExistError{h}
}
- if resp.StatusCode != 200 {
+ if resp.StatusCode != http.StatusOK {
return restic.FileInfo{}, errors.Errorf("unexpected HTTP response (%v): %v", resp.StatusCode, resp.Status)
}
@@ -310,19 +278,13 @@ func (b *Backend) Stat(ctx context.Context, h restic.Handle) (restic.FileInfo, e
// Remove removes the blob with the given name and type.
func (b *Backend) Remove(ctx context.Context, h restic.Handle) error {
- if err := h.Valid(); err != nil {
- return backoff.Permanent(err)
- }
-
req, err := http.NewRequestWithContext(ctx, "DELETE", b.Filename(h), nil)
if err != nil {
return errors.WithStack(err)
}
req.Header.Set("Accept", ContentTypeV2)
- b.sem.GetToken()
resp, err := b.client.Do(req)
- b.sem.ReleaseToken()
if err != nil {
return errors.Wrap(err, "client.Do")
@@ -333,7 +295,7 @@ func (b *Backend) Remove(ctx context.Context, h restic.Handle) error {
return &notExistError{h}
}
- if resp.StatusCode != 200 {
+ if resp.StatusCode != http.StatusOK {
return errors.Errorf("blob not removed, server response: %v (%v)", resp.Status, resp.StatusCode)
}
@@ -359,20 +321,23 @@ func (b *Backend) List(ctx context.Context, t restic.FileType, fn func(restic.Fi
}
req.Header.Set("Accept", ContentTypeV2)
- b.sem.GetToken()
resp, err := b.client.Do(req)
- b.sem.ReleaseToken()
if err != nil {
return errors.Wrap(err, "List")
}
- if resp.StatusCode != 200 {
+ if resp.StatusCode == http.StatusNotFound {
+ // ignore missing directories
+ return nil
+ }
+
+ if resp.StatusCode != http.StatusOK {
return errors.Errorf("List failed, server response: %v (%v)", resp.Status, resp.StatusCode)
}
if resp.Header.Get("Content-Type") == ContentTypeV2 {
- return b.listv2(ctx, t, resp, fn)
+ return b.listv2(ctx, resp, fn)
}
return b.listv1(ctx, t, resp, fn)
@@ -415,7 +380,7 @@ func (b *Backend) listv1(ctx context.Context, t restic.FileType, resp *http.Resp
// listv2 uses the REST protocol v2, where a list HTTP request (e.g. `GET
// /data/`) returns the names and sizes of all files.
-func (b *Backend) listv2(ctx context.Context, t restic.FileType, resp *http.Response, fn func(restic.FileInfo) error) error {
+func (b *Backend) listv2(ctx context.Context, resp *http.Response, fn func(restic.FileInfo) error) error {
debug.Log("parsing API v2 response")
dec := json.NewDecoder(resp.Body)
@@ -457,32 +422,7 @@ func (b *Backend) Close() error {
return nil
}
-// Remove keys for a specified backend type.
-func (b *Backend) removeKeys(ctx context.Context, t restic.FileType) error {
- return b.List(ctx, t, func(fi restic.FileInfo) error {
- return b.Remove(ctx, restic.Handle{Type: t, Name: fi.Name})
- })
-}
-
// Delete removes all data in the backend.
func (b *Backend) Delete(ctx context.Context) error {
- alltypes := []restic.FileType{
- restic.PackFile,
- restic.KeyFile,
- restic.LockFile,
- restic.SnapshotFile,
- restic.IndexFile}
-
- for _, t := range alltypes {
- err := b.removeKeys(ctx, t)
- if err != nil {
- return nil
- }
- }
-
- err := b.Remove(ctx, restic.Handle{Type: restic.ConfigFile})
- if err != nil && b.IsNotExist(err) {
- return nil
- }
- return err
+ return backend.DefaultDelete(ctx, b)
}
diff --git a/internal/backend/rest/rest_int_test.go b/internal/backend/rest/rest_int_test.go
index 8a868630f..e7810c5e3 100644
--- a/internal/backend/rest/rest_int_test.go
+++ b/internal/backend/rest/rest_int_test.go
@@ -93,12 +93,12 @@ func TestListAPI(t *testing.T) {
// stat file in data/, use the first two bytes in the name
// of the file as the size :)
filename := req.URL.Path[6:]
- len, err := strconv.ParseInt(filename[:4], 16, 64)
+ length, err := strconv.ParseInt(filename[:4], 16, 64)
if err != nil {
t.Fatal(err)
}
- res.Header().Set("Content-Length", fmt.Sprintf("%d", len))
+ res.Header().Set("Content-Length", fmt.Sprintf("%d", length))
res.WriteHeader(http.StatusOK)
return
}
@@ -117,7 +117,7 @@ func TestListAPI(t *testing.T) {
URL: srvURL,
}
- be, err := rest.Open(cfg, http.DefaultTransport)
+ be, err := rest.Open(context.TODO(), cfg, http.DefaultTransport)
if err != nil {
t.Fatal(err)
}
diff --git a/internal/backend/rest/rest_test.go b/internal/backend/rest/rest_test.go
index a473e4440..6a5b4f8a5 100644
--- a/internal/backend/rest/rest_test.go
+++ b/internal/backend/rest/rest_test.go
@@ -9,10 +9,8 @@ import (
"testing"
"time"
- "github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/backend/rest"
"github.com/restic/restic/internal/backend/test"
- "github.com/restic/restic/internal/restic"
rtest "github.com/restic/restic/internal/test"
)
@@ -67,38 +65,18 @@ func runRESTServer(ctx context.Context, t testing.TB, dir string) (*url.URL, fun
return url, cleanup
}
-func newTestSuite(ctx context.Context, t testing.TB, url *url.URL, minimalData bool) *test.Suite {
- tr, err := backend.Transport(backend.TransportOptions{})
- if err != nil {
- t.Fatalf("cannot create transport for tests: %v", err)
- }
-
- return &test.Suite{
+func newTestSuite(url *url.URL, minimalData bool) *test.Suite[rest.Config] {
+ return &test.Suite[rest.Config]{
MinimalData: minimalData,
// NewConfig returns a config for a new temporary backend that will be used in tests.
- NewConfig: func() (interface{}, error) {
+ NewConfig: func() (*rest.Config, error) {
cfg := rest.NewConfig()
cfg.URL = url
- return cfg, nil
- },
-
- // CreateFn is a function that creates a temporary repository for the tests.
- Create: func(config interface{}) (restic.Backend, error) {
- cfg := config.(rest.Config)
- return rest.Create(context.TODO(), cfg, tr)
- },
-
- // OpenFn is a function that opens a previously created temporary repository.
- Open: func(config interface{}) (restic.Backend, error) {
- cfg := config.(rest.Config)
- return rest.Open(cfg, tr)
+ return &cfg, nil
},
- // CleanupFn removes data created during the tests.
- Cleanup: func(config interface{}) error {
- return nil
- },
+ Factory: rest.NewFactory(),
}
}
@@ -116,7 +94,7 @@ func TestBackendREST(t *testing.T) {
serverURL, cleanup := runRESTServer(ctx, t, dir)
defer cleanup()
- newTestSuite(ctx, t, serverURL, false).RunTests(t)
+ newTestSuite(serverURL, false).RunTests(t)
}
func TestBackendRESTExternalServer(t *testing.T) {
@@ -130,12 +108,7 @@ func TestBackendRESTExternalServer(t *testing.T) {
t.Fatal(err)
}
- c := cfg.(rest.Config)
-
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
-
- newTestSuite(ctx, t, c.URL, true).RunTests(t)
+ newTestSuite(cfg.URL, true).RunTests(t)
}
func BenchmarkBackendREST(t *testing.B) {
@@ -146,5 +119,5 @@ func BenchmarkBackendREST(t *testing.B) {
serverURL, cleanup := runRESTServer(ctx, t, dir)
defer cleanup()
- newTestSuite(ctx, t, serverURL, false).RunBenchmarks(t)
+ newTestSuite(serverURL, false).RunBenchmarks(t)
}
diff --git a/internal/backend/retry/backend_retry.go b/internal/backend/retry/backend_retry.go
index b5f2706f4..9c51efedc 100644
--- a/internal/backend/retry/backend_retry.go
+++ b/internal/backend/retry/backend_retry.go
@@ -191,3 +191,7 @@ func (be *Backend) List(ctx context.Context, t restic.FileType, fn func(restic.F
return err
}
+
+func (be *Backend) Unwrap() restic.Backend {
+ return be.Backend
+}
diff --git a/internal/backend/retry/testing.go b/internal/backend/retry/testing.go
index 797573b03..ca019d202 100644
--- a/internal/backend/retry/testing.go
+++ b/internal/backend/retry/testing.go
@@ -3,6 +3,6 @@ package retry
import "testing"
// TestFastRetries reduces the initial retry delay to 1 millisecond
-func TestFastRetries(t testing.TB) {
+func TestFastRetries(_ testing.TB) {
fastRetries = true
}
diff --git a/internal/backend/s3/config.go b/internal/backend/s3/config.go
index 9050e20f4..8dcad9eee 100644
--- a/internal/backend/s3/config.go
+++ b/internal/backend/s3/config.go
@@ -2,11 +2,13 @@ package s3
import (
"net/url"
+ "os"
"path"
"strings"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/options"
+ "github.com/restic/restic/internal/restic"
)
// Config contains all configuration necessary to connect to an s3 compatible
@@ -44,7 +46,7 @@ func init() {
// supported configuration formats are s3://host/bucketname/prefix and
// s3:host/bucketname/prefix. The host can also be a valid s3 region
// name. If no prefix is given the prefix "restic" will be used.
-func ParseConfig(s string) (interface{}, error) {
+func ParseConfig(s string) (*Config, error) {
switch {
case strings.HasPrefix(s, "s3:http"):
// assume that a URL has been specified, parse it and
@@ -75,7 +77,7 @@ func ParseConfig(s string) (interface{}, error) {
return createConfig(endpoint, bucket, prefix, false)
}
-func createConfig(endpoint, bucket, prefix string, useHTTP bool) (interface{}, error) {
+func createConfig(endpoint, bucket, prefix string, useHTTP bool) (*Config, error) {
if endpoint == "" {
return nil, errors.New("s3: invalid format, host/region or bucket name not found")
}
@@ -89,5 +91,20 @@ func createConfig(endpoint, bucket, prefix string, useHTTP bool) (interface{}, e
cfg.UseHTTP = useHTTP
cfg.Bucket = bucket
cfg.Prefix = prefix
- return cfg, nil
+ return &cfg, nil
+}
+
+var _ restic.ApplyEnvironmenter = &Config{}
+
+// ApplyEnvironment saves values from the environment to the config.
+func (cfg *Config) ApplyEnvironment(prefix string) {
+ if cfg.KeyID == "" {
+ cfg.KeyID = os.Getenv(prefix + "AWS_ACCESS_KEY_ID")
+ }
+ if cfg.Secret.String() == "" {
+ cfg.Secret = options.NewSecretString(os.Getenv(prefix + "AWS_SECRET_ACCESS_KEY"))
+ }
+ if cfg.Region == "" {
+ cfg.Region = os.Getenv(prefix + "AWS_DEFAULT_REGION")
+ }
}
diff --git a/internal/backend/s3/config_test.go b/internal/backend/s3/config_test.go
index 821fbc244..085dbeedb 100644
--- a/internal/backend/s3/config_test.go
+++ b/internal/backend/s3/config_test.go
@@ -3,94 +3,111 @@ package s3
import (
"strings"
"testing"
+
+ "github.com/restic/restic/internal/backend/test"
)
-var configTests = []struct {
- s string
- cfg Config
-}{
- {"s3://eu-central-1/bucketname", Config{
+var configTests = []test.ConfigTestData[Config]{
+ {S: "s3://eu-central-1/bucketname", Cfg: Config{
Endpoint: "eu-central-1",
Bucket: "bucketname",
Prefix: "",
Connections: 5,
}},
- {"s3://eu-central-1/bucketname/", Config{
+ {S: "s3://eu-central-1/bucketname/", Cfg: Config{
Endpoint: "eu-central-1",
Bucket: "bucketname",
Prefix: "",
Connections: 5,
}},
- {"s3://eu-central-1/bucketname/prefix/directory", Config{
+ {S: "s3://eu-central-1/bucketname/prefix/directory", Cfg: Config{
Endpoint: "eu-central-1",
Bucket: "bucketname",
Prefix: "prefix/directory",
Connections: 5,
}},
- {"s3://eu-central-1/bucketname/prefix/directory/", Config{
+ {S: "s3://eu-central-1/bucketname/prefix/directory/", Cfg: Config{
Endpoint: "eu-central-1",
Bucket: "bucketname",
Prefix: "prefix/directory",
Connections: 5,
}},
- {"s3:eu-central-1/foobar", Config{
+ {S: "s3:eu-central-1/foobar", Cfg: Config{
Endpoint: "eu-central-1",
Bucket: "foobar",
Prefix: "",
Connections: 5,
}},
- {"s3:eu-central-1/foobar/", Config{
+ {S: "s3:eu-central-1/foobar/", Cfg: Config{
Endpoint: "eu-central-1",
Bucket: "foobar",
Prefix: "",
Connections: 5,
}},
- {"s3:eu-central-1/foobar/prefix/directory", Config{
+ {S: "s3:eu-central-1/foobar/prefix/directory", Cfg: Config{
Endpoint: "eu-central-1",
Bucket: "foobar",
Prefix: "prefix/directory",
Connections: 5,
}},
- {"s3:eu-central-1/foobar/prefix/directory/", Config{
+ {S: "s3:eu-central-1/foobar/prefix/directory/", Cfg: Config{
Endpoint: "eu-central-1",
Bucket: "foobar",
Prefix: "prefix/directory",
Connections: 5,
}},
- {"s3:https://hostname:9999/foobar", Config{
+ {S: "s3:hostname.foo/foobar", Cfg: Config{
+ Endpoint: "hostname.foo",
+ Bucket: "foobar",
+ Prefix: "",
+ Connections: 5,
+ }},
+ {S: "s3:hostname.foo/foobar/prefix/directory", Cfg: Config{
+ Endpoint: "hostname.foo",
+ Bucket: "foobar",
+ Prefix: "prefix/directory",
+ Connections: 5,
+ }},
+ {S: "s3:https://hostname/foobar", Cfg: Config{
+ Endpoint: "hostname",
+ Bucket: "foobar",
+ Prefix: "",
+ Connections: 5,
+ }},
+ {S: "s3:https://hostname:9999/foobar", Cfg: Config{
Endpoint: "hostname:9999",
Bucket: "foobar",
Prefix: "",
Connections: 5,
}},
- {"s3:https://hostname:9999/foobar/", Config{
+ {S: "s3:https://hostname:9999/foobar/", Cfg: Config{
Endpoint: "hostname:9999",
Bucket: "foobar",
Prefix: "",
Connections: 5,
}},
- {"s3:http://hostname:9999/foobar", Config{
+ {S: "s3:http://hostname:9999/foobar", Cfg: Config{
Endpoint: "hostname:9999",
Bucket: "foobar",
Prefix: "",
UseHTTP: true,
Connections: 5,
}},
- {"s3:http://hostname:9999/foobar/", Config{
+ {S: "s3:http://hostname:9999/foobar/", Cfg: Config{
Endpoint: "hostname:9999",
Bucket: "foobar",
Prefix: "",
UseHTTP: true,
Connections: 5,
}},
- {"s3:http://hostname:9999/bucket/prefix/directory", Config{
+ {S: "s3:http://hostname:9999/bucket/prefix/directory", Cfg: Config{
Endpoint: "hostname:9999",
Bucket: "bucket",
Prefix: "prefix/directory",
UseHTTP: true,
Connections: 5,
}},
- {"s3:http://hostname:9999/bucket/prefix/directory/", Config{
+ {S: "s3:http://hostname:9999/bucket/prefix/directory/", Cfg: Config{
Endpoint: "hostname:9999",
Bucket: "bucket",
Prefix: "prefix/directory",
@@ -100,19 +117,7 @@ var configTests = []struct {
}
func TestParseConfig(t *testing.T) {
- for i, test := range configTests {
- cfg, err := ParseConfig(test.s)
- if err != nil {
- t.Errorf("test %d:%s failed: %v", i, test.s, err)
- continue
- }
-
- if cfg != test.cfg {
- t.Errorf("test %d:\ninput:\n %s\n wrong config, want:\n %v\ngot:\n %v",
- i, test.s, test.cfg, cfg)
- continue
- }
- }
+ test.ParseConfigTester(t, ParseConfig, configTests)
}
func TestParseError(t *testing.T) {
diff --git a/internal/backend/s3/s3.go b/internal/backend/s3/s3.go
index ad652a206..3fe32d215 100644
--- a/internal/backend/s3/s3.go
+++ b/internal/backend/s3/s3.go
@@ -13,12 +13,11 @@ import (
"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/backend/layout"
- "github.com/restic/restic/internal/backend/sema"
+ "github.com/restic/restic/internal/backend/location"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/restic"
- "github.com/cenkalti/backoff/v4"
"github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/credentials"
)
@@ -26,7 +25,6 @@ import (
// Backend stores data on an S3 endpoint.
type Backend struct {
client *minio.Client
- sem sema.Semaphore
cfg Config
layout.Layout
}
@@ -34,11 +32,21 @@ type Backend struct {
// make sure that *Backend implements backend.Backend
var _ restic.Backend = &Backend{}
+func NewFactory() location.Factory {
+ return location.NewHTTPBackendFactory("s3", ParseConfig, location.NoPassword, Create, Open)
+}
+
const defaultLayout = "default"
func open(ctx context.Context, cfg Config, rt http.RoundTripper) (*Backend, error) {
debug.Log("open, config %#v", cfg)
+ if cfg.KeyID == "" && cfg.Secret.String() != "" {
+ return nil, errors.Fatalf("unable to open S3 backend: Key ID ($AWS_ACCESS_KEY_ID) is empty")
+ } else if cfg.KeyID != "" && cfg.Secret.String() == "" {
+ return nil, errors.Fatalf("unable to open S3 backend: Secret ($AWS_SECRET_ACCESS_KEY) is empty")
+ }
+
if cfg.MaxRetries > 0 {
minio.MaxRetry = int(cfg.MaxRetries)
}
@@ -102,14 +110,8 @@ func open(ctx context.Context, cfg Config, rt http.RoundTripper) (*Backend, erro
return nil, errors.Wrap(err, "minio.New")
}
- sem, err := sema.New(cfg.Connections)
- if err != nil {
- return nil, err
- }
-
be := &Backend{
client: client,
- sem: sem,
cfg: cfg,
}
@@ -169,8 +171,6 @@ func isAccessDenied(err error) bool {
// IsNotExist returns true if the error is caused by a not existing file.
func (be *Backend) IsNotExist(err error) bool {
- debug.Log("IsNotExist(%T, %#v)", err, err)
-
var e minio.ErrorResponse
return errors.As(err, &e) && e.Code == "NoSuchKey"
}
@@ -273,17 +273,8 @@ func (be *Backend) Path() string {
// Save stores data in the backend at the handle.
func (be *Backend) Save(ctx context.Context, h restic.Handle, rd restic.RewindReader) error {
- debug.Log("Save %v", h)
-
- if err := h.Valid(); err != nil {
- return backoff.Permanent(err)
- }
-
objName := be.Filename(h)
- be.sem.GetToken()
- defer be.sem.ReleaseToken()
-
opts := minio.PutObjectOptions{StorageClass: be.cfg.StorageClass}
opts.ContentType = "application/octet-stream"
// the only option with the high-level api is to let the library handle the checksum computation
@@ -291,11 +282,8 @@ func (be *Backend) Save(ctx context.Context, h restic.Handle, rd restic.RewindRe
// only use multipart uploads for very large files
opts.PartSize = 200 * 1024 * 1024
- debug.Log("PutObject(%v, %v, %v)", be.cfg.Bucket, objName, rd.Length())
info, err := be.client.PutObject(ctx, be.cfg.Bucket, objName, io.NopCloser(rd), int64(rd.Length()), opts)
- debug.Log("%v -> %v bytes, err %#v: %v", objName, info.Size, err, err)
-
// sanity check
if err == nil && info.Size != rd.Length() {
return errors.Errorf("wrote %d bytes instead of the expected %d bytes", info.Size, rd.Length())
@@ -307,32 +295,20 @@ func (be *Backend) Save(ctx context.Context, h restic.Handle, rd restic.RewindRe
// Load runs fn with a reader that yields the contents of the file at h at the
// given offset.
func (be *Backend) Load(ctx context.Context, h restic.Handle, length int, offset int64, fn func(rd io.Reader) error) error {
+ ctx, cancel := context.WithCancel(ctx)
+ defer cancel()
+
return backend.DefaultLoad(ctx, h, length, offset, be.openReader, fn)
}
func (be *Backend) openReader(ctx context.Context, h restic.Handle, length int, offset int64) (io.ReadCloser, error) {
- debug.Log("Load %v, length %v, offset %v from %v", h, length, offset, be.Filename(h))
- if err := h.Valid(); err != nil {
- return nil, backoff.Permanent(err)
- }
-
- if offset < 0 {
- return nil, errors.New("offset is negative")
- }
-
- if length < 0 {
- return nil, errors.Errorf("invalid length %d", length)
- }
-
objName := be.Filename(h)
opts := minio.GetObjectOptions{}
var err error
if length > 0 {
- debug.Log("range: %v-%v", offset, offset+int64(length)-1)
err = opts.SetRange(offset, offset+int64(length)-1)
} else if offset > 0 {
- debug.Log("range: %v-", offset)
err = opts.SetRange(offset, 0)
}
@@ -340,41 +316,30 @@ func (be *Backend) openReader(ctx context.Context, h restic.Handle, length int,
return nil, errors.Wrap(err, "SetRange")
}
- be.sem.GetToken()
- ctx, cancel := context.WithCancel(ctx)
-
coreClient := minio.Core{Client: be.client}
rd, _, _, err := coreClient.GetObject(ctx, be.cfg.Bucket, objName, opts)
if err != nil {
- cancel()
- be.sem.ReleaseToken()
return nil, err
}
- return be.sem.ReleaseTokenOnClose(rd, cancel), err
+ return rd, err
}
// Stat returns information about a blob.
func (be *Backend) Stat(ctx context.Context, h restic.Handle) (bi restic.FileInfo, err error) {
- debug.Log("%v", h)
-
objName := be.Filename(h)
var obj *minio.Object
opts := minio.GetObjectOptions{}
- be.sem.GetToken()
obj, err = be.client.GetObject(ctx, be.cfg.Bucket, objName, opts)
if err != nil {
- debug.Log("GetObject() err %v", err)
- be.sem.ReleaseToken()
return restic.FileInfo{}, errors.Wrap(err, "client.GetObject")
}
// make sure that the object is closed properly.
defer func() {
e := obj.Close()
- be.sem.ReleaseToken()
if err == nil {
err = errors.Wrap(e, "Close")
}
@@ -382,7 +347,6 @@ func (be *Backend) Stat(ctx context.Context, h restic.Handle) (bi restic.FileInf
fi, err := obj.Stat()
if err != nil {
- debug.Log("Stat() err %v", err)
return restic.FileInfo{}, errors.Wrap(err, "Stat")
}
@@ -393,11 +357,7 @@ func (be *Backend) Stat(ctx context.Context, h restic.Handle) (bi restic.FileInf
func (be *Backend) Remove(ctx context.Context, h restic.Handle) error {
objName := be.Filename(h)
- be.sem.GetToken()
err := be.client.RemoveObject(ctx, be.cfg.Bucket, objName, minio.RemoveObjectOptions{})
- be.sem.ReleaseToken()
-
- debug.Log("Remove(%v) at %v -> err %v", h, objName, err)
if be.IsNotExist(err) {
err = nil
@@ -409,8 +369,6 @@ func (be *Backend) Remove(ctx context.Context, h restic.Handle) error {
// List runs fn for each file in the backend which has the type t. When an
// error occurs (or fn returns an error), List stops and returns it.
func (be *Backend) List(ctx context.Context, t restic.FileType, fn func(restic.FileInfo) error) error {
- debug.Log("listing %v", t)
-
prefix, recursive := be.Basedir(t)
// make sure prefix ends with a slash
@@ -464,30 +422,9 @@ func (be *Backend) List(ctx context.Context, t restic.FileType, fn func(restic.F
return ctx.Err()
}
-// Remove keys for a specified backend type.
-func (be *Backend) removeKeys(ctx context.Context, t restic.FileType) error {
- return be.List(ctx, restic.PackFile, func(fi restic.FileInfo) error {
- return be.Remove(ctx, restic.Handle{Type: t, Name: fi.Name})
- })
-}
-
// Delete removes all restic keys in the bucket. It will not remove the bucket itself.
func (be *Backend) Delete(ctx context.Context) error {
- alltypes := []restic.FileType{
- restic.PackFile,
- restic.KeyFile,
- restic.LockFile,
- restic.SnapshotFile,
- restic.IndexFile}
-
- for _, t := range alltypes {
- err := be.removeKeys(ctx, t)
- if err != nil {
- return nil
- }
- }
-
- return be.Remove(ctx, restic.Handle{Type: restic.ConfigFile})
+ return backend.DefaultDelete(ctx, be)
}
// Close does nothing
diff --git a/internal/backend/s3/s3_test.go b/internal/backend/s3/s3_test.go
index c024251a9..3051d8ddb 100644
--- a/internal/backend/s3/s3_test.go
+++ b/internal/backend/s3/s3_test.go
@@ -4,7 +4,6 @@ import (
"context"
"crypto/rand"
"encoding/hex"
- "errors"
"fmt"
"io"
"net"
@@ -15,7 +14,7 @@ import (
"testing"
"time"
- "github.com/restic/restic/internal/backend"
+ "github.com/restic/restic/internal/backend/location"
"github.com/restic/restic/internal/backend/s3"
"github.com/restic/restic/internal/backend/test"
"github.com/restic/restic/internal/options"
@@ -98,89 +97,42 @@ func newRandomCredentials(t testing.TB) (key, secret string) {
return key, secret
}
-type MinioTestConfig struct {
- s3.Config
-
- tempdir string
- stopServer func()
-}
+func newMinioTestSuite(t testing.TB) (*test.Suite[s3.Config], func()) {
+ ctx, cancel := context.WithCancel(context.Background())
-func createS3(t testing.TB, cfg MinioTestConfig, tr http.RoundTripper) (be restic.Backend, err error) {
- for i := 0; i < 10; i++ {
- be, err = s3.Create(context.TODO(), cfg.Config, tr)
- if err != nil {
- t.Logf("s3 open: try %d: error %v", i, err)
- time.Sleep(500 * time.Millisecond)
- continue
+ tempdir := rtest.TempDir(t)
+ key, secret := newRandomCredentials(t)
+ cleanup := runMinio(ctx, t, tempdir, key, secret)
+
+ return &test.Suite[s3.Config]{
+ // NewConfig returns a config for a new temporary backend that will be used in tests.
+ NewConfig: func() (*s3.Config, error) {
+ cfg := s3.NewConfig()
+ cfg.Endpoint = "localhost:9000"
+ cfg.Bucket = "restictestbucket"
+ cfg.Prefix = fmt.Sprintf("test-%d", time.Now().UnixNano())
+ cfg.UseHTTP = true
+ cfg.KeyID = key
+ cfg.Secret = options.NewSecretString(secret)
+ return &cfg, nil
+ },
+
+ Factory: location.NewHTTPBackendFactory("s3", s3.ParseConfig, location.NoPassword, func(ctx context.Context, cfg s3.Config, rt http.RoundTripper) (be restic.Backend, err error) {
+ for i := 0; i < 10; i++ {
+ be, err = s3.Create(ctx, cfg, rt)
+ if err != nil {
+ t.Logf("s3 open: try %d: error %v", i, err)
+ time.Sleep(500 * time.Millisecond)
+ continue
+ }
+ break
+ }
+ return be, err
+ }, s3.Open),
+ }, func() {
+ defer cancel()
+ defer cleanup()
}
-
- break
- }
-
- return be, err
-}
-
-func newMinioTestSuite(ctx context.Context, t testing.TB) *test.Suite {
- tr, err := backend.Transport(backend.TransportOptions{})
- if err != nil {
- t.Fatalf("cannot create transport for tests: %v", err)
- }
-
- return &test.Suite{
- // NewConfig returns a config for a new temporary backend that will be used in tests.
- NewConfig: func() (interface{}, error) {
- cfg := MinioTestConfig{}
-
- cfg.tempdir = rtest.TempDir(t)
- key, secret := newRandomCredentials(t)
- cfg.stopServer = runMinio(ctx, t, cfg.tempdir, key, secret)
-
- cfg.Config = s3.NewConfig()
- cfg.Config.Endpoint = "localhost:9000"
- cfg.Config.Bucket = "restictestbucket"
- cfg.Config.Prefix = fmt.Sprintf("test-%d", time.Now().UnixNano())
- cfg.Config.UseHTTP = true
- cfg.Config.KeyID = key
- cfg.Config.Secret = options.NewSecretString(secret)
- return cfg, nil
- },
-
- // CreateFn is a function that creates a temporary repository for the tests.
- Create: func(config interface{}) (restic.Backend, error) {
- cfg := config.(MinioTestConfig)
-
- be, err := createS3(t, cfg, tr)
- if err != nil {
- return nil, err
- }
-
- _, err = be.Stat(context.TODO(), restic.Handle{Type: restic.ConfigFile})
- if err != nil && !be.IsNotExist(err) {
- return nil, err
- }
-
- if err == nil {
- return nil, errors.New("config already exists")
- }
-
- return be, nil
- },
-
- // OpenFn is a function that opens a previously created temporary repository.
- Open: func(config interface{}) (restic.Backend, error) {
- cfg := config.(MinioTestConfig)
- return s3.Open(ctx, cfg.Config, tr)
- },
-
- // CleanupFn removes data created during the tests.
- Cleanup: func(config interface{}) error {
- cfg := config.(MinioTestConfig)
- if cfg.stopServer != nil {
- cfg.stopServer()
- }
- return nil
- },
- }
}
func TestBackendMinio(t *testing.T) {
@@ -197,10 +149,10 @@ func TestBackendMinio(t *testing.T) {
return
}
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
+ suite, cleanup := newMinioTestSuite(t)
+ defer cleanup()
- newMinioTestSuite(ctx, t).RunTests(t)
+ suite.RunTests(t)
}
func BenchmarkBackendMinio(t *testing.B) {
@@ -211,74 +163,31 @@ func BenchmarkBackendMinio(t *testing.B) {
return
}
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
+ suite, cleanup := newMinioTestSuite(t)
+ defer cleanup()
- newMinioTestSuite(ctx, t).RunBenchmarks(t)
+ suite.RunBenchmarks(t)
}
-func newS3TestSuite(t testing.TB) *test.Suite {
- tr, err := backend.Transport(backend.TransportOptions{})
- if err != nil {
- t.Fatalf("cannot create transport for tests: %v", err)
- }
-
- return &test.Suite{
+func newS3TestSuite() *test.Suite[s3.Config] {
+ return &test.Suite[s3.Config]{
// do not use excessive data
MinimalData: true,
// NewConfig returns a config for a new temporary backend that will be used in tests.
- NewConfig: func() (interface{}, error) {
- s3cfg, err := s3.ParseConfig(os.Getenv("RESTIC_TEST_S3_REPOSITORY"))
+ NewConfig: func() (*s3.Config, error) {
+ cfg, err := s3.ParseConfig(os.Getenv("RESTIC_TEST_S3_REPOSITORY"))
if err != nil {
return nil, err
}
- cfg := s3cfg.(s3.Config)
cfg.KeyID = os.Getenv("RESTIC_TEST_S3_KEY")
cfg.Secret = options.NewSecretString(os.Getenv("RESTIC_TEST_S3_SECRET"))
cfg.Prefix = fmt.Sprintf("test-%d", time.Now().UnixNano())
return cfg, nil
},
- // CreateFn is a function that creates a temporary repository for the tests.
- Create: func(config interface{}) (restic.Backend, error) {
- cfg := config.(s3.Config)
-
- be, err := s3.Create(context.TODO(), cfg, tr)
- if err != nil {
- return nil, err
- }
-
- _, err = be.Stat(context.TODO(), restic.Handle{Type: restic.ConfigFile})
- if err != nil && !be.IsNotExist(err) {
- return nil, err
- }
-
- if err == nil {
- return nil, errors.New("config already exists")
- }
-
- return be, nil
- },
-
- // OpenFn is a function that opens a previously created temporary repository.
- Open: func(config interface{}) (restic.Backend, error) {
- cfg := config.(s3.Config)
- return s3.Open(context.TODO(), cfg, tr)
- },
-
- // CleanupFn removes data created during the tests.
- Cleanup: func(config interface{}) error {
- cfg := config.(s3.Config)
-
- be, err := s3.Open(context.TODO(), cfg, tr)
- if err != nil {
- return err
- }
-
- return be.Delete(context.TODO())
- },
+ Factory: s3.NewFactory(),
}
}
@@ -303,7 +212,7 @@ func TestBackendS3(t *testing.T) {
}
t.Logf("run tests")
- newS3TestSuite(t).RunTests(t)
+ newS3TestSuite().RunTests(t)
}
func BenchmarkBackendS3(t *testing.B) {
@@ -321,5 +230,5 @@ func BenchmarkBackendS3(t *testing.B) {
}
t.Logf("run tests")
- newS3TestSuite(t).RunBenchmarks(t)
+ newS3TestSuite().RunBenchmarks(t)
}
diff --git a/internal/backend/sema/backend.go b/internal/backend/sema/backend.go
new file mode 100644
index 000000000..d60788f26
--- /dev/null
+++ b/internal/backend/sema/backend.go
@@ -0,0 +1,130 @@
+package sema
+
+import (
+ "context"
+ "io"
+ "sync"
+
+ "github.com/cenkalti/backoff/v4"
+ "github.com/restic/restic/internal/errors"
+ "github.com/restic/restic/internal/restic"
+)
+
+// make sure that connectionLimitedBackend implements restic.Backend
+var _ restic.Backend = &connectionLimitedBackend{}
+
+// connectionLimitedBackend limits the number of concurrent operations.
+type connectionLimitedBackend struct {
+ restic.Backend
+ sem semaphore
+ freezeLock sync.Mutex
+}
+
+// NewBackend creates a backend that limits the concurrent operations on the underlying backend
+func NewBackend(be restic.Backend) restic.Backend {
+ sem, err := newSemaphore(be.Connections())
+ if err != nil {
+ panic(err)
+ }
+
+ return &connectionLimitedBackend{
+ Backend: be,
+ sem: sem,
+ }
+}
+
+// typeDependentLimit acquires a token unless the FileType is a lock file. The returned function
+// must be called to release the token.
+func (be *connectionLimitedBackend) typeDependentLimit(t restic.FileType) func() {
+ // allow concurrent lock file operations to ensure that the lock refresh is always possible
+ if t == restic.LockFile {
+ return func() {}
+ }
+ be.sem.GetToken()
+ // prevent token usage while the backend is frozen
+ be.freezeLock.Lock()
+ defer be.freezeLock.Unlock()
+
+ return be.sem.ReleaseToken
+}
+
+// Freeze blocks all backend operations except those on lock files
+func (be *connectionLimitedBackend) Freeze() {
+ be.freezeLock.Lock()
+}
+
+// Unfreeze allows all backend operations to continue
+func (be *connectionLimitedBackend) Unfreeze() {
+ be.freezeLock.Unlock()
+}
+
+// Save adds new Data to the backend.
+func (be *connectionLimitedBackend) Save(ctx context.Context, h restic.Handle, rd restic.RewindReader) error {
+ if err := h.Valid(); err != nil {
+ return backoff.Permanent(err)
+ }
+
+ defer be.typeDependentLimit(h.Type)()
+
+ if ctx.Err() != nil {
+ return ctx.Err()
+ }
+
+ return be.Backend.Save(ctx, h, rd)
+}
+
+// Load runs fn with a reader that yields the contents of the file at h at the
+// given offset.
+func (be *connectionLimitedBackend) Load(ctx context.Context, h restic.Handle, length int, offset int64, fn func(rd io.Reader) error) error {
+ if err := h.Valid(); err != nil {
+ return backoff.Permanent(err)
+ }
+ if offset < 0 {
+ return backoff.Permanent(errors.New("offset is negative"))
+ }
+ if length < 0 {
+ return backoff.Permanent(errors.Errorf("invalid length %d", length))
+ }
+
+ defer be.typeDependentLimit(h.Type)()
+
+ if ctx.Err() != nil {
+ return ctx.Err()
+ }
+
+ return be.Backend.Load(ctx, h, length, offset, fn)
+}
+
+// Stat returns information about a file in the backend.
+func (be *connectionLimitedBackend) Stat(ctx context.Context, h restic.Handle) (restic.FileInfo, error) {
+ if err := h.Valid(); err != nil {
+ return restic.FileInfo{}, backoff.Permanent(err)
+ }
+
+ defer be.typeDependentLimit(h.Type)()
+
+ if ctx.Err() != nil {
+ return restic.FileInfo{}, ctx.Err()
+ }
+
+ return be.Backend.Stat(ctx, h)
+}
+
+// Remove deletes a file from the backend.
+func (be *connectionLimitedBackend) Remove(ctx context.Context, h restic.Handle) error {
+ if err := h.Valid(); err != nil {
+ return backoff.Permanent(err)
+ }
+
+ defer be.typeDependentLimit(h.Type)()
+
+ if ctx.Err() != nil {
+ return ctx.Err()
+ }
+
+ return be.Backend.Remove(ctx, h)
+}
+
+func (be *connectionLimitedBackend) Unwrap() restic.Backend {
+ return be.Backend
+}
diff --git a/internal/backend/sema/backend_test.go b/internal/backend/sema/backend_test.go
new file mode 100644
index 000000000..a1dd16187
--- /dev/null
+++ b/internal/backend/sema/backend_test.go
@@ -0,0 +1,235 @@
+package sema_test
+
+import (
+ "context"
+ "io"
+ "sync"
+ "sync/atomic"
+ "testing"
+ "time"
+
+ "github.com/restic/restic/internal/backend/mock"
+ "github.com/restic/restic/internal/backend/sema"
+ "github.com/restic/restic/internal/restic"
+ "github.com/restic/restic/internal/test"
+ "golang.org/x/sync/errgroup"
+)
+
+func TestParameterValidationSave(t *testing.T) {
+ m := mock.NewBackend()
+ m.SaveFn = func(ctx context.Context, h restic.Handle, rd restic.RewindReader) error {
+ return nil
+ }
+ be := sema.NewBackend(m)
+
+ err := be.Save(context.TODO(), restic.Handle{}, nil)
+ test.Assert(t, err != nil, "Save() with invalid handle did not return an error")
+}
+
+func TestParameterValidationLoad(t *testing.T) {
+ m := mock.NewBackend()
+ m.OpenReaderFn = func(ctx context.Context, h restic.Handle, length int, offset int64) (io.ReadCloser, error) {
+ return io.NopCloser(nil), nil
+ }
+
+ be := sema.NewBackend(m)
+ nilCb := func(rd io.Reader) error { return nil }
+
+ err := be.Load(context.TODO(), restic.Handle{}, 10, 0, nilCb)
+ test.Assert(t, err != nil, "Load() with invalid handle did not return an error")
+
+ h := restic.Handle{Type: restic.PackFile, Name: "foobar"}
+ err = be.Load(context.TODO(), h, 10, -1, nilCb)
+ test.Assert(t, err != nil, "Load() with negative offset did not return an error")
+ err = be.Load(context.TODO(), h, -1, 0, nilCb)
+ test.Assert(t, err != nil, "Load() with negative length did not return an error")
+}
+
+func TestParameterValidationStat(t *testing.T) {
+ m := mock.NewBackend()
+ m.StatFn = func(ctx context.Context, h restic.Handle) (restic.FileInfo, error) {
+ return restic.FileInfo{}, nil
+ }
+ be := sema.NewBackend(m)
+
+ _, err := be.Stat(context.TODO(), restic.Handle{})
+ test.Assert(t, err != nil, "Stat() with invalid handle did not return an error")
+}
+
+func TestParameterValidationRemove(t *testing.T) {
+ m := mock.NewBackend()
+ m.RemoveFn = func(ctx context.Context, h restic.Handle) error {
+ return nil
+ }
+ be := sema.NewBackend(m)
+
+ err := be.Remove(context.TODO(), restic.Handle{})
+ test.Assert(t, err != nil, "Remove() with invalid handle did not return an error")
+}
+
+func TestUnwrap(t *testing.T) {
+ m := mock.NewBackend()
+ be := sema.NewBackend(m)
+
+ unwrapper := be.(restic.BackendUnwrapper)
+ test.Assert(t, unwrapper.Unwrap() == m, "Unwrap() returned wrong backend")
+}
+
+func countingBlocker() (func(), func(int) int) {
+ ctr := int64(0)
+ blocker := make(chan struct{})
+
+ wait := func() {
+ // count how many goroutines were allowed by the semaphore
+ atomic.AddInt64(&ctr, 1)
+ // block until the test can retrieve the counter
+ <-blocker
+ }
+
+ unblock := func(expected int) int {
+ // give goroutines enough time to block
+ var blocked int64
+ for i := 0; i < 100 && blocked < int64(expected); i++ {
+ time.Sleep(100 * time.Microsecond)
+ blocked = atomic.LoadInt64(&ctr)
+ }
+ close(blocker)
+ return int(blocked)
+ }
+
+ return wait, unblock
+}
+
+func concurrencyTester(t *testing.T, setup func(m *mock.Backend), handler func(be restic.Backend) func() error, unblock func(int) int, isUnlimited bool) {
+ expectBlocked := int(2)
+ workerCount := expectBlocked + 1
+
+ m := mock.NewBackend()
+ setup(m)
+ m.ConnectionsFn = func() uint { return uint(expectBlocked) }
+ be := sema.NewBackend(m)
+
+ var wg errgroup.Group
+ for i := 0; i < workerCount; i++ {
+ wg.Go(handler(be))
+ }
+
+ if isUnlimited {
+ expectBlocked = workerCount
+ }
+ blocked := unblock(expectBlocked)
+ test.Assert(t, blocked == expectBlocked, "Unexpected number of goroutines blocked: %v", blocked)
+ test.OK(t, wg.Wait())
+}
+
+func TestConcurrencyLimitSave(t *testing.T) {
+ wait, unblock := countingBlocker()
+ concurrencyTester(t, func(m *mock.Backend) {
+ m.SaveFn = func(ctx context.Context, h restic.Handle, rd restic.RewindReader) error {
+ wait()
+ return nil
+ }
+ }, func(be restic.Backend) func() error {
+ return func() error {
+ h := restic.Handle{Type: restic.PackFile, Name: "foobar"}
+ return be.Save(context.TODO(), h, nil)
+ }
+ }, unblock, false)
+}
+
+func TestConcurrencyLimitLoad(t *testing.T) {
+ wait, unblock := countingBlocker()
+ concurrencyTester(t, func(m *mock.Backend) {
+ m.OpenReaderFn = func(ctx context.Context, h restic.Handle, length int, offset int64) (io.ReadCloser, error) {
+ wait()
+ return io.NopCloser(nil), nil
+ }
+ }, func(be restic.Backend) func() error {
+ return func() error {
+ h := restic.Handle{Type: restic.PackFile, Name: "foobar"}
+ nilCb := func(rd io.Reader) error { return nil }
+ return be.Load(context.TODO(), h, 10, 0, nilCb)
+ }
+ }, unblock, false)
+}
+
+func TestConcurrencyLimitStat(t *testing.T) {
+ wait, unblock := countingBlocker()
+ concurrencyTester(t, func(m *mock.Backend) {
+ m.StatFn = func(ctx context.Context, h restic.Handle) (restic.FileInfo, error) {
+ wait()
+ return restic.FileInfo{}, nil
+ }
+ }, func(be restic.Backend) func() error {
+ return func() error {
+ h := restic.Handle{Type: restic.PackFile, Name: "foobar"}
+ _, err := be.Stat(context.TODO(), h)
+ return err
+ }
+ }, unblock, false)
+}
+
+func TestConcurrencyLimitDelete(t *testing.T) {
+ wait, unblock := countingBlocker()
+ concurrencyTester(t, func(m *mock.Backend) {
+ m.RemoveFn = func(ctx context.Context, h restic.Handle) error {
+ wait()
+ return nil
+ }
+ }, func(be restic.Backend) func() error {
+ return func() error {
+ h := restic.Handle{Type: restic.PackFile, Name: "foobar"}
+ return be.Remove(context.TODO(), h)
+ }
+ }, unblock, false)
+}
+
+func TestConcurrencyUnlimitedLockSave(t *testing.T) {
+ wait, unblock := countingBlocker()
+ concurrencyTester(t, func(m *mock.Backend) {
+ m.SaveFn = func(ctx context.Context, h restic.Handle, rd restic.RewindReader) error {
+ wait()
+ return nil
+ }
+ }, func(be restic.Backend) func() error {
+ return func() error {
+ h := restic.Handle{Type: restic.LockFile, Name: "foobar"}
+ return be.Save(context.TODO(), h, nil)
+ }
+ }, unblock, true)
+}
+
+func TestFreeze(t *testing.T) {
+ var counter int64
+ m := mock.NewBackend()
+ m.SaveFn = func(ctx context.Context, h restic.Handle, rd restic.RewindReader) error {
+ atomic.AddInt64(&counter, 1)
+ return nil
+ }
+ m.ConnectionsFn = func() uint { return 2 }
+ be := sema.NewBackend(m)
+ fb := be.(restic.FreezeBackend)
+
+ // Freeze backend
+ fb.Freeze()
+
+ // Start Save call that should block
+ var wg sync.WaitGroup
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ h := restic.Handle{Type: restic.PackFile, Name: "foobar"}
+ test.OK(t, be.Save(context.TODO(), h, nil))
+ }()
+
+ // check
+ time.Sleep(1 * time.Millisecond)
+ val := atomic.LoadInt64(&counter)
+ test.Assert(t, val == 0, "save call worked despite frozen backend")
+
+ // unfreeze and check that save did complete
+ fb.Unfreeze()
+ wg.Wait()
+ val = atomic.LoadInt64(&counter)
+ test.Assert(t, val == 1, "save call should have completed")
+}
diff --git a/internal/backend/sema/semaphore.go b/internal/backend/sema/semaphore.go
index 7ee912979..c664eef7c 100644
--- a/internal/backend/sema/semaphore.go
+++ b/internal/backend/sema/semaphore.go
@@ -2,64 +2,30 @@
package sema
import (
- "context"
- "io"
-
+ "github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/errors"
)
-// A Semaphore limits access to a restricted resource.
-type Semaphore struct {
+// A semaphore limits access to a restricted resource.
+type semaphore struct {
ch chan struct{}
}
-// New returns a new semaphore with capacity n.
-func New(n uint) (Semaphore, error) {
+// newSemaphore returns a new semaphore with capacity n.
+func newSemaphore(n uint) (semaphore, error) {
if n == 0 {
- return Semaphore{}, errors.New("capacity must be a positive number")
+ return semaphore{}, errors.New("capacity must be a positive number")
}
- return Semaphore{
+ return semaphore{
ch: make(chan struct{}, n),
}, nil
}
// GetToken blocks until a Token is available.
-func (s Semaphore) GetToken() { s.ch <- struct{}{} }
-
-// ReleaseToken returns a token.
-func (s Semaphore) ReleaseToken() { <-s.ch }
-
-// ReleaseTokenOnClose wraps an io.ReadCloser to return a token on Close.
-// Before returning the token, cancel, if not nil, will be run
-// to free up context resources.
-func (s Semaphore) ReleaseTokenOnClose(rc io.ReadCloser, cancel context.CancelFunc) io.ReadCloser {
- return &wrapReader{ReadCloser: rc, sem: s, cancel: cancel}
-}
-
-type wrapReader struct {
- io.ReadCloser
- eofSeen bool
- sem Semaphore
- cancel context.CancelFunc
-}
-
-func (wr *wrapReader) Read(p []byte) (int, error) {
- if wr.eofSeen { // XXX Why do we do this?
- return 0, io.EOF
- }
-
- n, err := wr.ReadCloser.Read(p)
- if err == io.EOF {
- wr.eofSeen = true
- }
- return n, err
+func (s semaphore) GetToken() {
+ s.ch <- struct{}{}
+ debug.Log("acquired token")
}
-func (wr *wrapReader) Close() error {
- err := wr.ReadCloser.Close()
- if wr.cancel != nil {
- wr.cancel()
- }
- wr.sem.ReleaseToken()
- return err
-}
+// ReleaseToken returns a token.
+func (s semaphore) ReleaseToken() { <-s.ch }
diff --git a/internal/backend/sftp/config.go b/internal/backend/sftp/config.go
index 76d6d145d..ed7c2cafa 100644
--- a/internal/backend/sftp/config.go
+++ b/internal/backend/sftp/config.go
@@ -35,7 +35,7 @@ func init() {
// and sftp:user@host:directory. The directory will be path Cleaned and can
// be an absolute path if it starts with a '/' (e.g.
// sftp://user@host//absolute and sftp:user@host:/absolute).
-func ParseConfig(s string) (interface{}, error) {
+func ParseConfig(s string) (*Config, error) {
var user, host, port, dir string
switch {
case strings.HasPrefix(s, "sftp://"):
@@ -80,7 +80,7 @@ func ParseConfig(s string) (interface{}, error) {
p := path.Clean(dir)
if strings.HasPrefix(p, "~") {
- return nil, errors.Fatal("sftp path starts with the tilde (~) character, that fails for most sftp servers.\nUse a relative directory, most servers interpret this as relative to the user's home directory.")
+ return nil, errors.New("sftp path starts with the tilde (~) character, that fails for most sftp servers.\nUse a relative directory, most servers interpret this as relative to the user's home directory")
}
cfg := NewConfig()
@@ -89,5 +89,5 @@ func ParseConfig(s string) (interface{}, error) {
cfg.Port = port
cfg.Path = p
- return cfg, nil
+ return &cfg, nil
}
diff --git a/internal/backend/sftp/config_test.go b/internal/backend/sftp/config_test.go
index 3772c038b..bf7fa9653 100644
--- a/internal/backend/sftp/config_test.go
+++ b/internal/backend/sftp/config_test.go
@@ -2,94 +2,81 @@ package sftp
import (
"testing"
+
+ "github.com/restic/restic/internal/backend/test"
)
-var configTests = []struct {
- in string
- cfg Config
-}{
+var configTests = []test.ConfigTestData[Config]{
// first form, user specified sftp://user@host/dir
{
- "sftp://user@host/dir/subdir",
- Config{User: "user", Host: "host", Path: "dir/subdir", Connections: 5},
+ S: "sftp://user@host/dir/subdir",
+ Cfg: Config{User: "user", Host: "host", Path: "dir/subdir", Connections: 5},
},
{
- "sftp://host/dir/subdir",
- Config{Host: "host", Path: "dir/subdir", Connections: 5},
+ S: "sftp://host/dir/subdir",
+ Cfg: Config{Host: "host", Path: "dir/subdir", Connections: 5},
},
{
- "sftp://host//dir/subdir",
- Config{Host: "host", Path: "/dir/subdir", Connections: 5},
+ S: "sftp://host//dir/subdir",
+ Cfg: Config{Host: "host", Path: "/dir/subdir", Connections: 5},
},
{
- "sftp://host:10022//dir/subdir",
- Config{Host: "host", Port: "10022", Path: "/dir/subdir", Connections: 5},
+ S: "sftp://host:10022//dir/subdir",
+ Cfg: Config{Host: "host", Port: "10022", Path: "/dir/subdir", Connections: 5},
},
{
- "sftp://user@host:10022//dir/subdir",
- Config{User: "user", Host: "host", Port: "10022", Path: "/dir/subdir", Connections: 5},
+ S: "sftp://user@host:10022//dir/subdir",
+ Cfg: Config{User: "user", Host: "host", Port: "10022", Path: "/dir/subdir", Connections: 5},
},
{
- "sftp://user@host/dir/subdir/../other",
- Config{User: "user", Host: "host", Path: "dir/other", Connections: 5},
+ S: "sftp://user@host/dir/subdir/../other",
+ Cfg: Config{User: "user", Host: "host", Path: "dir/other", Connections: 5},
},
{
- "sftp://user@host/dir///subdir",
- Config{User: "user", Host: "host", Path: "dir/subdir", Connections: 5},
+ S: "sftp://user@host/dir///subdir",
+ Cfg: Config{User: "user", Host: "host", Path: "dir/subdir", Connections: 5},
},
// IPv6 address.
{
- "sftp://user@[::1]/dir",
- Config{User: "user", Host: "::1", Path: "dir", Connections: 5},
+ S: "sftp://user@[::1]/dir",
+ Cfg: Config{User: "user", Host: "::1", Path: "dir", Connections: 5},
},
// IPv6 address with port.
{
- "sftp://user@[::1]:22/dir",
- Config{User: "user", Host: "::1", Port: "22", Path: "dir", Connections: 5},
+ S: "sftp://user@[::1]:22/dir",
+ Cfg: Config{User: "user", Host: "::1", Port: "22", Path: "dir", Connections: 5},
},
// second form, user specified sftp:user@host:/dir
{
- "sftp:user@host:/dir/subdir",
- Config{User: "user", Host: "host", Path: "/dir/subdir", Connections: 5},
+ S: "sftp:user@host:/dir/subdir",
+ Cfg: Config{User: "user", Host: "host", Path: "/dir/subdir", Connections: 5},
},
{
- "sftp:user@domain@host:/dir/subdir",
- Config{User: "user@domain", Host: "host", Path: "/dir/subdir", Connections: 5},
+ S: "sftp:user@domain@host:/dir/subdir",
+ Cfg: Config{User: "user@domain", Host: "host", Path: "/dir/subdir", Connections: 5},
},
{
- "sftp:host:../dir/subdir",
- Config{Host: "host", Path: "../dir/subdir", Connections: 5},
+ S: "sftp:host:../dir/subdir",
+ Cfg: Config{Host: "host", Path: "../dir/subdir", Connections: 5},
},
{
- "sftp:user@host:dir/subdir:suffix",
- Config{User: "user", Host: "host", Path: "dir/subdir:suffix", Connections: 5},
+ S: "sftp:user@host:dir/subdir:suffix",
+ Cfg: Config{User: "user", Host: "host", Path: "dir/subdir:suffix", Connections: 5},
},
{
- "sftp:user@host:dir/subdir/../other",
- Config{User: "user", Host: "host", Path: "dir/other", Connections: 5},
+ S: "sftp:user@host:dir/subdir/../other",
+ Cfg: Config{User: "user", Host: "host", Path: "dir/other", Connections: 5},
},
{
- "sftp:user@host:dir///subdir",
- Config{User: "user", Host: "host", Path: "dir/subdir", Connections: 5},
+ S: "sftp:user@host:dir///subdir",
+ Cfg: Config{User: "user", Host: "host", Path: "dir/subdir", Connections: 5},
},
}
func TestParseConfig(t *testing.T) {
- for i, test := range configTests {
- cfg, err := ParseConfig(test.in)
- if err != nil {
- t.Errorf("test %d:%s failed: %v", i, test.in, err)
- continue
- }
-
- if cfg != test.cfg {
- t.Errorf("test %d:\ninput:\n %s\n wrong config, want:\n %v\ngot:\n %v",
- i, test.in, test.cfg, cfg)
- continue
- }
- }
+ test.ParseConfigTester(t, ParseConfig, configTests)
}
var configTestsInvalid = []string{
diff --git a/internal/backend/sftp/sftp.go b/internal/backend/sftp/sftp.go
index 514dd58da..3e127ef05 100644
--- a/internal/backend/sftp/sftp.go
+++ b/internal/backend/sftp/sftp.go
@@ -15,7 +15,8 @@ import (
"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/backend/layout"
- "github.com/restic/restic/internal/backend/sema"
+ "github.com/restic/restic/internal/backend/limiter"
+ "github.com/restic/restic/internal/backend/location"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/restic"
@@ -35,7 +36,6 @@ type SFTP struct {
posixRename bool
- sem sema.Semaphore
layout.Layout
Config
backend.Modes
@@ -43,6 +43,10 @@ type SFTP struct {
var _ restic.Backend = &SFTP{}
+func NewFactory() location.Factory {
+ return location.NewLimitedBackendFactory("sftp", ParseConfig, location.NoPassword, limiter.WrapBackendConstructor(Create), limiter.WrapBackendConstructor(Open))
+}
+
const defaultLayout = "default"
func startClient(cfg Config) (*SFTP, error) {
@@ -140,11 +144,7 @@ func Open(ctx context.Context, cfg Config) (*SFTP, error) {
}
func open(ctx context.Context, sftp *SFTP, cfg Config) (*SFTP, error) {
- sem, err := sema.New(cfg.Connections)
- if err != nil {
- return nil, err
- }
-
+ var err error
sftp.Layout, err = layout.ParseLayout(ctx, sftp, cfg.Layout, defaultLayout, cfg.Path)
if err != nil {
return nil, err
@@ -158,7 +158,6 @@ func open(ctx context.Context, sftp *SFTP, cfg Config) (*SFTP, error) {
sftp.Config = cfg
sftp.p = cfg.Path
- sftp.sem = sem
sftp.Modes = m
return sftp, nil
}
@@ -194,7 +193,7 @@ func (r *SFTP) Join(p ...string) string {
}
// ReadDir returns the entries for a directory.
-func (r *SFTP) ReadDir(ctx context.Context, dir string) ([]os.FileInfo, error) {
+func (r *SFTP) ReadDir(_ context.Context, dir string) ([]os.FileInfo, error) {
fi, err := r.c.ReadDir(dir)
// sftp client does not specify dir name on error, so add it here
@@ -303,23 +302,15 @@ func tempSuffix() string {
}
// Save stores data in the backend at the handle.
-func (r *SFTP) Save(ctx context.Context, h restic.Handle, rd restic.RewindReader) error {
- debug.Log("Save %v", h)
+func (r *SFTP) Save(_ context.Context, h restic.Handle, rd restic.RewindReader) error {
if err := r.clientError(); err != nil {
return err
}
- if err := h.Valid(); err != nil {
- return backoff.Permanent(err)
- }
-
filename := r.Filename(h)
tmpFilename := filename + "-restic-temp-" + tempSuffix()
dirname := r.Dirname(h)
- r.sem.GetToken()
- defer r.sem.ReleaseToken()
-
// create new file
f, err := r.c.OpenFile(tmpFilename, os.O_CREATE|os.O_EXCL|os.O_WRONLY)
@@ -415,77 +406,35 @@ func (r *SFTP) Load(ctx context.Context, h restic.Handle, length int, offset int
return backend.DefaultLoad(ctx, h, length, offset, r.openReader, fn)
}
-// wrapReader wraps an io.ReadCloser to run an additional function on Close.
-type wrapReader struct {
- io.ReadCloser
- io.WriterTo
- f func()
-}
-
-func (wr *wrapReader) Close() error {
- err := wr.ReadCloser.Close()
- wr.f()
- return err
-}
-
-func (r *SFTP) openReader(ctx context.Context, h restic.Handle, length int, offset int64) (io.ReadCloser, error) {
- debug.Log("Load %v, length %v, offset %v", h, length, offset)
- if err := h.Valid(); err != nil {
- return nil, backoff.Permanent(err)
- }
-
- if offset < 0 {
- return nil, errors.New("offset is negative")
- }
-
- r.sem.GetToken()
+func (r *SFTP) openReader(_ context.Context, h restic.Handle, length int, offset int64) (io.ReadCloser, error) {
f, err := r.c.Open(r.Filename(h))
if err != nil {
- r.sem.ReleaseToken()
return nil, err
}
if offset > 0 {
_, err = f.Seek(offset, 0)
if err != nil {
- r.sem.ReleaseToken()
_ = f.Close()
return nil, err
}
}
- // use custom close wrapper to also provide WriteTo() on the wrapper
- rd := &wrapReader{
- ReadCloser: f,
- WriterTo: f,
- f: func() {
- r.sem.ReleaseToken()
- },
- }
-
if length > 0 {
// unlimited reads usually use io.Copy which needs WriteTo support at the underlying reader
// limited reads are usually combined with io.ReadFull which reads all required bytes into a buffer in one go
- return backend.LimitReadCloser(rd, int64(length)), nil
+ return backend.LimitReadCloser(f, int64(length)), nil
}
- return rd, nil
+ return f, nil
}
// Stat returns information about a blob.
-func (r *SFTP) Stat(ctx context.Context, h restic.Handle) (restic.FileInfo, error) {
- debug.Log("Stat(%v)", h)
+func (r *SFTP) Stat(_ context.Context, h restic.Handle) (restic.FileInfo, error) {
if err := r.clientError(); err != nil {
return restic.FileInfo{}, err
}
- if err := h.Valid(); err != nil {
- return restic.FileInfo{}, backoff.Permanent(err)
- }
-
- r.sem.GetToken()
- defer r.sem.ReleaseToken()
-
fi, err := r.c.Lstat(r.Filename(h))
if err != nil {
return restic.FileInfo{}, errors.Wrap(err, "Lstat")
@@ -495,29 +444,21 @@ func (r *SFTP) Stat(ctx context.Context, h restic.Handle) (restic.FileInfo, erro
}
// Remove removes the content stored at name.
-func (r *SFTP) Remove(ctx context.Context, h restic.Handle) error {
- debug.Log("Remove(%v)", h)
+func (r *SFTP) Remove(_ context.Context, h restic.Handle) error {
if err := r.clientError(); err != nil {
return err
}
- r.sem.GetToken()
- defer r.sem.ReleaseToken()
-
return r.c.Remove(r.Filename(h))
}
// List runs fn for each file in the backend which has the type t. When an
// error occurs (or fn returns an error), List stops and returns it.
func (r *SFTP) List(ctx context.Context, t restic.FileType, fn func(restic.FileInfo) error) error {
- debug.Log("List %v", t)
-
basedir, subdirs := r.Basedir(t)
walker := r.c.Walk(basedir)
for {
- r.sem.GetToken()
ok := walker.Step()
- r.sem.ReleaseToken()
if !ok {
break
}
@@ -572,7 +513,6 @@ var closeTimeout = 2 * time.Second
// Close closes the sftp connection and terminates the underlying command.
func (r *SFTP) Close() error {
- debug.Log("Close")
if r == nil {
return nil
}
diff --git a/internal/backend/sftp/sftp_test.go b/internal/backend/sftp/sftp_test.go
index 0dbcd291c..75adc0c6b 100644
--- a/internal/backend/sftp/sftp_test.go
+++ b/internal/backend/sftp/sftp_test.go
@@ -1,7 +1,6 @@
package sftp_test
import (
- "context"
"fmt"
"os"
"path/filepath"
@@ -11,7 +10,6 @@ import (
"github.com/restic/restic/internal/backend/sftp"
"github.com/restic/restic/internal/backend/test"
"github.com/restic/restic/internal/errors"
- "github.com/restic/restic/internal/restic"
rtest "github.com/restic/restic/internal/test"
)
@@ -29,18 +27,14 @@ func findSFTPServerBinary() string {
var sftpServer = findSFTPServerBinary()
-func newTestSuite(t testing.TB) *test.Suite {
- return &test.Suite{
+func newTestSuite(t testing.TB) *test.Suite[sftp.Config] {
+ return &test.Suite[sftp.Config]{
// NewConfig returns a config for a new temporary backend that will be used in tests.
- NewConfig: func() (interface{}, error) {
- dir, err := os.MkdirTemp(rtest.TestTempDir, "restic-test-sftp-")
- if err != nil {
- t.Fatal(err)
- }
-
+ NewConfig: func() (*sftp.Config, error) {
+ dir := rtest.TempDir(t)
t.Logf("create new backend at %v", dir)
- cfg := sftp.Config{
+ cfg := &sftp.Config{
Path: dir,
Command: fmt.Sprintf("%q -e", sftpServer),
Connections: 5,
@@ -48,28 +42,7 @@ func newTestSuite(t testing.TB) *test.Suite {
return cfg, nil
},
- // CreateFn is a function that creates a temporary repository for the tests.
- Create: func(config interface{}) (restic.Backend, error) {
- cfg := config.(sftp.Config)
- return sftp.Create(context.TODO(), cfg)
- },
-
- // OpenFn is a function that opens a previously created temporary repository.
- Open: func(config interface{}) (restic.Backend, error) {
- cfg := config.(sftp.Config)
- return sftp.Open(context.TODO(), cfg)
- },
-
- // CleanupFn removes data created during the tests.
- Cleanup: func(config interface{}) error {
- cfg := config.(sftp.Config)
- if !rtest.TestCleanupTempDirs {
- t.Logf("leaving test backend dir at %v", cfg.Path)
- }
-
- rtest.RemoveAll(t, cfg.Path)
- return nil
- },
+ Factory: sftp.NewFactory(),
}
}
diff --git a/internal/backend/swift/config.go b/internal/backend/swift/config.go
index ced256752..5be2d9ce0 100644
--- a/internal/backend/swift/config.go
+++ b/internal/backend/swift/config.go
@@ -6,6 +6,7 @@ import (
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/options"
+ "github.com/restic/restic/internal/restic"
)
// Config contains basic configuration needed to specify swift location for a swift server
@@ -50,7 +51,7 @@ func NewConfig() Config {
}
// ParseConfig parses the string s and extract swift's container name and prefix.
-func ParseConfig(s string) (interface{}, error) {
+func ParseConfig(s string) (*Config, error) {
if !strings.HasPrefix(s, "swift:") {
return nil, errors.New("invalid URL, expected: swift:container-name:/[prefix]")
}
@@ -70,48 +71,49 @@ func ParseConfig(s string) (interface{}, error) {
cfg.Container = container
cfg.Prefix = prefix
- return cfg, nil
+ return &cfg, nil
}
+var _ restic.ApplyEnvironmenter = &Config{}
+
// ApplyEnvironment saves values from the environment to the config.
-func ApplyEnvironment(prefix string, cfg interface{}) error {
- c := cfg.(*Config)
+func (cfg *Config) ApplyEnvironment(prefix string) {
for _, val := range []struct {
s *string
env string
}{
// v2/v3 specific
- {&c.UserName, prefix + "OS_USERNAME"},
- {&c.APIKey, prefix + "OS_PASSWORD"},
- {&c.Region, prefix + "OS_REGION_NAME"},
- {&c.AuthURL, prefix + "OS_AUTH_URL"},
+ {&cfg.UserName, prefix + "OS_USERNAME"},
+ {&cfg.APIKey, prefix + "OS_PASSWORD"},
+ {&cfg.Region, prefix + "OS_REGION_NAME"},
+ {&cfg.AuthURL, prefix + "OS_AUTH_URL"},
// v3 specific
- {&c.UserID, prefix + "OS_USER_ID"},
- {&c.Domain, prefix + "OS_USER_DOMAIN_NAME"},
- {&c.DomainID, prefix + "OS_USER_DOMAIN_ID"},
- {&c.Tenant, prefix + "OS_PROJECT_NAME"},
- {&c.TenantDomain, prefix + "OS_PROJECT_DOMAIN_NAME"},
- {&c.TenantDomainID, prefix + "OS_PROJECT_DOMAIN_ID"},
- {&c.TrustID, prefix + "OS_TRUST_ID"},
+ {&cfg.UserID, prefix + "OS_USER_ID"},
+ {&cfg.Domain, prefix + "OS_USER_DOMAIN_NAME"},
+ {&cfg.DomainID, prefix + "OS_USER_DOMAIN_ID"},
+ {&cfg.Tenant, prefix + "OS_PROJECT_NAME"},
+ {&cfg.TenantDomain, prefix + "OS_PROJECT_DOMAIN_NAME"},
+ {&cfg.TenantDomainID, prefix + "OS_PROJECT_DOMAIN_ID"},
+ {&cfg.TrustID, prefix + "OS_TRUST_ID"},
// v2 specific
- {&c.TenantID, prefix + "OS_TENANT_ID"},
- {&c.Tenant, prefix + "OS_TENANT_NAME"},
+ {&cfg.TenantID, prefix + "OS_TENANT_ID"},
+ {&cfg.Tenant, prefix + "OS_TENANT_NAME"},
// v1 specific
- {&c.AuthURL, prefix + "ST_AUTH"},
- {&c.UserName, prefix + "ST_USER"},
- {&c.APIKey, prefix + "ST_KEY"},
+ {&cfg.AuthURL, prefix + "ST_AUTH"},
+ {&cfg.UserName, prefix + "ST_USER"},
+ {&cfg.APIKey, prefix + "ST_KEY"},
// Application Credential auth
- {&c.ApplicationCredentialID, prefix + "OS_APPLICATION_CREDENTIAL_ID"},
- {&c.ApplicationCredentialName, prefix + "OS_APPLICATION_CREDENTIAL_NAME"},
+ {&cfg.ApplicationCredentialID, prefix + "OS_APPLICATION_CREDENTIAL_ID"},
+ {&cfg.ApplicationCredentialName, prefix + "OS_APPLICATION_CREDENTIAL_NAME"},
// Manual authentication
- {&c.StorageURL, prefix + "OS_STORAGE_URL"},
+ {&cfg.StorageURL, prefix + "OS_STORAGE_URL"},
- {&c.DefaultContainerPolicy, prefix + "SWIFT_DEFAULT_CONTAINER_POLICY"},
+ {&cfg.DefaultContainerPolicy, prefix + "SWIFT_DEFAULT_CONTAINER_POLICY"},
} {
if *val.s == "" {
*val.s = os.Getenv(val.env)
@@ -121,12 +123,11 @@ func ApplyEnvironment(prefix string, cfg interface{}) error {
s *options.SecretString
env string
}{
- {&c.ApplicationCredentialSecret, prefix + "OS_APPLICATION_CREDENTIAL_SECRET"},
- {&c.AuthToken, prefix + "OS_AUTH_TOKEN"},
+ {&cfg.ApplicationCredentialSecret, prefix + "OS_APPLICATION_CREDENTIAL_SECRET"},
+ {&cfg.AuthToken, prefix + "OS_AUTH_TOKEN"},
} {
if val.s.String() == "" {
*val.s = options.NewSecretString(os.Getenv(val.env))
}
}
- return nil
}
diff --git a/internal/backend/swift/config_test.go b/internal/backend/swift/config_test.go
index 35f091a9b..3e094f9ea 100644
--- a/internal/backend/swift/config_test.go
+++ b/internal/backend/swift/config_test.go
@@ -1,29 +1,30 @@
package swift
-import "testing"
+import (
+ "testing"
-var configTests = []struct {
- s string
- cfg Config
-}{
+ "github.com/restic/restic/internal/backend/test"
+)
+
+var configTests = []test.ConfigTestData[Config]{
{
- "swift:cnt1:/",
- Config{
+ S: "swift:cnt1:/",
+ Cfg: Config{
Container: "cnt1",
Prefix: "",
Connections: 5,
},
},
{
- "swift:cnt2:/prefix",
- Config{Container: "cnt2",
+ S: "swift:cnt2:/prefix",
+ Cfg: Config{Container: "cnt2",
Prefix: "prefix",
Connections: 5,
},
},
{
- "swift:cnt3:/prefix/longer",
- Config{Container: "cnt3",
+ S: "swift:cnt3:/prefix/longer",
+ Cfg: Config{Container: "cnt3",
Prefix: "prefix/longer",
Connections: 5,
},
@@ -31,24 +32,7 @@ var configTests = []struct {
}
func TestParseConfig(t *testing.T) {
- for _, test := range configTests {
- t.Run("", func(t *testing.T) {
- v, err := ParseConfig(test.s)
- if err != nil {
- t.Fatalf("parsing %q failed: %v", test.s, err)
- }
-
- cfg, ok := v.(Config)
- if !ok {
- t.Fatalf("wrong type returned, want Config, got %T", cfg)
- }
-
- if cfg != test.cfg {
- t.Fatalf("wrong output for %q, want:\n %#v\ngot:\n %#v",
- test.s, test.cfg, cfg)
- }
- })
- }
+ test.ParseConfigTester(t, ParseConfig, configTests)
}
var configTestsInvalid = []string{
diff --git a/internal/backend/swift/swift.go b/internal/backend/swift/swift.go
index 764c7bb62..1cfc0a65b 100644
--- a/internal/backend/swift/swift.go
+++ b/internal/backend/swift/swift.go
@@ -15,12 +15,11 @@ import (
"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/backend/layout"
- "github.com/restic/restic/internal/backend/sema"
+ "github.com/restic/restic/internal/backend/location"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/restic"
- "github.com/cenkalti/backoff/v4"
"github.com/ncw/swift/v2"
)
@@ -28,7 +27,6 @@ import (
type beSwift struct {
conn *swift.Connection
connections uint
- sem sema.Semaphore
container string // Container name
prefix string // Prefix of object names in the container
layout.Layout
@@ -37,16 +35,15 @@ type beSwift struct {
// ensure statically that *beSwift implements restic.Backend.
var _ restic.Backend = &beSwift{}
+func NewFactory() location.Factory {
+ return location.NewHTTPBackendFactory("swift", ParseConfig, location.NoPassword, Open, Open)
+}
+
// Open opens the swift backend at a container in region. The container is
// created if it does not exist yet.
func Open(ctx context.Context, cfg Config, rt http.RoundTripper) (restic.Backend, error) {
debug.Log("config %#v", cfg)
- sem, err := sema.New(cfg.Connections)
- if err != nil {
- return nil, err
- }
-
be := &beSwift{
conn: &swift.Connection{
UserName: cfg.UserName,
@@ -72,7 +69,6 @@ func Open(ctx context.Context, cfg Config, rt http.RoundTripper) (restic.Backend
Transport: rt,
},
connections: cfg.Connections,
- sem: sem,
container: cfg.Container,
prefix: cfg.Prefix,
Layout: &layout.DefaultLayout{
@@ -143,18 +139,6 @@ func (be *beSwift) Load(ctx context.Context, h restic.Handle, length int, offset
}
func (be *beSwift) openReader(ctx context.Context, h restic.Handle, length int, offset int64) (io.ReadCloser, error) {
- debug.Log("Load %v, length %v, offset %v", h, length, offset)
- if err := h.Valid(); err != nil {
- return nil, backoff.Permanent(err)
- }
-
- if offset < 0 {
- return nil, errors.New("offset is negative")
- }
-
- if length < 0 {
- return nil, errors.Errorf("invalid length %d", length)
- }
objName := be.Filename(h)
@@ -167,59 +151,34 @@ func (be *beSwift) openReader(ctx context.Context, h restic.Handle, length int,
headers["Range"] = fmt.Sprintf("bytes=%d-%d", offset, offset+int64(length)-1)
}
- if _, ok := headers["Range"]; ok {
- debug.Log("Load(%v) send range %v", h, headers["Range"])
- }
-
- be.sem.GetToken()
obj, _, err := be.conn.ObjectOpen(ctx, be.container, objName, false, headers)
if err != nil {
- debug.Log(" err %v", err)
- be.sem.ReleaseToken()
return nil, errors.Wrap(err, "conn.ObjectOpen")
}
- return be.sem.ReleaseTokenOnClose(obj, nil), nil
+ return obj, nil
}
// Save stores data in the backend at the handle.
func (be *beSwift) Save(ctx context.Context, h restic.Handle, rd restic.RewindReader) error {
- if err := h.Valid(); err != nil {
- return backoff.Permanent(err)
- }
-
objName := be.Filename(h)
-
- debug.Log("Save %v at %v", h, objName)
-
- be.sem.GetToken()
- defer be.sem.ReleaseToken()
-
encoding := "binary/octet-stream"
- debug.Log("PutObject(%v, %v, %v)", be.container, objName, encoding)
hdr := swift.Headers{"Content-Length": strconv.FormatInt(rd.Length(), 10)}
_, err := be.conn.ObjectPut(ctx,
be.container, objName, rd, true, hex.EncodeToString(rd.Hash()),
encoding, hdr)
// swift does not return the upload length
- debug.Log("%v, err %#v", objName, err)
return errors.Wrap(err, "client.PutObject")
}
// Stat returns information about a blob.
func (be *beSwift) Stat(ctx context.Context, h restic.Handle) (bi restic.FileInfo, err error) {
- debug.Log("%v", h)
-
objName := be.Filename(h)
- be.sem.GetToken()
- defer be.sem.ReleaseToken()
-
obj, _, err := be.conn.Object(ctx, be.container, objName)
if err != nil {
- debug.Log("Object() err %v", err)
return restic.FileInfo{}, errors.Wrap(err, "conn.Object")
}
@@ -230,27 +189,19 @@ func (be *beSwift) Stat(ctx context.Context, h restic.Handle) (bi restic.FileInf
func (be *beSwift) Remove(ctx context.Context, h restic.Handle) error {
objName := be.Filename(h)
- be.sem.GetToken()
- defer be.sem.ReleaseToken()
-
err := be.conn.ObjectDelete(ctx, be.container, objName)
- debug.Log("Remove(%v) -> err %v", h, err)
return errors.Wrap(err, "conn.ObjectDelete")
}
// List runs fn for each file in the backend which has the type t. When an
// error occurs (or fn returns an error), List stops and returns it.
func (be *beSwift) List(ctx context.Context, t restic.FileType, fn func(restic.FileInfo) error) error {
- debug.Log("listing %v", t)
-
prefix, _ := be.Basedir(t)
prefix += "/"
err := be.conn.ObjectsWalk(ctx, be.container, &swift.ObjectsOpts{Prefix: prefix},
func(ctx context.Context, opts *swift.ObjectsOpts) (interface{}, error) {
- be.sem.GetToken()
newObjects, err := be.conn.Objects(ctx, be.container, opts)
- be.sem.ReleaseToken()
if err != nil {
return nil, errors.Wrap(err, "conn.ObjectNames")
@@ -285,13 +236,6 @@ func (be *beSwift) List(ctx context.Context, t restic.FileType, fn func(restic.F
return ctx.Err()
}
-// Remove keys for a specified backend type.
-func (be *beSwift) removeKeys(ctx context.Context, t restic.FileType) error {
- return be.List(ctx, t, func(fi restic.FileInfo) error {
- return be.Remove(ctx, restic.Handle{Type: t, Name: fi.Name})
- })
-}
-
// IsNotExist returns true if the error is caused by a not existing file.
func (be *beSwift) IsNotExist(err error) bool {
var e *swift.Error
@@ -301,26 +245,7 @@ func (be *beSwift) IsNotExist(err error) bool {
// Delete removes all restic objects in the container.
// It will not remove the container itself.
func (be *beSwift) Delete(ctx context.Context) error {
- alltypes := []restic.FileType{
- restic.PackFile,
- restic.KeyFile,
- restic.LockFile,
- restic.SnapshotFile,
- restic.IndexFile}
-
- for _, t := range alltypes {
- err := be.removeKeys(ctx, t)
- if err != nil {
- return nil
- }
- }
-
- err := be.Remove(ctx, restic.Handle{Type: restic.ConfigFile})
- if err != nil && !be.IsNotExist(err) {
- return err
- }
-
- return nil
+ return backend.DefaultDelete(ctx, be)
}
// Close does nothing
diff --git a/internal/backend/swift/swift_test.go b/internal/backend/swift/swift_test.go
index 0912e4f7e..98ee5b1c1 100644
--- a/internal/backend/swift/swift_test.go
+++ b/internal/backend/swift/swift_test.go
@@ -1,27 +1,19 @@
package swift_test
import (
- "context"
"fmt"
"os"
"testing"
"time"
- "github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/backend/swift"
"github.com/restic/restic/internal/backend/test"
- "github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/restic"
rtest "github.com/restic/restic/internal/test"
)
-func newSwiftTestSuite(t testing.TB) *test.Suite {
- tr, err := backend.Transport(backend.TransportOptions{})
- if err != nil {
- t.Fatalf("cannot create transport for tests: %v", err)
- }
-
- return &test.Suite{
+func newSwiftTestSuite(t testing.TB) *test.Suite[swift.Config] {
+ return &test.Suite[swift.Config]{
// do not use excessive data
MinimalData: true,
@@ -42,59 +34,19 @@ func newSwiftTestSuite(t testing.TB) *test.Suite {
},
// NewConfig returns a config for a new temporary backend that will be used in tests.
- NewConfig: func() (interface{}, error) {
- swiftcfg, err := swift.ParseConfig(os.Getenv("RESTIC_TEST_SWIFT"))
+ NewConfig: func() (*swift.Config, error) {
+ cfg, err := swift.ParseConfig(os.Getenv("RESTIC_TEST_SWIFT"))
if err != nil {
return nil, err
}
- cfg := swiftcfg.(swift.Config)
- if err = swift.ApplyEnvironment("RESTIC_TEST_", &cfg); err != nil {
- return nil, err
- }
+ cfg.ApplyEnvironment("RESTIC_TEST_")
cfg.Prefix += fmt.Sprintf("/test-%d", time.Now().UnixNano())
t.Logf("using prefix %v", cfg.Prefix)
return cfg, nil
},
- // CreateFn is a function that creates a temporary repository for the tests.
- Create: func(config interface{}) (restic.Backend, error) {
- cfg := config.(swift.Config)
-
- be, err := swift.Open(context.TODO(), cfg, tr)
- if err != nil {
- return nil, err
- }
-
- _, err = be.Stat(context.TODO(), restic.Handle{Type: restic.ConfigFile})
- if err != nil && !be.IsNotExist(err) {
- return nil, err
- }
-
- if err == nil {
- return nil, errors.New("config already exists")
- }
-
- return be, nil
- },
-
- // OpenFn is a function that opens a previously created temporary repository.
- Open: func(config interface{}) (restic.Backend, error) {
- cfg := config.(swift.Config)
- return swift.Open(context.TODO(), cfg, tr)
- },
-
- // CleanupFn removes data created during the tests.
- Cleanup: func(config interface{}) error {
- cfg := config.(swift.Config)
-
- be, err := swift.Open(context.TODO(), cfg, tr)
- if err != nil {
- return err
- }
-
- return be.Delete(context.TODO())
- },
+ Factory: swift.NewFactory(),
}
}
diff --git a/internal/backend/test/benchmarks.go b/internal/backend/test/benchmarks.go
index b977eb682..150ef3987 100644
--- a/internal/backend/test/benchmarks.go
+++ b/internal/backend/test/benchmarks.go
@@ -29,7 +29,7 @@ func remove(t testing.TB, be restic.Backend, h restic.Handle) {
// BenchmarkLoadFile benchmarks the Load() method of a backend by
// loading a complete file.
-func (s *Suite) BenchmarkLoadFile(t *testing.B) {
+func (s *Suite[C]) BenchmarkLoadFile(t *testing.B) {
be := s.open(t)
defer s.close(t, be)
@@ -64,7 +64,7 @@ func (s *Suite) BenchmarkLoadFile(t *testing.B) {
// BenchmarkLoadPartialFile benchmarks the Load() method of a backend by
// loading the remainder of a file starting at a given offset.
-func (s *Suite) BenchmarkLoadPartialFile(t *testing.B) {
+func (s *Suite[C]) BenchmarkLoadPartialFile(t *testing.B) {
be := s.open(t)
defer s.close(t, be)
@@ -101,7 +101,7 @@ func (s *Suite) BenchmarkLoadPartialFile(t *testing.B) {
// BenchmarkLoadPartialFileOffset benchmarks the Load() method of a
// backend by loading a number of bytes of a file starting at a given offset.
-func (s *Suite) BenchmarkLoadPartialFileOffset(t *testing.B) {
+func (s *Suite[C]) BenchmarkLoadPartialFileOffset(t *testing.B) {
be := s.open(t)
defer s.close(t, be)
@@ -139,7 +139,7 @@ func (s *Suite) BenchmarkLoadPartialFileOffset(t *testing.B) {
}
// BenchmarkSave benchmarks the Save() method of a backend.
-func (s *Suite) BenchmarkSave(t *testing.B) {
+func (s *Suite[C]) BenchmarkSave(t *testing.B) {
be := s.open(t)
defer s.close(t, be)
diff --git a/internal/backend/test/config.go b/internal/backend/test/config.go
new file mode 100644
index 000000000..496ba2761
--- /dev/null
+++ b/internal/backend/test/config.go
@@ -0,0 +1,28 @@
+package test
+
+import (
+ "fmt"
+ "reflect"
+ "testing"
+)
+
+type ConfigTestData[C comparable] struct {
+ S string
+ Cfg C
+}
+
+func ParseConfigTester[C comparable](t *testing.T, parser func(s string) (*C, error), tests []ConfigTestData[C]) {
+ for i, test := range tests {
+ t.Run(fmt.Sprint(i), func(t *testing.T) {
+ cfg, err := parser(test.S)
+ if err != nil {
+ t.Fatalf("%s failed: %v", test.S, err)
+ }
+
+ if !reflect.DeepEqual(*cfg, test.Cfg) {
+ t.Fatalf("input: %s\n wrong config, want:\n %#v\ngot:\n %#v",
+ test.S, test.Cfg, *cfg)
+ }
+ })
+ }
+}
diff --git a/internal/backend/test/suite.go b/internal/backend/test/suite.go
index 45c6d96bd..bb77124d7 100644
--- a/internal/backend/test/suite.go
+++ b/internal/backend/test/suite.go
@@ -1,31 +1,30 @@
package test
import (
+ "context"
+ "fmt"
"reflect"
"strings"
"testing"
"time"
+ "github.com/restic/restic/internal/backend"
+ "github.com/restic/restic/internal/backend/location"
+ "github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/restic"
"github.com/restic/restic/internal/test"
)
// Suite implements a test suite for restic backends.
-type Suite struct {
+type Suite[C any] struct {
// Config should be used to configure the backend.
- Config interface{}
+ Config *C
// NewConfig returns a config for a new temporary backend that will be used in tests.
- NewConfig func() (interface{}, error)
+ NewConfig func() (*C, error)
- // CreateFn is a function that creates a temporary repository for the tests.
- Create func(cfg interface{}) (restic.Backend, error)
-
- // OpenFn is a function that opens a previously created temporary repository.
- Open func(cfg interface{}) (restic.Backend, error)
-
- // CleanupFn removes data created during the tests.
- Cleanup func(cfg interface{}) error
+ // Factory contains a factory that can be used to create or open a repository for the tests.
+ Factory location.Factory
// MinimalData instructs the tests to not use excessive data.
MinimalData bool
@@ -40,7 +39,7 @@ type Suite struct {
}
// RunTests executes all defined tests as subtests of t.
-func (s *Suite) RunTests(t *testing.T) {
+func (s *Suite[C]) RunTests(t *testing.T) {
var err error
s.Config, err = s.NewConfig()
if err != nil {
@@ -60,11 +59,7 @@ func (s *Suite) RunTests(t *testing.T) {
return
}
- if s.Cleanup != nil {
- if err = s.Cleanup(s.Config); err != nil {
- t.Fatal(err)
- }
- }
+ s.cleanup(t)
}
type testFunction struct {
@@ -72,7 +67,7 @@ type testFunction struct {
Fn func(*testing.T)
}
-func (s *Suite) testFuncs(t testing.TB) (funcs []testFunction) {
+func (s *Suite[C]) testFuncs(t testing.TB) (funcs []testFunction) {
tpe := reflect.TypeOf(s)
v := reflect.ValueOf(s)
@@ -107,7 +102,7 @@ type benchmarkFunction struct {
Fn func(*testing.B)
}
-func (s *Suite) benchmarkFuncs(t testing.TB) (funcs []benchmarkFunction) {
+func (s *Suite[C]) benchmarkFuncs(t testing.TB) (funcs []benchmarkFunction) {
tpe := reflect.TypeOf(s)
v := reflect.ValueOf(s)
@@ -138,7 +133,7 @@ func (s *Suite) benchmarkFuncs(t testing.TB) (funcs []benchmarkFunction) {
}
// RunBenchmarks executes all defined benchmarks as subtests of b.
-func (s *Suite) RunBenchmarks(b *testing.B) {
+func (s *Suite[C]) RunBenchmarks(b *testing.B) {
var err error
s.Config, err = s.NewConfig()
if err != nil {
@@ -158,28 +153,62 @@ func (s *Suite) RunBenchmarks(b *testing.B) {
return
}
- if err = s.Cleanup(s.Config); err != nil {
- b.Fatal(err)
+ s.cleanup(b)
+}
+
+func (s *Suite[C]) createOrError() (restic.Backend, error) {
+ tr, err := backend.Transport(backend.TransportOptions{})
+ if err != nil {
+ return nil, fmt.Errorf("cannot create transport for tests: %v", err)
+ }
+
+ be, err := s.Factory.Create(context.TODO(), s.Config, tr, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ _, err = be.Stat(context.TODO(), restic.Handle{Type: restic.ConfigFile})
+ if err != nil && !be.IsNotExist(err) {
+ return nil, err
+ }
+
+ if err == nil {
+ return nil, errors.New("config already exists")
}
+
+ return be, nil
}
-func (s *Suite) create(t testing.TB) restic.Backend {
- be, err := s.Create(s.Config)
+func (s *Suite[C]) create(t testing.TB) restic.Backend {
+ be, err := s.createOrError()
if err != nil {
t.Fatal(err)
}
return be
}
-func (s *Suite) open(t testing.TB) restic.Backend {
- be, err := s.Open(s.Config)
+func (s *Suite[C]) open(t testing.TB) restic.Backend {
+ tr, err := backend.Transport(backend.TransportOptions{})
+ if err != nil {
+ t.Fatalf("cannot create transport for tests: %v", err)
+ }
+
+ be, err := s.Factory.Open(context.TODO(), s.Config, tr, nil)
if err != nil {
t.Fatal(err)
}
return be
}
-func (s *Suite) close(t testing.TB, be restic.Backend) {
+func (s *Suite[C]) cleanup(t testing.TB) {
+ be := s.open(t)
+ if err := be.Delete(context.TODO()); err != nil {
+ t.Fatal(err)
+ }
+ s.close(t, be)
+}
+
+func (s *Suite[C]) close(t testing.TB, be restic.Backend) {
err := be.Close()
if err != nil {
t.Fatal(err)
diff --git a/internal/backend/test/tests.go b/internal/backend/test/tests.go
index b98af59c3..c03db79e3 100644
--- a/internal/backend/test/tests.go
+++ b/internal/backend/test/tests.go
@@ -36,9 +36,15 @@ func beTest(ctx context.Context, be restic.Backend, h restic.Handle) (bool, erro
return err == nil, err
}
+// TestStripPasswordCall tests that the StripPassword method of a factory can be called without crashing.
+// It does not verify whether passwords are removed correctly
+func (s *Suite[C]) TestStripPasswordCall(_ *testing.T) {
+ s.Factory.StripPassword("some random string")
+}
+
// TestCreateWithConfig tests that creating a backend in a location which already
// has a config file fails.
-func (s *Suite) TestCreateWithConfig(t *testing.T) {
+func (s *Suite[C]) TestCreateWithConfig(t *testing.T) {
b := s.open(t)
defer s.close(t, b)
@@ -57,7 +63,7 @@ func (s *Suite) TestCreateWithConfig(t *testing.T) {
store(t, b, restic.ConfigFile, []byte("test config"))
// now create the backend again, this must fail
- _, err = s.Create(s.Config)
+ _, err = s.createOrError()
if err == nil {
t.Fatalf("expected error not found for creating a backend with an existing config file")
}
@@ -70,7 +76,7 @@ func (s *Suite) TestCreateWithConfig(t *testing.T) {
}
// TestLocation tests that a location string is returned.
-func (s *Suite) TestLocation(t *testing.T) {
+func (s *Suite[C]) TestLocation(t *testing.T) {
b := s.open(t)
defer s.close(t, b)
@@ -81,7 +87,7 @@ func (s *Suite) TestLocation(t *testing.T) {
}
// TestConfig saves and loads a config from the backend.
-func (s *Suite) TestConfig(t *testing.T) {
+func (s *Suite[C]) TestConfig(t *testing.T) {
b := s.open(t)
defer s.close(t, b)
@@ -118,23 +124,13 @@ func (s *Suite) TestConfig(t *testing.T) {
}
// TestLoad tests the backend's Load function.
-func (s *Suite) TestLoad(t *testing.T) {
+func (s *Suite[C]) TestLoad(t *testing.T) {
seedRand(t)
b := s.open(t)
defer s.close(t, b)
- noop := func(rd io.Reader) error {
- return nil
- }
-
- err := b.Load(context.TODO(), restic.Handle{}, 0, 0, noop)
- if err == nil {
- t.Fatalf("Load() did not return an error for invalid handle")
- }
- test.Assert(t, !b.IsNotExist(err), "IsNotExist() should not accept an invalid handle error: %v", err)
-
- err = testLoad(b, restic.Handle{Type: restic.PackFile, Name: "foobar"}, 0, 0)
+ err := testLoad(b, restic.Handle{Type: restic.PackFile, Name: "foobar"})
if err == nil {
t.Fatalf("Load() did not return an error for non-existing blob")
}
@@ -153,11 +149,6 @@ func (s *Suite) TestLoad(t *testing.T) {
t.Logf("saved %d bytes as %v", length, handle)
- err = b.Load(context.TODO(), handle, 100, -1, noop)
- if err == nil {
- t.Fatalf("Load() returned no error for negative offset!")
- }
-
err = b.Load(context.TODO(), handle, 0, 0, func(rd io.Reader) error {
_, err := io.Copy(io.Discard, rd)
if err != nil {
@@ -237,8 +228,12 @@ func (s *Suite) TestLoad(t *testing.T) {
test.OK(t, b.Remove(context.TODO(), handle))
}
+type setter interface {
+ SetListMaxItems(int)
+}
+
// TestList makes sure that the backend implements List() pagination correctly.
-func (s *Suite) TestList(t *testing.T) {
+func (s *Suite[C]) TestList(t *testing.T) {
seedRand(t)
numTestFiles := rand.Intn(20) + 20
@@ -284,10 +279,6 @@ func (s *Suite) TestList(t *testing.T) {
t.Run(fmt.Sprintf("max-%v", test.maxItems), func(t *testing.T) {
list2 := make(map[restic.ID]int64)
- type setter interface {
- SetListMaxItems(int)
- }
-
if s, ok := b.(setter); ok {
t.Logf("setting max list items to %d", test.maxItems)
s.SetListMaxItems(test.maxItems)
@@ -341,7 +332,7 @@ func (s *Suite) TestList(t *testing.T) {
}
// TestListCancel tests that the context is respected and the error is returned by List.
-func (s *Suite) TestListCancel(t *testing.T) {
+func (s *Suite[C]) TestListCancel(t *testing.T) {
seedRand(t)
numTestFiles := 5
@@ -437,6 +428,11 @@ func (s *Suite) TestListCancel(t *testing.T) {
// wait until the context is cancelled
<-ctxTimeout.Done()
+ // The cancellation of a context first closes the done channel of the context and
+ // _afterwards_ propagates the cancellation to child contexts. If the List
+ // implementation uses a child context, then it may take a moment until that context
+ // is also cancelled. Thus give the context cancellation a moment to propagate.
+ time.Sleep(time.Millisecond)
return nil
})
@@ -481,7 +477,7 @@ func (ec errorCloser) Rewind() error {
}
// TestSave tests saving data in the backend.
-func (s *Suite) TestSave(t *testing.T) {
+func (s *Suite[C]) TestSave(t *testing.T) {
seedRand(t)
b := s.open(t)
@@ -597,7 +593,7 @@ func (r *incompleteByteReader) Length() int64 {
}
// TestSaveError tests saving data in the backend.
-func (s *Suite) TestSaveError(t *testing.T) {
+func (s *Suite[C]) TestSaveError(t *testing.T) {
seedRand(t)
b := s.open(t)
@@ -636,7 +632,7 @@ func (b *wrongByteReader) Hash() []byte {
}
// TestSaveWrongHash tests that uploads with a wrong hash fail
-func (s *Suite) TestSaveWrongHash(t *testing.T) {
+func (s *Suite[C]) TestSaveWrongHash(t *testing.T) {
seedRand(t)
b := s.open(t)
@@ -687,14 +683,14 @@ func store(t testing.TB, b restic.Backend, tpe restic.FileType, data []byte) res
}
// testLoad loads a blob (but discards its contents).
-func testLoad(b restic.Backend, h restic.Handle, length int, offset int64) error {
+func testLoad(b restic.Backend, h restic.Handle) error {
return b.Load(context.TODO(), h, 0, 0, func(rd io.Reader) (ierr error) {
_, ierr = io.Copy(io.Discard, rd)
return ierr
})
}
-func (s *Suite) delayedRemove(t testing.TB, be restic.Backend, handles ...restic.Handle) error {
+func (s *Suite[C]) delayedRemove(t testing.TB, be restic.Backend, handles ...restic.Handle) error {
// Some backend (swift, I'm looking at you) may implement delayed
// removal of data. Let's wait a bit if this happens.
@@ -761,7 +757,7 @@ func delayedList(t testing.TB, b restic.Backend, tpe restic.FileType, max int, m
}
// TestBackend tests all functions of the backend.
-func (s *Suite) TestBackend(t *testing.T) {
+func (s *Suite[C]) TestBackend(t *testing.T) {
b := s.open(t)
defer s.close(t, b)
@@ -788,7 +784,7 @@ func (s *Suite) TestBackend(t *testing.T) {
test.Assert(t, b.IsNotExist(err), "IsNotExist() did not recognize Stat() error: %v", err)
// try to read not existing blob
- err = testLoad(b, h, 0, 0)
+ err = testLoad(b, h)
test.Assert(t, err != nil, "blob could be read before creation")
test.Assert(t, b.IsNotExist(err), "IsNotExist() did not recognize Load() error: %v", err)
@@ -882,7 +878,7 @@ func (s *Suite) TestBackend(t *testing.T) {
}
// TestZZZDelete tests the Delete function. The name ensures that this test is executed last.
-func (s *Suite) TestZZZDelete(t *testing.T) {
+func (s *Suite[C]) TestZZZDelete(t *testing.T) {
if !test.TestCleanupTempDirs {
t.Skipf("not removing backend, TestCleanupTempDirs is false")
}
diff --git a/internal/backend/utils.go b/internal/backend/utils.go
index d2ac44670..cd6614f34 100644
--- a/internal/backend/utils.go
+++ b/internal/backend/utils.go
@@ -62,6 +62,7 @@ func LimitReadCloser(r io.ReadCloser, n int64) *LimitedReadCloser {
func DefaultLoad(ctx context.Context, h restic.Handle, length int, offset int64,
openReader func(ctx context.Context, h restic.Handle, length int, offset int64) (io.ReadCloser, error),
fn func(rd io.Reader) error) error {
+
rd, err := openReader(ctx, h, length, offset)
if err != nil {
return err
@@ -74,6 +75,31 @@ func DefaultLoad(ctx context.Context, h restic.Handle, length int, offset int64,
return rd.Close()
}
+// DefaultDelete removes all restic keys in the bucket. It will not remove the bucket itself.
+func DefaultDelete(ctx context.Context, be restic.Backend) error {
+	alltypes := []restic.FileType{
+		restic.PackFile,
+		restic.KeyFile,
+		restic.LockFile,
+		restic.SnapshotFile,
+		restic.IndexFile}
+
+	for _, t := range alltypes {
+		err := be.List(ctx, t, func(fi restic.FileInfo) error {
+			return be.Remove(ctx, restic.Handle{Type: t, Name: fi.Name})
+		})
+		if err != nil {
+			return err // propagate List/Remove failures instead of silently swallowing them
+		}
+	}
+	err := be.Remove(ctx, restic.Handle{Type: restic.ConfigFile})
+	if err != nil && be.IsNotExist(err) {
+		err = nil
+	}
+
+	return err
+}
+
+
type memorizedLister struct {
fileInfos []restic.FileInfo
tpe restic.FileType
diff --git a/internal/backend/utils_test.go b/internal/backend/utils_test.go
index 16995afd3..8392bfa8f 100644
--- a/internal/backend/utils_test.go
+++ b/internal/backend/utils_test.go
@@ -154,7 +154,7 @@ type mockReader struct {
closed bool
}
-func (rd *mockReader) Read(p []byte) (n int, err error) {
+func (rd *mockReader) Read(_ []byte) (n int, err error) {
return 0, nil
}
func (rd *mockReader) Close() error {
diff --git a/internal/cache/backend.go b/internal/cache/backend.go
index a707f8243..311b099ee 100644
--- a/internal/cache/backend.go
+++ b/internal/cache/backend.go
@@ -83,7 +83,7 @@ func (b *Backend) Save(ctx context.Context, h restic.Handle, rd restic.RewindRea
if err != nil {
debug.Log("unable to save %v to cache: %v", h, err)
_ = b.Cache.remove(h)
- return nil
+ return err
}
return nil
@@ -106,11 +106,19 @@ func (b *Backend) cacheFile(ctx context.Context, h restic.Handle) error {
return nil
}
+ defer func() {
+ // signal other waiting goroutines that the file may now be cached
+ close(finish)
+
+ // remove the finish channel from the map
+ b.inProgressMutex.Lock()
+ delete(b.inProgress, h)
+ b.inProgressMutex.Unlock()
+ }()
+
// test again, maybe the file was cached in the meantime
if !b.Cache.Has(h) {
-
// nope, it's still not in the cache, pull it from the repo and save it
-
err := b.Backend.Load(ctx, h, 0, 0, func(rd io.Reader) error {
return b.Cache.Save(h, rd)
})
@@ -118,21 +126,14 @@ func (b *Backend) cacheFile(ctx context.Context, h restic.Handle) error {
// try to remove from the cache, ignore errors
_ = b.Cache.remove(h)
}
+ return err
}
- // signal other waiting goroutines that the file may now be cached
- close(finish)
-
- // remove the finish channel from the map
- b.inProgressMutex.Lock()
- delete(b.inProgress, h)
- b.inProgressMutex.Unlock()
-
return nil
}
// loadFromCache will try to load the file from the cache.
-func (b *Backend) loadFromCache(ctx context.Context, h restic.Handle, length int, offset int64, consumer func(rd io.Reader) error) (bool, error) {
+func (b *Backend) loadFromCache(h restic.Handle, length int, offset int64, consumer func(rd io.Reader) error) (bool, error) {
rd, err := b.Cache.load(h, length, offset)
if err != nil {
return false, err
@@ -159,7 +160,7 @@ func (b *Backend) Load(ctx context.Context, h restic.Handle, length int, offset
}
// try loading from cache without checking that the handle is actually cached
- inCache, err := b.loadFromCache(ctx, h, length, offset, consumer)
+ inCache, err := b.loadFromCache(h, length, offset, consumer)
if inCache {
if err == nil {
return nil
@@ -178,11 +179,13 @@ func (b *Backend) Load(ctx context.Context, h restic.Handle, length int, offset
debug.Log("auto-store %v in the cache", h)
err = b.cacheFile(ctx, h)
- if err == nil {
- inCache, err = b.loadFromCache(ctx, h, length, offset, consumer)
- if inCache {
- return err
- }
+ if err != nil {
+ return err
+ }
+
+ inCache, err = b.loadFromCache(h, length, offset, consumer)
+ if inCache {
+ return err
}
debug.Log("error caching %v: %v, falling back to backend", h, err)
@@ -211,3 +214,7 @@ func (b *Backend) Stat(ctx context.Context, h restic.Handle) (restic.FileInfo, e
func (b *Backend) IsNotExist(err error) bool {
return b.Backend.IsNotExist(err)
}
+
+func (b *Backend) Unwrap() restic.Backend {
+ return b.Backend
+}
diff --git a/internal/cache/backend_test.go b/internal/cache/backend_test.go
index 3ddab5952..930d853b2 100644
--- a/internal/cache/backend_test.go
+++ b/internal/cache/backend_test.go
@@ -118,7 +118,7 @@ type loadErrorBackend struct {
loadError error
}
-func (be loadErrorBackend) Load(ctx context.Context, h restic.Handle, length int, offset int64, fn func(rd io.Reader) error) error {
+func (be loadErrorBackend) Load(_ context.Context, _ restic.Handle, _ int, _ int64, _ func(rd io.Reader) error) error {
time.Sleep(10 * time.Millisecond)
return be.loadError
}
diff --git a/internal/cache/cache.go b/internal/cache/cache.go
index 075f7f6a1..5b3601741 100644
--- a/internal/cache/cache.go
+++ b/internal/cache/cache.go
@@ -26,10 +26,6 @@ const fileMode = 0644
func readVersion(dir string) (v uint, err error) {
buf, err := os.ReadFile(filepath.Join(dir, "version"))
- if errors.Is(err, os.ErrNotExist) {
- return 0, nil
- }
-
if err != nil {
return 0, errors.Wrap(err, "readVersion")
}
@@ -53,10 +49,6 @@ var cacheLayoutPaths = map[restic.FileType]string{
const cachedirTagSignature = "Signature: 8a477f597d28d172789f06886806bc55\n"
func writeCachedirTag(dir string) error {
- if err := fs.MkdirAll(dir, dirMode); err != nil {
- return errors.WithStack(err)
- }
-
tagfile := filepath.Join(dir, "CACHEDIR.TAG")
f, err := fs.OpenFile(tagfile, os.O_CREATE|os.O_EXCL|os.O_WRONLY, fileMode)
if err != nil {
@@ -89,7 +81,7 @@ func New(id string, basedir string) (c *Cache, err error) {
}
}
- err = fs.MkdirAll(basedir, 0700)
+ err = fs.MkdirAll(basedir, dirMode)
if err != nil {
return nil, errors.WithStack(err)
}
@@ -102,30 +94,32 @@ func New(id string, basedir string) (c *Cache, err error) {
cachedir := filepath.Join(basedir, id)
debug.Log("using cache dir %v", cachedir)
+ created := false
v, err := readVersion(cachedir)
- if err != nil {
- return nil, err
- }
-
- if v > cacheVersion {
- return nil, errors.New("cache version is newer")
- }
-
- // create the repo cache dir if it does not exist yet
- var created bool
- _, err = fs.Lstat(cachedir)
- if errors.Is(err, os.ErrNotExist) {
- err = fs.MkdirAll(cachedir, dirMode)
+ switch {
+ case err == nil:
+ if v > cacheVersion {
+ return nil, errors.New("cache version is newer")
+ }
+ // Update the timestamp so that we can detect old cache dirs.
+ err = updateTimestamp(cachedir)
if err != nil {
+ return nil, err
+ }
+
+ case errors.Is(err, os.ErrNotExist):
+ // Create the repo cache dir. The parent exists, so Mkdir suffices.
+ err := fs.Mkdir(cachedir, dirMode)
+ switch {
+ case err == nil:
+ created = true
+ case errors.Is(err, os.ErrExist):
+ default:
return nil, errors.WithStack(err)
}
- created = true
- }
- // update the timestamp so that we can detect old cache dirs
- err = updateTimestamp(cachedir)
- if err != nil {
- return nil, err
+ default:
+ return nil, errors.Wrap(err, "readVersion")
}
if v < cacheVersion {
diff --git a/internal/cache/cache_test.go b/internal/cache/cache_test.go
new file mode 100644
index 000000000..b9d0b905d
--- /dev/null
+++ b/internal/cache/cache_test.go
@@ -0,0 +1,46 @@
+package cache
+
+import (
+ "os"
+ "path/filepath"
+ "testing"
+
+ "github.com/restic/restic/internal/restic"
+ rtest "github.com/restic/restic/internal/test"
+)
+
+func TestNew(t *testing.T) {
+ parent := rtest.TempDir(t)
+ basedir := filepath.Join(parent, "cache")
+ id := restic.NewRandomID().String()
+ tagFile := filepath.Join(basedir, "CACHEDIR.TAG")
+ versionFile := filepath.Join(basedir, id, "version")
+
+ const (
+ stepCreate = iota
+ stepComplete
+ stepRmTag
+ stepRmVersion
+ stepEnd
+ )
+
+ for step := stepCreate; step < stepEnd; step++ {
+ switch step {
+ case stepRmTag:
+ rtest.OK(t, os.Remove(tagFile))
+ case stepRmVersion:
+ rtest.OK(t, os.Remove(versionFile))
+ }
+
+ c, err := New(id, basedir)
+ rtest.OK(t, err)
+ rtest.Equals(t, basedir, c.Base)
+ rtest.Equals(t, step == stepCreate, c.Created)
+
+ for _, name := range []string{tagFile, versionFile} {
+ info, err := os.Lstat(name)
+ rtest.OK(t, err)
+ rtest.Assert(t, info.Mode().IsRegular(), "")
+ }
+ }
+}
diff --git a/internal/cache/file_test.go b/internal/cache/file_test.go
index 111a2430f..e72133cd7 100644
--- a/internal/cache/file_test.go
+++ b/internal/cache/file_test.go
@@ -11,6 +11,7 @@ import (
"time"
"github.com/restic/restic/internal/errors"
+ "github.com/restic/restic/internal/fs"
"github.com/restic/restic/internal/restic"
"github.com/restic/restic/internal/test"
@@ -266,3 +267,19 @@ func TestFileSaveConcurrent(t *testing.T) {
saved := load(t, c, h)
test.Equals(t, data, saved)
}
+
+func TestFileSaveAfterDamage(t *testing.T) {
+ c := TestNewCache(t)
+ test.OK(t, fs.RemoveAll(c.path))
+
+ // save a few bytes of data in the cache
+ data := test.Random(123456789, 42)
+ id := restic.Hash(data)
+ h := restic.Handle{
+ Type: restic.PackFile,
+ Name: id.String(),
+ }
+ if err := c.Save(h, bytes.NewReader(data)); err == nil {
+ t.Fatal("Missing error when saving to deleted cache directory")
+ }
+}
diff --git a/internal/checker/checker_test.go b/internal/checker/checker_test.go
index 775484652..6405ecfbd 100644
--- a/internal/checker/checker_test.go
+++ b/internal/checker/checker_test.go
@@ -331,10 +331,6 @@ func (erd errorReadCloser) Read(p []byte) (int, error) {
// induceError flips a bit in the slice.
func induceError(data []byte) {
- if rand.Float32() < 0.2 {
- return
- }
-
pos := rand.Intn(len(data))
data[pos] ^= 1
}
diff --git a/internal/crypto/crypto.go b/internal/crypto/crypto.go
index 56ee61dba..752d886e3 100644
--- a/internal/crypto/crypto.go
+++ b/internal/crypto/crypto.go
@@ -322,7 +322,7 @@ func (k *Key) Seal(dst, nonce, plaintext, additionalData []byte) []byte {
//
// Even if the function fails, the contents of dst, up to its capacity,
// may be overwritten.
-func (k *Key) Open(dst, nonce, ciphertext, additionalData []byte) ([]byte, error) {
+func (k *Key) Open(dst, nonce, ciphertext, _ []byte) ([]byte, error) {
if !k.Valid() {
return nil, errors.New("invalid key")
}
diff --git a/internal/debug/stacktrace.go b/internal/debug/stacktrace.go
new file mode 100644
index 000000000..a8db83160
--- /dev/null
+++ b/internal/debug/stacktrace.go
@@ -0,0 +1,15 @@
+package debug
+
+import "runtime"
+
+func DumpStacktrace() string {
+ buf := make([]byte, 128*1024)
+
+ for {
+ l := runtime.Stack(buf, true)
+ if l < len(buf) {
+ return string(buf[:l])
+ }
+ buf = make([]byte, len(buf)*2)
+ }
+}
diff --git a/internal/debug/testing.go b/internal/debug/testing.go
new file mode 100644
index 000000000..31cbd7931
--- /dev/null
+++ b/internal/debug/testing.go
@@ -0,0 +1,23 @@
+package debug
+
+import (
+ "log"
+ "os"
+ "testing"
+)
+
+// TestLogToStderr configures debug to log to stderr if the debug log is not
+// already configured, and returns whether logging was enabled.
+func TestLogToStderr(_ testing.TB) bool {
+ if opts.isEnabled {
+ return false
+ }
+ opts.logger = log.New(os.Stderr, "", log.LstdFlags)
+ opts.isEnabled = true
+ return true
+}
+
+func TestDisableLog(_ testing.TB) {
+ opts.logger = nil
+ opts.isEnabled = false
+}
diff --git a/internal/dump/tar.go b/internal/dump/tar.go
index 6e87aabe5..df9ea429d 100644
--- a/internal/dump/tar.go
+++ b/internal/dump/tar.go
@@ -3,6 +3,7 @@ package dump
import (
"archive/tar"
"context"
+ "fmt"
"os"
"path/filepath"
"strings"
@@ -94,9 +95,8 @@ func (d *Dumper) dumpNodeTar(ctx context.Context, node *restic.Node, w *tar.Writ
err = w.WriteHeader(header)
if err != nil {
- return errors.Wrap(err, "TarHeader")
+ return fmt.Errorf("writing header for %q: %w", node.Path, err)
}
-
return d.writeNode(ctx, w, node)
}
diff --git a/internal/dump/tar_test.go b/internal/dump/tar_test.go
index 0f2cb27a8..3556e6aeb 100644
--- a/internal/dump/tar_test.go
+++ b/internal/dump/tar_test.go
@@ -3,6 +3,8 @@ package dump
import (
"archive/tar"
"bytes"
+ "context"
+ "errors"
"fmt"
"io"
"os"
@@ -12,6 +14,8 @@ import (
"time"
"github.com/restic/restic/internal/fs"
+ "github.com/restic/restic/internal/restic"
+ rtest "github.com/restic/restic/internal/test"
)
func TestWriteTar(t *testing.T) {
@@ -112,3 +116,29 @@ func checkTar(t *testing.T, testDir string, srcTar *bytes.Buffer) error {
return nil
}
+
+// #4307.
+func TestFieldTooLong(t *testing.T) {
+ const maxSpecialFileSize = 1 << 20 // Unexported limit in archive/tar.
+
+ node := restic.Node{
+ Name: "file_with_xattr",
+ Path: "/file_with_xattr",
+ Type: "file",
+ Mode: 0644,
+ ExtendedAttributes: []restic.ExtendedAttribute{
+ {
+ Name: "user.way_too_large",
+ Value: make([]byte, 2*maxSpecialFileSize),
+ },
+ },
+ }
+
+ d := Dumper{format: "tar"}
+ err := d.dumpNodeTar(context.Background(), &node, tar.NewWriter(io.Discard))
+
+ // We want a tar.ErrFieldTooLong that has the filename.
+ rtest.Assert(t, errors.Is(err, tar.ErrFieldTooLong), "wrong type %T", err)
+ rtest.Assert(t, strings.Contains(err.Error(), node.Path),
+ "no filename in %q", err)
+}
diff --git a/internal/fs/fs_reader.go b/internal/fs/fs_reader.go
index 1551ad919..47af74245 100644
--- a/internal/fs/fs_reader.go
+++ b/internal/fs/fs_reader.go
@@ -35,7 +35,7 @@ var _ FS = &Reader{}
// VolumeName returns leading volume name, for the Reader file system it's
// always the empty string.
-func (fs *Reader) VolumeName(path string) string {
+func (fs *Reader) VolumeName(_ string) string {
return ""
}
@@ -76,7 +76,7 @@ func (fs *Reader) fi() os.FileInfo {
// (O_RDONLY etc.) and perm, (0666 etc.) if applicable. If successful,
// methods on the returned File can be used for I/O.
// If there is an error, it will be of type *os.PathError.
-func (fs *Reader) OpenFile(name string, flag int, perm os.FileMode) (f File, err error) {
+func (fs *Reader) OpenFile(name string, flag int, _ os.FileMode) (f File, err error) {
if flag & ^(O_RDONLY|O_NOFOLLOW) != 0 {
return nil, pathError("open", name,
fmt.Errorf("invalid combination of flags 0x%x", flag))
@@ -149,7 +149,7 @@ func (fs *Reader) Separator() string {
}
// IsAbs reports whether the path is absolute. For the Reader, this is always the case.
-func (fs *Reader) IsAbs(p string) bool {
+func (fs *Reader) IsAbs(_ string) bool {
return true
}
@@ -236,11 +236,11 @@ func (f fakeFile) Fd() uintptr {
return 0
}
-func (f fakeFile) Readdirnames(n int) ([]string, error) {
+func (f fakeFile) Readdirnames(_ int) ([]string, error) {
return nil, pathError("readdirnames", f.name, os.ErrInvalid)
}
-func (f fakeFile) Readdir(n int) ([]os.FileInfo, error) {
+func (f fakeFile) Readdir(_ int) ([]os.FileInfo, error) {
return nil, pathError("readdir", f.name, os.ErrInvalid)
}
@@ -248,7 +248,7 @@ func (f fakeFile) Seek(int64, int) (int64, error) {
return 0, pathError("seek", f.name, os.ErrInvalid)
}
-func (f fakeFile) Read(p []byte) (int, error) {
+func (f fakeFile) Read(_ []byte) (int, error) {
return 0, pathError("read", f.name, os.ErrInvalid)
}
diff --git a/internal/restorer/preallocate_darwin.go b/internal/fs/preallocate_darwin.go
index ae6e5ee3e..af46e971b 100644
--- a/internal/restorer/preallocate_darwin.go
+++ b/internal/fs/preallocate_darwin.go
@@ -1,4 +1,4 @@
-package restorer
+package fs
import (
"os"
@@ -6,7 +6,7 @@ import (
"golang.org/x/sys/unix"
)
-func preallocateFile(wr *os.File, size int64) error {
+func PreallocateFile(wr *os.File, size int64) error {
// try contiguous first
fst := unix.Fstore_t{
Flags: unix.F_ALLOCATECONTIG | unix.F_ALLOCATEALL,
diff --git a/internal/restorer/preallocate_linux.go b/internal/fs/preallocate_linux.go
index dc73ddfe2..30b9e4644 100644
--- a/internal/restorer/preallocate_linux.go
+++ b/internal/fs/preallocate_linux.go
@@ -1,4 +1,4 @@
-package restorer
+package fs
import (
"os"
@@ -6,7 +6,7 @@ import (
"golang.org/x/sys/unix"
)
-func preallocateFile(wr *os.File, size int64) error {
+func PreallocateFile(wr *os.File, size int64) error {
if size <= 0 {
return nil
}
diff --git a/internal/restorer/preallocate_other.go b/internal/fs/preallocate_other.go
index f01757bf4..4fb44d421 100644
--- a/internal/restorer/preallocate_other.go
+++ b/internal/fs/preallocate_other.go
@@ -1,11 +1,11 @@
//go:build !linux && !darwin
// +build !linux,!darwin
-package restorer
+package fs
import "os"
-func preallocateFile(wr *os.File, size int64) error {
+func PreallocateFile(wr *os.File, size int64) error {
// Maybe truncate can help?
// Windows: This calls SetEndOfFile which preallocates space on disk
return wr.Truncate(size)
diff --git a/internal/restorer/preallocate_test.go b/internal/fs/preallocate_test.go
index 0cc2b3f5d..9dabd2f36 100644
--- a/internal/restorer/preallocate_test.go
+++ b/internal/fs/preallocate_test.go
@@ -1,4 +1,4 @@
-package restorer
+package fs
import (
"os"
@@ -7,7 +7,6 @@ import (
"syscall"
"testing"
- "github.com/restic/restic/internal/fs"
"github.com/restic/restic/internal/test"
)
@@ -23,7 +22,7 @@ func TestPreallocate(t *testing.T) {
test.OK(t, wr.Close())
}()
- err = preallocateFile(wr, i)
+ err = PreallocateFile(wr, i)
if err == syscall.ENOTSUP {
t.SkipNow()
}
@@ -32,7 +31,7 @@ func TestPreallocate(t *testing.T) {
fi, err := wr.Stat()
test.OK(t, err)
- efi := fs.ExtendedStat(fi)
+ efi := ExtendedStat(fi)
test.Assert(t, efi.Size == i || efi.Blocks > 0, "Preallocated size of %v, got size %v block %v", i, efi.Size, efi.Blocks)
})
}
diff --git a/internal/fs/vss.go b/internal/fs/vss.go
index 9995f2d3e..5f0ea36d9 100644
--- a/internal/fs/vss.go
+++ b/internal/fs/vss.go
@@ -34,7 +34,7 @@ func HasSufficientPrivilegesForVSS() error {
// NewVssSnapshot creates a new vss snapshot. If creating the snapshots doesn't
// finish within the timeout an error is returned.
func NewVssSnapshot(
- volume string, timeoutInSeconds uint, msgError ErrorHandler) (VssSnapshot, error) {
+ _ string, _ uint, _ ErrorHandler) (VssSnapshot, error) {
return VssSnapshot{}, errors.New("VSS snapshots are only supported on windows")
}
diff --git a/internal/fuse/dir.go b/internal/fuse/dir.go
index 3984f15af..242b4b03e 100644
--- a/internal/fuse/dir.go
+++ b/internal/fuse/dir.go
@@ -9,6 +9,7 @@ import (
"os"
"path/filepath"
"sync"
+ "syscall"
"github.com/anacrolix/fuse"
"github.com/anacrolix/fuse/fs"
@@ -119,7 +120,7 @@ func (d *dir) open(ctx context.Context) error {
return nil
}
-func (d *dir) Attr(ctx context.Context, a *fuse.Attr) error {
+func (d *dir) Attr(_ context.Context, a *fuse.Attr) error {
debug.Log("Attr()")
a.Inode = d.inode
a.Mode = os.ModeDir | d.node.Mode
@@ -202,7 +203,7 @@ func (d *dir) Lookup(ctx context.Context, name string) (fs.Node, error) {
node, ok := d.items[name]
if !ok {
debug.Log(" Lookup(%v) -> not found", name)
- return nil, fuse.ENOENT
+ return nil, syscall.ENOENT
}
inode := inodeFromNode(d.inode, node)
switch node.Type {
@@ -216,24 +217,15 @@ func (d *dir) Lookup(ctx context.Context, name string) (fs.Node, error) {
return newOther(d.root, inode, node)
default:
debug.Log(" node %v has unknown type %v", name, node.Type)
- return nil, fuse.ENOENT
+ return nil, syscall.ENOENT
}
}
-func (d *dir) Listxattr(ctx context.Context, req *fuse.ListxattrRequest, resp *fuse.ListxattrResponse) error {
- debug.Log("Listxattr(%v, %v)", d.node.Name, req.Size)
- for _, attr := range d.node.ExtendedAttributes {
- resp.Append(attr.Name)
- }
+func (d *dir) Listxattr(_ context.Context, req *fuse.ListxattrRequest, resp *fuse.ListxattrResponse) error {
+ nodeToXattrList(d.node, req, resp)
return nil
}
-func (d *dir) Getxattr(ctx context.Context, req *fuse.GetxattrRequest, resp *fuse.GetxattrResponse) error {
- debug.Log("Getxattr(%v, %v, %v)", d.node.Name, req.Name, req.Size)
- attrval := d.node.GetExtendedAttribute(req.Name)
- if attrval != nil {
- resp.Xattr = attrval
- return nil
- }
- return fuse.ErrNoXattr
+func (d *dir) Getxattr(_ context.Context, req *fuse.GetxattrRequest, resp *fuse.GetxattrResponse) error {
+ return nodeGetXattr(d.node, req, resp)
}
diff --git a/internal/fuse/file.go b/internal/fuse/file.go
index 35bc2a73e..aec39273a 100644
--- a/internal/fuse/file.go
+++ b/internal/fuse/file.go
@@ -45,7 +45,7 @@ func newFile(root *Root, inode uint64, node *restic.Node) (fusefile *file, err e
}, nil
}
-func (f *file) Attr(ctx context.Context, a *fuse.Attr) error {
+func (f *file) Attr(_ context.Context, a *fuse.Attr) error {
debug.Log("Attr(%v)", f.node.Name)
a.Inode = f.inode
a.Mode = f.node.Mode
@@ -66,7 +66,7 @@ func (f *file) Attr(ctx context.Context, a *fuse.Attr) error {
}
-func (f *file) Open(ctx context.Context, req *fuse.OpenRequest, resp *fuse.OpenResponse) (fs.Handle, error) {
+func (f *file) Open(_ context.Context, _ *fuse.OpenRequest, _ *fuse.OpenResponse) (fs.Handle, error) {
debug.Log("open file %v with %d blobs", f.node.Name, len(f.node.Content))
var bytes uint64
@@ -166,20 +166,11 @@ func (f *openFile) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.R
return nil
}
-func (f *file) Listxattr(ctx context.Context, req *fuse.ListxattrRequest, resp *fuse.ListxattrResponse) error {
- debug.Log("Listxattr(%v, %v)", f.node.Name, req.Size)
- for _, attr := range f.node.ExtendedAttributes {
- resp.Append(attr.Name)
- }
+func (f *file) Listxattr(_ context.Context, req *fuse.ListxattrRequest, resp *fuse.ListxattrResponse) error {
+ nodeToXattrList(f.node, req, resp)
return nil
}
-func (f *file) Getxattr(ctx context.Context, req *fuse.GetxattrRequest, resp *fuse.GetxattrResponse) error {
- debug.Log("Getxattr(%v, %v, %v)", f.node.Name, req.Name, req.Size)
- attrval := f.node.GetExtendedAttribute(req.Name)
- if attrval != nil {
- resp.Xattr = attrval
- return nil
- }
- return fuse.ErrNoXattr
+func (f *file) Getxattr(_ context.Context, req *fuse.GetxattrRequest, resp *fuse.GetxattrResponse) error {
+ return nodeGetXattr(f.node, req, resp)
}
diff --git a/internal/fuse/fuse_test.go b/internal/fuse/fuse_test.go
index 9ca1ec0c6..0a121b986 100644
--- a/internal/fuse/fuse_test.go
+++ b/internal/fuse/fuse_test.go
@@ -73,7 +73,7 @@ func TestFuseFile(t *testing.T) {
timestamp, err := time.Parse(time.RFC3339, "2017-01-24T10:42:56+01:00")
rtest.OK(t, err)
- restic.TestCreateSnapshot(t, repo, timestamp, 2, 0.1)
+ restic.TestCreateSnapshot(t, repo, timestamp, 2)
sn := loadFirstSnapshot(t, repo)
tree := loadTree(t, repo, *sn.Tree)
@@ -180,7 +180,7 @@ func TestFuseDir(t *testing.T) {
// Test top-level directories for their UID and GID.
func TestTopUIDGID(t *testing.T) {
repo := repository.TestRepository(t)
- restic.TestCreateSnapshot(t, repo, time.Unix(1460289341, 207401672), 0, 0)
+ restic.TestCreateSnapshot(t, repo, time.Unix(1460289341, 207401672), 0)
testTopUIDGID(t, Config{}, repo, uint32(os.Getuid()), uint32(os.Getgid()))
testTopUIDGID(t, Config{OwnerIsRoot: true}, repo, 0, 0)
@@ -271,6 +271,31 @@ func TestInodeFromNode(t *testing.T) {
rtest.Assert(t, inoA != inoAbb, "inode(a/b/b) = inode(a)")
}
+func TestLink(t *testing.T) {
+ node := &restic.Node{Name: "foo.txt", Type: "symlink", Links: 1, LinkTarget: "dst", ExtendedAttributes: []restic.ExtendedAttribute{
+ {Name: "foo", Value: []byte("bar")},
+ }}
+
+ lnk, err := newLink(&Root{}, 42, node)
+ rtest.OK(t, err)
+ target, err := lnk.Readlink(context.TODO(), nil)
+ rtest.OK(t, err)
+ rtest.Equals(t, node.LinkTarget, target)
+
+ exp := &fuse.ListxattrResponse{}
+ exp.Append("foo")
+ resp := &fuse.ListxattrResponse{}
+ rtest.OK(t, lnk.Listxattr(context.TODO(), &fuse.ListxattrRequest{}, resp))
+ rtest.Equals(t, exp.Xattr, resp.Xattr)
+
+ getResp := &fuse.GetxattrResponse{}
+ rtest.OK(t, lnk.Getxattr(context.TODO(), &fuse.GetxattrRequest{Name: "foo"}, getResp))
+ rtest.Equals(t, node.ExtendedAttributes[0].Value, getResp.Xattr)
+
+ err = lnk.Getxattr(context.TODO(), &fuse.GetxattrRequest{Name: "invalid"}, nil)
+ rtest.Assert(t, err != nil, "missing error on reading invalid xattr")
+}
+
var sink uint64
func BenchmarkInode(b *testing.B) {
diff --git a/internal/fuse/link.go b/internal/fuse/link.go
index 47ee666a3..3aea8b06e 100644
--- a/internal/fuse/link.go
+++ b/internal/fuse/link.go
@@ -24,11 +24,11 @@ func newLink(root *Root, inode uint64, node *restic.Node) (*link, error) {
return &link{root: root, inode: inode, node: node}, nil
}
-func (l *link) Readlink(ctx context.Context, req *fuse.ReadlinkRequest) (string, error) {
+func (l *link) Readlink(_ context.Context, _ *fuse.ReadlinkRequest) (string, error) {
return l.node.LinkTarget, nil
}
-func (l *link) Attr(ctx context.Context, a *fuse.Attr) error {
+func (l *link) Attr(_ context.Context, a *fuse.Attr) error {
a.Inode = l.inode
a.Mode = l.node.Mode
@@ -46,3 +46,12 @@ func (l *link) Attr(ctx context.Context, a *fuse.Attr) error {
return nil
}
+
+func (l *link) Listxattr(_ context.Context, req *fuse.ListxattrRequest, resp *fuse.ListxattrResponse) error {
+ nodeToXattrList(l.node, req, resp)
+ return nil
+}
+
+func (l *link) Getxattr(_ context.Context, req *fuse.GetxattrRequest, resp *fuse.GetxattrResponse) error {
+ return nodeGetXattr(l.node, req, resp)
+}
diff --git a/internal/fuse/other.go b/internal/fuse/other.go
index 1a78403a7..f536de5c1 100644
--- a/internal/fuse/other.go
+++ b/internal/fuse/other.go
@@ -20,11 +20,11 @@ func newOther(root *Root, inode uint64, node *restic.Node) (*other, error) {
return &other{root: root, inode: inode, node: node}, nil
}
-func (l *other) Readlink(ctx context.Context, req *fuse.ReadlinkRequest) (string, error) {
+func (l *other) Readlink(_ context.Context, _ *fuse.ReadlinkRequest) (string, error) {
return l.node.LinkTarget, nil
}
-func (l *other) Attr(ctx context.Context, a *fuse.Attr) error {
+func (l *other) Attr(_ context.Context, a *fuse.Attr) error {
a.Inode = l.inode
a.Mode = l.node.Mode
diff --git a/internal/fuse/snapshots_dir.go b/internal/fuse/snapshots_dir.go
index c19155741..61df3ad08 100644
--- a/internal/fuse/snapshots_dir.go
+++ b/internal/fuse/snapshots_dir.go
@@ -6,6 +6,7 @@ package fuse
import (
"context"
"os"
+ "syscall"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/restic"
@@ -41,7 +42,7 @@ func NewSnapshotsDir(root *Root, inode, parentInode uint64, dirStruct *Snapshots
}
// Attr returns the attributes for any dir in the snapshots directory structure
-func (d *SnapshotsDir) Attr(ctx context.Context, attr *fuse.Attr) error {
+func (d *SnapshotsDir) Attr(_ context.Context, attr *fuse.Attr) error {
attr.Inode = d.inode
attr.Mode = os.ModeDir | 0555
attr.Uid = d.root.uid
@@ -60,7 +61,7 @@ func (d *SnapshotsDir) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) {
if err != nil {
return nil, unwrapCtxCanceled(err)
} else if meta == nil {
- return nil, fuse.ENOENT
+ return nil, syscall.ENOENT
}
items := []fuse.Dirent{
@@ -99,7 +100,7 @@ func (d *SnapshotsDir) Lookup(ctx context.Context, name string) (fs.Node, error)
if err != nil {
return nil, unwrapCtxCanceled(err)
} else if meta == nil {
- return nil, fuse.ENOENT
+ return nil, syscall.ENOENT
}
entry := meta.names[name]
@@ -114,7 +115,7 @@ func (d *SnapshotsDir) Lookup(ctx context.Context, name string) (fs.Node, error)
}
}
- return nil, fuse.ENOENT
+ return nil, syscall.ENOENT
}
// SnapshotLink
@@ -133,12 +134,12 @@ func newSnapshotLink(root *Root, inode uint64, target string, snapshot *restic.S
}
// Readlink
-func (l *snapshotLink) Readlink(ctx context.Context, req *fuse.ReadlinkRequest) (string, error) {
+func (l *snapshotLink) Readlink(_ context.Context, _ *fuse.ReadlinkRequest) (string, error) {
return l.target, nil
}
// Attr
-func (l *snapshotLink) Attr(ctx context.Context, a *fuse.Attr) error {
+func (l *snapshotLink) Attr(_ context.Context, a *fuse.Attr) error {
a.Inode = l.inode
a.Mode = os.ModeSymlink | 0777
a.Size = uint64(len(l.target))
diff --git a/internal/fuse/xattr.go b/internal/fuse/xattr.go
new file mode 100644
index 000000000..e534c3c0e
--- /dev/null
+++ b/internal/fuse/xattr.go
@@ -0,0 +1,27 @@
+//go:build darwin || freebsd || linux
+// +build darwin freebsd linux
+
+package fuse
+
+import (
+ "github.com/anacrolix/fuse"
+ "github.com/restic/restic/internal/debug"
+ "github.com/restic/restic/internal/restic"
+)
+
+func nodeToXattrList(node *restic.Node, req *fuse.ListxattrRequest, resp *fuse.ListxattrResponse) {
+ debug.Log("Listxattr(%v, %v)", node.Name, req.Size)
+ for _, attr := range node.ExtendedAttributes {
+ resp.Append(attr.Name)
+ }
+}
+
+func nodeGetXattr(node *restic.Node, req *fuse.GetxattrRequest, resp *fuse.GetxattrResponse) error {
+ debug.Log("Getxattr(%v, %v, %v)", node.Name, req.Name, req.Size)
+ attrval := node.GetExtendedAttribute(req.Name)
+ if attrval != nil {
+ resp.Xattr = attrval
+ return nil
+ }
+ return fuse.ErrNoXattr
+}
diff --git a/internal/index/index_parallel.go b/internal/index/index_parallel.go
index a76b08a4e..e7e46e88a 100644
--- a/internal/index/index_parallel.go
+++ b/internal/index/index_parallel.go
@@ -24,7 +24,7 @@ func ForAllIndexes(ctx context.Context, repo restic.Repository,
var idx *Index
oldFormat := false
- buf, err := repo.LoadUnpacked(ctx, restic.IndexFile, id, nil)
+ buf, err := repo.LoadUnpacked(ctx, restic.IndexFile, id)
if err == nil {
idx, oldFormat, err = DecodeIndex(buf, id)
}
diff --git a/internal/index/indexmap.go b/internal/index/indexmap.go
index ef3539d48..2386e01b6 100644
--- a/internal/index/indexmap.go
+++ b/internal/index/indexmap.go
@@ -17,12 +17,12 @@ import (
// needs to be resized when the table grows, preventing memory usage spikes.
type indexMap struct {
// The number of buckets is always a power of two and never zero.
- buckets []*indexEntry
+ buckets []uint
numentries uint
mh maphash.Hash
- free *indexEntry // Free list.
+ blockList hashedArrayTree
}
const (
@@ -41,7 +41,7 @@ func (m *indexMap) add(id restic.ID, packIdx int, offset, length uint32, uncompr
}
h := m.hash(id)
- e := m.newEntry()
+ e, idx := m.newEntry()
e.id = id
e.next = m.buckets[h] // Prepend to existing chain.
e.packIndex = packIdx
@@ -49,18 +49,16 @@ func (m *indexMap) add(id restic.ID, packIdx int, offset, length uint32, uncompr
e.length = length
e.uncompressedLength = uncompressedLength
- m.buckets[h] = e
+ m.buckets[h] = idx
m.numentries++
}
// foreach calls fn for all entries in the map, until fn returns false.
func (m *indexMap) foreach(fn func(*indexEntry) bool) {
- for _, e := range m.buckets {
- for e != nil {
- if !fn(e) {
- return
- }
- e = e.next
+ blockCount := m.blockList.Size()
+ for i := uint(1); i < blockCount; i++ {
+ if !fn(m.resolve(i)) {
+ return
}
}
}
@@ -72,7 +70,10 @@ func (m *indexMap) foreachWithID(id restic.ID, fn func(*indexEntry)) {
}
h := m.hash(id)
- for e := m.buckets[h]; e != nil; e = e.next {
+ ei := m.buckets[h]
+ for ei != 0 {
+ e := m.resolve(ei)
+ ei = e.next
if e.id != id {
continue
}
@@ -87,26 +88,27 @@ func (m *indexMap) get(id restic.ID) *indexEntry {
}
h := m.hash(id)
- for e := m.buckets[h]; e != nil; e = e.next {
+ ei := m.buckets[h]
+ for ei != 0 {
+ e := m.resolve(ei)
if e.id == id {
return e
}
+ ei = e.next
}
return nil
}
func (m *indexMap) grow() {
- old := m.buckets
- m.buckets = make([]*indexEntry, growthFactor*len(m.buckets))
-
- for _, e := range old {
- for e != nil {
- h := m.hash(e.id)
- next := e.next
- e.next = m.buckets[h]
- m.buckets[h] = e
- e = next
- }
+ m.buckets = make([]uint, growthFactor*len(m.buckets))
+
+ blockCount := m.blockList.Size()
+ for i := uint(1); i < blockCount; i++ {
+ e := m.resolve(i)
+
+ h := m.hash(e.id)
+ e.next = m.buckets[h]
+ m.buckets[h] = i
}
}
@@ -124,47 +126,106 @@ func (m *indexMap) hash(id restic.ID) uint {
func (m *indexMap) init() {
const initialBuckets = 64
- m.buckets = make([]*indexEntry, initialBuckets)
+ m.buckets = make([]uint, initialBuckets)
+ // The first entry in blockList serves as a sentinel: index 0 means "no entry".
+ m.blockList = *newHAT()
+ m.newEntry()
}
func (m *indexMap) len() uint { return m.numentries }
-func (m *indexMap) newEntry() *indexEntry {
- // We keep a free list of objects to speed up allocation and GC.
- // There's an obvious trade-off here: allocating in larger batches
- // means we allocate faster and the GC has to keep fewer bits to track
- // what we have in use, but it means we waste some space.
- //
- // Then again, allocating each indexEntry separately also wastes space
- // on 32-bit platforms, because the Go malloc has no size class for
- // exactly 52 bytes, so it puts the indexEntry in a 64-byte slot instead.
- // See src/runtime/sizeclasses.go in the Go source repo.
- //
- // The batch size of 4 means we hit the size classes for 4×64=256 bytes
- // (64-bit) and 4×52=208 bytes (32-bit), wasting nothing in malloc on
- // 64-bit and relatively little on 32-bit.
- const entryAllocBatch = 4
-
- e := m.free
- if e != nil {
- m.free = e.next
- } else {
- free := new([entryAllocBatch]indexEntry)
- e = &free[0]
- for i := 1; i < len(free)-1; i++ {
- free[i].next = &free[i+1]
- }
- m.free = &free[1]
- }
+func (m *indexMap) newEntry() (*indexEntry, uint) {
+ return m.blockList.Alloc()
+}
- return e
+func (m *indexMap) resolve(idx uint) *indexEntry {
+ return m.blockList.Ref(idx)
}
type indexEntry struct {
id restic.ID
- next *indexEntry
+ next uint
packIndex int // Position in containing Index's packs field.
offset uint32
length uint32
uncompressedLength uint32
}
+
+type hashedArrayTree struct {
+ mask uint
+ maskShift uint
+ blockSize uint
+
+ size uint
+ blockList [][]indexEntry
+}
+
+func newHAT() *hashedArrayTree {
+ // start with a small block size
+ blockSizePower := uint(2)
+ blockSize := uint(1 << blockSizePower)
+
+ return &hashedArrayTree{
+ mask: blockSize - 1,
+ maskShift: blockSizePower,
+ blockSize: blockSize,
+ size: 0,
+ blockList: make([][]indexEntry, blockSize),
+ }
+}
+
+func (h *hashedArrayTree) Alloc() (*indexEntry, uint) {
+ h.grow()
+ size := h.size
+ idx, subIdx := h.index(size)
+ h.size++
+ return &h.blockList[idx][subIdx], size
+}
+
+func (h *hashedArrayTree) index(pos uint) (idx uint, subIdx uint) {
+ subIdx = pos & h.mask
+ idx = pos >> h.maskShift
+ return
+}
+
+func (h *hashedArrayTree) Ref(pos uint) *indexEntry {
+ if pos >= h.size {
+ panic("array index out of bounds")
+ }
+
+ idx, subIdx := h.index(pos)
+ return &h.blockList[idx][subIdx]
+}
+
+func (h *hashedArrayTree) Size() uint {
+ return h.size
+}
+
+func (h *hashedArrayTree) grow() {
+ idx, subIdx := h.index(h.size)
+ if int(idx) == len(h.blockList) {
+ // blockList is too small -> double list and block size
+ h.blockSize *= 2
+ h.mask = h.mask*2 + 1
+ h.maskShift++
+ idx = idx / 2
+
+ oldBlocks := h.blockList
+ h.blockList = make([][]indexEntry, h.blockSize)
+
+ // pairwise merging of blocks
+ for i := 0; i < len(oldBlocks); i += 2 {
+ block := make([]indexEntry, 0, h.blockSize)
+ block = append(block, oldBlocks[i]...)
+ block = append(block, oldBlocks[i+1]...)
+ h.blockList[i/2] = block
+ // allow GC
+ oldBlocks[i] = nil
+ oldBlocks[i+1] = nil
+ }
+ }
+ if subIdx == 0 {
+ // new index entry batch
+ h.blockList[idx] = make([]indexEntry, h.blockSize)
+ }
+}
diff --git a/internal/index/indexmap_test.go b/internal/index/indexmap_test.go
index 391131ca0..a16670c7d 100644
--- a/internal/index/indexmap_test.go
+++ b/internal/index/indexmap_test.go
@@ -108,6 +108,21 @@ func TestIndexMapForeachWithID(t *testing.T) {
}
}
+func TestHashedArrayTree(t *testing.T) {
+ hat := newHAT()
+ const testSize = 1024
+ for i := uint(0); i < testSize; i++ {
+ rtest.Assert(t, hat.Size() == i, "expected hat size %v got %v", i, hat.Size())
+ e, idx := hat.Alloc()
+ rtest.Assert(t, idx == i, "expected entry at idx %v got %v", i, idx)
+ e.length = uint32(i)
+ }
+ for i := uint(0); i < testSize; i++ {
+ e := hat.Ref(i)
+ rtest.Assert(t, e.length == uint32(i), "expected entry to contain %v got %v", uint32(i), e.length)
+ }
+}
+
func BenchmarkIndexMapHash(b *testing.B) {
var m indexMap
m.add(restic.ID{}, 0, 0, 0, 0) // Trigger lazy initialization.
diff --git a/internal/index/master_index_test.go b/internal/index/master_index_test.go
index e97339564..45286e89c 100644
--- a/internal/index/master_index_test.go
+++ b/internal/index/master_index_test.go
@@ -4,6 +4,7 @@ import (
"context"
"fmt"
"math/rand"
+ "runtime"
"testing"
"time"
@@ -323,16 +324,27 @@ func BenchmarkMasterIndexEach(b *testing.B) {
}
}
+func BenchmarkMasterIndexGC(b *testing.B) {
+ mIdx, _ := createRandomMasterIndex(b, rand.New(rand.NewSource(0)), 100, 10000)
+
+ b.ResetTimer()
+
+ for i := 0; i < b.N; i++ {
+ runtime.GC()
+ }
+ runtime.KeepAlive(mIdx)
+}
+
var (
snapshotTime = time.Unix(1470492820, 207401672)
depth = 3
)
-func createFilledRepo(t testing.TB, snapshots int, dup float32, version uint) restic.Repository {
+func createFilledRepo(t testing.TB, snapshots int, version uint) restic.Repository {
repo := repository.TestRepositoryWithVersion(t, version)
- for i := 0; i < 3; i++ {
- restic.TestCreateSnapshot(t, repo, snapshotTime.Add(time.Duration(i)*time.Second), depth, dup)
+ for i := 0; i < snapshots; i++ {
+ restic.TestCreateSnapshot(t, repo, snapshotTime.Add(time.Duration(i)*time.Second), depth)
}
return repo
}
@@ -342,7 +354,7 @@ func TestIndexSave(t *testing.T) {
}
func testIndexSave(t *testing.T, version uint) {
- repo := createFilledRepo(t, 3, 0, version)
+ repo := createFilledRepo(t, 3, version)
err := repo.LoadIndex(context.TODO())
if err != nil {
diff --git a/internal/migrations/s3_layout.go b/internal/migrations/s3_layout.go
index d42b94bf8..9effaee70 100644
--- a/internal/migrations/s3_layout.go
+++ b/internal/migrations/s3_layout.go
@@ -8,7 +8,6 @@ import (
"github.com/restic/restic/internal/backend/layout"
"github.com/restic/restic/internal/backend/s3"
- "github.com/restic/restic/internal/cache"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/restic"
@@ -22,24 +21,9 @@ func init() {
// "default" layout.
type S3Layout struct{}
-func toS3Backend(repo restic.Repository) *s3.Backend {
- b := repo.Backend()
- // unwrap cache
- if be, ok := b.(*cache.Backend); ok {
- b = be.Backend
- }
-
- be, ok := b.(*s3.Backend)
- if !ok {
- debug.Log("backend is not s3")
- return nil
- }
- return be
-}
-
// Check tests whether the migration can be applied.
-func (m *S3Layout) Check(ctx context.Context, repo restic.Repository) (bool, string, error) {
- be := toS3Backend(repo)
+func (m *S3Layout) Check(_ context.Context, repo restic.Repository) (bool, string, error) {
+ be := restic.AsBackend[*s3.Backend](repo.Backend())
if be == nil {
debug.Log("backend is not s3")
return false, "backend is not s3", nil
@@ -91,7 +75,7 @@ func (m *S3Layout) moveFiles(ctx context.Context, be *s3.Backend, l layout.Layou
// Apply runs the migration.
func (m *S3Layout) Apply(ctx context.Context, repo restic.Repository) error {
- be := toS3Backend(repo)
+ be := restic.AsBackend[*s3.Backend](repo.Backend())
if be == nil {
debug.Log("backend is not s3")
return errors.New("backend is not s3")
diff --git a/internal/migrations/upgrade_repo_v2.go b/internal/migrations/upgrade_repo_v2.go
index 43a6cd91c..a81abc0e3 100644
--- a/internal/migrations/upgrade_repo_v2.go
+++ b/internal/migrations/upgrade_repo_v2.go
@@ -44,7 +44,7 @@ func (*UpgradeRepoV2) Desc() string {
return "upgrade a repository to version 2"
}
-func (*UpgradeRepoV2) Check(ctx context.Context, repo restic.Repository) (bool, string, error) {
+func (*UpgradeRepoV2) Check(_ context.Context, repo restic.Repository) (bool, string, error) {
isV1 := repo.Config().Version == 1
reason := ""
if !isV1 {
diff --git a/internal/repository/key.go b/internal/repository/key.go
index b3e13ade4..fd20b8e5f 100644
--- a/internal/repository/key.go
+++ b/internal/repository/key.go
@@ -21,7 +21,7 @@ var (
ErrNoKeyFound = errors.New("wrong password or no key found")
// ErrMaxKeysReached is returned when the maximum number of keys was checked and no key could be found.
- ErrMaxKeysReached = errors.Fatal("maximum number of keys reached")
+ ErrMaxKeysReached = errors.New("maximum number of keys reached")
)
// Key represents an encrypted master key for a repository.
diff --git a/internal/repository/repack.go b/internal/repository/repack.go
index 6adff69f4..c82e63f28 100644
--- a/internal/repository/repack.go
+++ b/internal/repository/repack.go
@@ -29,7 +29,7 @@ func Repack(ctx context.Context, repo restic.Repository, dstRepo restic.Reposito
debug.Log("repacking %d packs while keeping %d blobs", len(packs), keepBlobs.Len())
if repo == dstRepo && dstRepo.Connections() < 2 {
- return nil, errors.Fatal("repack step requires a backend connection limit of at least two")
+ return nil, errors.New("repack step requires a backend connection limit of at least two")
}
wg, wgCtx := errgroup.WithContext(ctx)
diff --git a/internal/repository/repository.go b/internal/repository/repository.go
index df8a6fb68..653c1f774 100644
--- a/internal/repository/repository.go
+++ b/internal/repository/repository.go
@@ -7,6 +7,7 @@ import (
"fmt"
"io"
"os"
+ "runtime"
"sort"
"sync"
@@ -110,16 +111,16 @@ func (c *CompressionMode) Type() string {
// New returns a new repository with backend be.
func New(be restic.Backend, opts Options) (*Repository, error) {
if opts.Compression == CompressionInvalid {
- return nil, errors.Fatalf("invalid compression mode")
+ return nil, errors.New("invalid compression mode")
}
if opts.PackSize == 0 {
opts.PackSize = DefaultPackSize
}
if opts.PackSize > MaxPackSize {
- return nil, errors.Fatalf("pack size larger than limit of %v MiB", MaxPackSize/1024/1024)
+ return nil, fmt.Errorf("pack size larger than limit of %v MiB", MaxPackSize/1024/1024)
} else if opts.PackSize < MinPackSize {
- return nil, errors.Fatalf("pack size smaller than minimum of %v MiB", MinPackSize/1024/1024)
+ return nil, fmt.Errorf("pack size smaller than minimum of %v MiB", MinPackSize/1024/1024)
}
repo := &Repository{
@@ -170,14 +171,8 @@ func (r *Repository) SetDryRun() {
r.be = dryrun.New(r.be)
}
-// LoadUnpacked loads and decrypts the file with the given type and ID, using
-// the supplied buffer (which must be empty). If the buffer is nil, a new
-// buffer will be allocated and returned.
-func (r *Repository) LoadUnpacked(ctx context.Context, t restic.FileType, id restic.ID, buf []byte) ([]byte, error) {
- if len(buf) != 0 {
- panic("buf is not empty")
- }
-
+// LoadUnpacked loads and decrypts the file with the given type and ID.
+func (r *Repository) LoadUnpacked(ctx context.Context, t restic.FileType, id restic.ID) ([]byte, error) {
debug.Log("load %v with id %v", t, id)
if t == restic.ConfigFile {
@@ -189,15 +184,17 @@ func (r *Repository) LoadUnpacked(ctx context.Context, t restic.FileType, id res
h := restic.Handle{Type: t, Name: id.String()}
retriedInvalidData := false
var dataErr error
+ wr := new(bytes.Buffer)
+
err := r.be.Load(ctx, h, 0, 0, func(rd io.Reader) error {
// make sure this call is idempotent, in case an error occurs
- wr := bytes.NewBuffer(buf[:0])
+ wr.Reset()
_, cerr := io.Copy(wr, rd)
if cerr != nil {
return cerr
}
- buf = wr.Bytes()
+ buf := wr.Bytes()
if t != restic.ConfigFile && !restic.Hash(buf).Equal(id) {
debug.Log("retry loading broken blob %v", h)
if !retriedInvalidData {
@@ -221,6 +218,7 @@ func (r *Repository) LoadUnpacked(ctx context.Context, t restic.FileType, id res
return nil, err
}
+ buf := wr.Bytes()
nonce, ciphertext := buf[:r.key.NonceSize()], buf[r.key.NonceSize():]
plaintext, err := r.key.Open(ciphertext[:0], nonce, ciphertext, nil)
if err != nil {
@@ -596,7 +594,7 @@ func (r *Repository) LoadIndex(ctx context.Context) error {
})
if err != nil {
- return errors.Fatal(err.Error())
+ return err
}
err = r.idx.MergeFinalIndexes()
@@ -604,6 +602,9 @@ func (r *Repository) LoadIndex(ctx context.Context) error {
return err
}
+ // Trigger GC to reset garbage collection threshold
+ runtime.GC()
+
if r.cfg.Version < 2 {
// sanity check
ctx, cancel := context.WithCancel(ctx)
@@ -616,7 +617,7 @@ func (r *Repository) LoadIndex(ctx context.Context) error {
}
})
if invalidIndex {
- return errors.Fatal("index uses feature not supported by repository version 1")
+ return errors.New("index uses feature not supported by repository version 1")
}
}
@@ -681,7 +682,7 @@ func (r *Repository) CreateIndexFromPacks(ctx context.Context, packsize map[rest
err = wg.Wait()
if err != nil {
- return invalid, errors.Fatal(err.Error())
+ return invalid, err
}
return invalid, nil
@@ -726,9 +727,9 @@ func (r *Repository) SearchKey(ctx context.Context, password string, maxKeys int
r.keyID = key.ID()
cfg, err := restic.LoadConfig(ctx, r)
if err == crypto.ErrUnauthenticated {
- return errors.Fatalf("config or key %v is damaged: %v", key.ID(), err)
+ return fmt.Errorf("config or key %v is damaged: %w", key.ID(), err)
} else if err != nil {
- return errors.Fatalf("config cannot be loaded: %v", err)
+ return fmt.Errorf("config cannot be loaded: %w", err)
}
r.setConfig(cfg)
diff --git a/internal/repository/repository_test.go b/internal/repository/repository_test.go
index 6c04f1f95..f26bf46f2 100644
--- a/internal/repository/repository_test.go
+++ b/internal/repository/repository_test.go
@@ -21,7 +21,6 @@ import (
"github.com/restic/restic/internal/index"
"github.com/restic/restic/internal/repository"
"github.com/restic/restic/internal/restic"
- "github.com/restic/restic/internal/test"
rtest "github.com/restic/restic/internal/test"
"golang.org/x/sync/errgroup"
)
@@ -232,7 +231,7 @@ func benchmarkLoadUnpacked(b *testing.B, version uint) {
b.SetBytes(int64(length))
for i := 0; i < b.N; i++ {
- data, err := repo.LoadUnpacked(context.TODO(), restic.PackFile, storageID, nil)
+ data, err := repo.LoadUnpacked(context.TODO(), restic.PackFile, storageID)
rtest.OK(b, err)
// See comment in BenchmarkLoadBlob.
@@ -261,7 +260,7 @@ func TestRepositoryLoadIndex(t *testing.T) {
// loadIndex loads the index id from backend and returns it.
func loadIndex(ctx context.Context, repo restic.Repository, id restic.ID) (*index.Index, error) {
- buf, err := repo.LoadUnpacked(ctx, restic.IndexFile, id, nil)
+ buf, err := repo.LoadUnpacked(ctx, restic.IndexFile, id)
if err != nil {
return nil, err
}
@@ -289,7 +288,7 @@ func TestRepositoryLoadUnpackedBroken(t *testing.T) {
rtest.OK(t, err)
// without a retry backend this will just return an error that the file is broken
- _, err = repo.LoadUnpacked(context.TODO(), restic.IndexFile, id, nil)
+ _, err = repo.LoadUnpacked(context.TODO(), restic.IndexFile, id)
if err == nil {
t.Fatal("missing expected error")
}
@@ -322,7 +321,7 @@ func TestRepositoryLoadUnpackedRetryBroken(t *testing.T) {
rtest.OK(t, err)
repo, err := repository.New(&damageOnceBackend{Backend: be}, repository.Options{})
rtest.OK(t, err)
- err = repo.SearchKey(context.TODO(), test.TestPassword, 10, "")
+ err = repo.SearchKey(context.TODO(), rtest.TestPassword, 10, "")
rtest.OK(t, err)
rtest.OK(t, repo.LoadIndex(context.TODO()))
@@ -347,6 +346,7 @@ func benchmarkLoadIndex(b *testing.B, version uint) {
},
})
}
+ idx.Finalize()
id, err := index.SaveIndex(context.TODO(), repo, idx)
rtest.OK(b, err)
@@ -428,7 +428,7 @@ func testRepositoryIncrementalIndex(t *testing.T, version uint) {
}
// buildPackfileWithoutHeader returns a manually built pack file without a header.
-func buildPackfileWithoutHeader(t testing.TB, blobSizes []int, key *crypto.Key, compress bool) (blobs []restic.Blob, packfile []byte) {
+func buildPackfileWithoutHeader(blobSizes []int, key *crypto.Key, compress bool) (blobs []restic.Blob, packfile []byte) {
opts := []zstd.EOption{
// Set the compression level configured.
zstd.WithEncoderLevel(zstd.SpeedDefault),
@@ -446,7 +446,7 @@ func buildPackfileWithoutHeader(t testing.TB, blobSizes []int, key *crypto.Key,
var offset uint
for i, size := range blobSizes {
- plaintext := test.Random(800+i, size)
+ plaintext := rtest.Random(800+i, size)
id := restic.Hash(plaintext)
uncompressedLength := uint(0)
if compress {
@@ -525,7 +525,7 @@ func testStreamPack(t *testing.T, version uint) {
t.Fatal("test does not suport repository version", version)
}
- packfileBlobs, packfile := buildPackfileWithoutHeader(t, blobSizes, &key, compress)
+ packfileBlobs, packfile := buildPackfileWithoutHeader(blobSizes, &key, compress)
loadCalls := 0
load := func(ctx context.Context, h restic.Handle, length int, offset int64, fn func(rd io.Reader) error) error {
diff --git a/internal/repository/testing.go b/internal/repository/testing.go
index 879650336..4936cc368 100644
--- a/internal/repository/testing.go
+++ b/internal/repository/testing.go
@@ -34,7 +34,7 @@ func TestUseLowSecurityKDFParameters(t logger) {
}
// TestBackend returns a fully configured in-memory backend.
-func TestBackend(t testing.TB) restic.Backend {
+func TestBackend(_ testing.TB) restic.Backend {
return mem.New()
}
diff --git a/internal/restic/backend.go b/internal/restic/backend.go
index bc139fc8b..df3281641 100644
--- a/internal/restic/backend.go
+++ b/internal/restic/backend.go
@@ -70,8 +70,43 @@ type Backend interface {
Delete(ctx context.Context) error
}
+type BackendUnwrapper interface {
+ // Unwrap returns the underlying backend or nil if there is none.
+ Unwrap() Backend
+}
+
+func AsBackend[B Backend](b Backend) B {
+ for b != nil {
+ if be, ok := b.(B); ok {
+ return be
+ }
+
+ if be, ok := b.(BackendUnwrapper); ok {
+ b = be.Unwrap()
+ } else {
+ // not the backend we're looking for
+ break
+ }
+ }
+ var be B
+ return be
+}
+
+type FreezeBackend interface {
+ Backend
+ // Freeze blocks all backend operations except those on lock files
+ Freeze()
+ // Unfreeze allows all backend operations to continue
+ Unfreeze()
+}
+
// FileInfo is contains information about a file in the backend.
type FileInfo struct {
Size int64
Name string
}
+
+// ApplyEnvironmenter fills in a backend configuration from the environment
+type ApplyEnvironmenter interface {
+ ApplyEnvironment(prefix string)
+}
diff --git a/internal/restic/backend_test.go b/internal/restic/backend_test.go
new file mode 100644
index 000000000..a970eb5b3
--- /dev/null
+++ b/internal/restic/backend_test.go
@@ -0,0 +1,38 @@
+package restic_test
+
+import (
+ "testing"
+
+ "github.com/restic/restic/internal/restic"
+ "github.com/restic/restic/internal/test"
+)
+
+type testBackend struct {
+ restic.Backend
+}
+
+func (t *testBackend) Unwrap() restic.Backend {
+ return nil
+}
+
+type otherTestBackend struct {
+ restic.Backend
+}
+
+func (t *otherTestBackend) Unwrap() restic.Backend {
+ return t.Backend
+}
+
+func TestAsBackend(t *testing.T) {
+ other := otherTestBackend{}
+ test.Assert(t, restic.AsBackend[*testBackend](other) == nil, "otherTestBackend is not a testBackend backend")
+
+ testBe := &testBackend{}
+ test.Assert(t, restic.AsBackend[*testBackend](testBe) == testBe, "testBackend was not returned")
+
+ wrapper := &otherTestBackend{Backend: testBe}
+ test.Assert(t, restic.AsBackend[*testBackend](wrapper) == testBe, "failed to unwrap testBackend backend")
+
+ wrapper.Backend = other
+ test.Assert(t, restic.AsBackend[*testBackend](wrapper) == nil, "a wrapped otherTestBackend is not a testBackend")
+}
diff --git a/internal/restic/config_test.go b/internal/restic/config_test.go
index 662a2e69e..5a7f6b0ae 100644
--- a/internal/restic/config_test.go
+++ b/internal/restic/config_test.go
@@ -12,7 +12,7 @@ type saver struct {
fn func(restic.FileType, []byte) (restic.ID, error)
}
-func (s saver) SaveUnpacked(ctx context.Context, t restic.FileType, buf []byte) (restic.ID, error) {
+func (s saver) SaveUnpacked(_ context.Context, t restic.FileType, buf []byte) (restic.ID, error) {
return s.fn(t, buf)
}
@@ -21,11 +21,11 @@ func (s saver) Connections() uint {
}
type loader struct {
- fn func(restic.FileType, restic.ID, []byte) ([]byte, error)
+ fn func(restic.FileType, restic.ID) ([]byte, error)
}
-func (l loader) LoadUnpacked(ctx context.Context, t restic.FileType, id restic.ID, buf []byte) (data []byte, err error) {
- return l.fn(t, id, buf)
+func (l loader) LoadUnpacked(_ context.Context, t restic.FileType, id restic.ID) (data []byte, err error) {
+ return l.fn(t, id)
}
func (l loader) Connections() uint {
@@ -49,7 +49,7 @@ func TestConfig(t *testing.T) {
err = restic.SaveConfig(context.TODO(), saver{save}, cfg1)
rtest.OK(t, err)
- load := func(tpe restic.FileType, id restic.ID, in []byte) ([]byte, error) {
+ load := func(tpe restic.FileType, id restic.ID) ([]byte, error) {
rtest.Assert(t, tpe == restic.ConfigFile,
"wrong backend type: got %v, wanted %v",
tpe, restic.ConfigFile)
diff --git a/internal/restic/find_test.go b/internal/restic/find_test.go
index f5e288b9d..1ae30ded9 100644
--- a/internal/restic/find_test.go
+++ b/internal/restic/find_test.go
@@ -88,7 +88,7 @@ func TestFindUsedBlobs(t *testing.T) {
var snapshots []*restic.Snapshot
for i := 0; i < findTestSnapshots; i++ {
- sn := restic.TestCreateSnapshot(t, repo, findTestTime.Add(time.Duration(i)*time.Second), findTestDepth, 0)
+ sn := restic.TestCreateSnapshot(t, repo, findTestTime.Add(time.Duration(i)*time.Second), findTestDepth)
t.Logf("snapshot %v saved, tree %v", sn.ID().Str(), sn.Tree.Str())
snapshots = append(snapshots, sn)
}
@@ -131,7 +131,7 @@ func TestMultiFindUsedBlobs(t *testing.T) {
var snapshotTrees restic.IDs
for i := 0; i < findTestSnapshots; i++ {
- sn := restic.TestCreateSnapshot(t, repo, findTestTime.Add(time.Duration(i)*time.Second), findTestDepth, 0)
+ sn := restic.TestCreateSnapshot(t, repo, findTestTime.Add(time.Duration(i)*time.Second), findTestDepth)
t.Logf("snapshot %v saved, tree %v", sn.ID().Str(), sn.Tree.Str())
snapshotTrees = append(snapshotTrees, *sn.Tree)
}
@@ -166,7 +166,7 @@ func (r ForbiddenRepo) LoadBlob(context.Context, restic.BlobType, restic.ID, []b
return nil, errors.New("should not be called")
}
-func (r ForbiddenRepo) LookupBlobSize(id restic.ID, tpe restic.BlobType) (uint, bool) {
+func (r ForbiddenRepo) LookupBlobSize(_ restic.ID, _ restic.BlobType) (uint, bool) {
return 0, false
}
@@ -177,7 +177,7 @@ func (r ForbiddenRepo) Connections() uint {
func TestFindUsedBlobsSkipsSeenBlobs(t *testing.T) {
repo := repository.TestRepository(t)
- snapshot := restic.TestCreateSnapshot(t, repo, findTestTime, findTestDepth, 0)
+ snapshot := restic.TestCreateSnapshot(t, repo, findTestTime, findTestDepth)
t.Logf("snapshot %v saved, tree %v", snapshot.ID().Str(), snapshot.Tree.Str())
usedBlobs := restic.NewBlobSet()
@@ -195,7 +195,7 @@ func TestFindUsedBlobsSkipsSeenBlobs(t *testing.T) {
func BenchmarkFindUsedBlobs(b *testing.B) {
repo := repository.TestRepository(b)
- sn := restic.TestCreateSnapshot(b, repo, findTestTime, findTestDepth, 0)
+ sn := restic.TestCreateSnapshot(b, repo, findTestTime, findTestDepth)
b.ResetTimer()
diff --git a/internal/restic/json.go b/internal/restic/json.go
index 6ad4b5f39..05d049b59 100644
--- a/internal/restic/json.go
+++ b/internal/restic/json.go
@@ -11,7 +11,7 @@ import (
// LoadJSONUnpacked decrypts the data and afterwards calls json.Unmarshal on
// the item.
func LoadJSONUnpacked(ctx context.Context, repo LoaderUnpacked, t FileType, id ID, item interface{}) (err error) {
- buf, err := repo.LoadUnpacked(ctx, t, id, nil)
+ buf, err := repo.LoadUnpacked(ctx, t, id)
if err != nil {
return err
}
diff --git a/internal/restic/lock.go b/internal/restic/lock.go
index d500c019a..a65ed6b5c 100644
--- a/internal/restic/lock.go
+++ b/internal/restic/lock.go
@@ -81,6 +81,8 @@ func IsInvalidLock(err error) bool {
return errors.As(err, &e)
}
+var ErrRemovedLock = errors.New("lock file was removed in the meantime")
+
// NewLock returns a new, non-exclusive lock for the repository. If an
// exclusive lock is already held by another process, it returns an error
// that satisfies IsAlreadyLocked.
@@ -274,6 +276,68 @@ func (l *Lock) Refresh(ctx context.Context) error {
return l.repo.Backend().Remove(context.TODO(), Handle{Type: LockFile, Name: oldLockID.String()})
}
+// RefreshStaleLock is an extended variant of Refresh that can also refresh stale lock files.
+func (l *Lock) RefreshStaleLock(ctx context.Context) error {
+ debug.Log("refreshing stale lock %v", l.lockID)
+ // refreshing a stale lock is possible if it still exists and continues to do
+ // so until after creating a new lock. The initial check avoids creating a new
+ // lock file if this lock was already removed in the meantime.
+ exists, err := l.checkExistence(ctx)
+ if err != nil {
+ return err
+ } else if !exists {
+ return ErrRemovedLock
+ }
+
+ l.lock.Lock()
+ l.Time = time.Now()
+ l.lock.Unlock()
+ id, err := l.createLock(ctx)
+ if err != nil {
+ return err
+ }
+
+ time.Sleep(waitBeforeLockCheck)
+
+ exists, err = l.checkExistence(ctx)
+ if err != nil {
+ // cleanup replacement lock
+ _ = l.repo.Backend().Remove(context.TODO(), Handle{Type: LockFile, Name: id.String()})
+ return err
+ }
+
+ if !exists {
+ // cleanup replacement lock
+ _ = l.repo.Backend().Remove(context.TODO(), Handle{Type: LockFile, Name: id.String()})
+ return ErrRemovedLock
+ }
+
+ l.lock.Lock()
+ defer l.lock.Unlock()
+
+ debug.Log("new lock ID %v", id)
+ oldLockID := l.lockID
+ l.lockID = &id
+
+ return l.repo.Backend().Remove(context.TODO(), Handle{Type: LockFile, Name: oldLockID.String()})
+}
+
+func (l *Lock) checkExistence(ctx context.Context) (bool, error) {
+ l.lock.Lock()
+ defer l.lock.Unlock()
+
+ exists := false
+
+ err := l.repo.Backend().List(ctx, LockFile, func(fi FileInfo) error {
+ if fi.Name == l.lockID.String() {
+ exists = true
+ }
+ return nil
+ })
+
+ return exists, err
+}
+
func (l *Lock) String() string {
l.lock.Lock()
defer l.lock.Unlock()
diff --git a/internal/restic/lock_test.go b/internal/restic/lock_test.go
index 2d14499bd..f3c405c9c 100644
--- a/internal/restic/lock_test.go
+++ b/internal/restic/lock_test.go
@@ -16,6 +16,7 @@ import (
func TestLock(t *testing.T) {
repo := repository.TestRepository(t)
+ restic.TestSetLockTimeout(t, 5*time.Millisecond)
lock, err := restic.NewLock(context.TODO(), repo)
rtest.OK(t, err)
@@ -25,6 +26,7 @@ func TestLock(t *testing.T) {
func TestDoubleUnlock(t *testing.T) {
repo := repository.TestRepository(t)
+ restic.TestSetLockTimeout(t, 5*time.Millisecond)
lock, err := restic.NewLock(context.TODO(), repo)
rtest.OK(t, err)
@@ -38,6 +40,7 @@ func TestDoubleUnlock(t *testing.T) {
func TestMultipleLock(t *testing.T) {
repo := repository.TestRepository(t)
+ restic.TestSetLockTimeout(t, 5*time.Millisecond)
lock1, err := restic.NewLock(context.TODO(), repo)
rtest.OK(t, err)
@@ -63,6 +66,7 @@ func (be *failLockLoadingBackend) Load(ctx context.Context, h restic.Handle, len
func TestMultipleLockFailure(t *testing.T) {
be := &failLockLoadingBackend{Backend: mem.New()}
repo := repository.TestRepositoryWithBackend(t, be, 0)
+ restic.TestSetLockTimeout(t, 5*time.Millisecond)
lock1, err := restic.NewLock(context.TODO(), repo)
rtest.OK(t, err)
@@ -83,6 +87,7 @@ func TestLockExclusive(t *testing.T) {
func TestLockOnExclusiveLockedRepo(t *testing.T) {
repo := repository.TestRepository(t)
+ restic.TestSetLockTimeout(t, 5*time.Millisecond)
elock, err := restic.NewExclusiveLock(context.TODO(), repo)
rtest.OK(t, err)
@@ -99,6 +104,7 @@ func TestLockOnExclusiveLockedRepo(t *testing.T) {
func TestExclusiveLockOnLockedRepo(t *testing.T) {
repo := repository.TestRepository(t)
+ restic.TestSetLockTimeout(t, 5*time.Millisecond)
elock, err := restic.NewLock(context.TODO(), repo)
rtest.OK(t, err)
@@ -247,15 +253,10 @@ func TestRemoveAllLocks(t *testing.T) {
3, processed)
}
-func TestLockRefresh(t *testing.T) {
- repo := repository.TestRepository(t)
-
- lock, err := restic.NewLock(context.TODO(), repo)
- rtest.OK(t, err)
- time0 := lock.Time
-
+func checkSingleLock(t *testing.T, repo restic.Repository) restic.ID {
+ t.Helper()
var lockID *restic.ID
- err = repo.List(context.TODO(), restic.LockFile, func(id restic.ID, size int64) error {
+ err := repo.List(context.TODO(), restic.LockFile, func(id restic.ID, size int64) error {
if lockID != nil {
t.Error("more than one lock found")
}
@@ -265,27 +266,59 @@ func TestLockRefresh(t *testing.T) {
if err != nil {
t.Fatal(err)
}
+ if lockID == nil {
+ t.Fatal("no lock found")
+ }
+ return *lockID
+}
+
+func testLockRefresh(t *testing.T, refresh func(lock *restic.Lock) error) {
+ repo := repository.TestRepository(t)
+ restic.TestSetLockTimeout(t, 5*time.Millisecond)
+
+ lock, err := restic.NewLock(context.TODO(), repo)
+ rtest.OK(t, err)
+ time0 := lock.Time
+
+ lockID := checkSingleLock(t, repo)
time.Sleep(time.Millisecond)
- rtest.OK(t, lock.Refresh(context.TODO()))
+ rtest.OK(t, refresh(lock))
- var lockID2 *restic.ID
- err = repo.List(context.TODO(), restic.LockFile, func(id restic.ID, size int64) error {
- if lockID2 != nil {
- t.Error("more than one lock found")
- }
- lockID2 = &id
- return nil
- })
- if err != nil {
- t.Fatal(err)
- }
+ lockID2 := checkSingleLock(t, repo)
- rtest.Assert(t, !lockID.Equal(*lockID2),
+ rtest.Assert(t, !lockID.Equal(lockID2),
"expected a new ID after lock refresh, got the same")
- lock2, err := restic.LoadLock(context.TODO(), repo, *lockID2)
+ lock2, err := restic.LoadLock(context.TODO(), repo, lockID2)
rtest.OK(t, err)
rtest.Assert(t, lock2.Time.After(time0),
"expected a later timestamp after lock refresh")
rtest.OK(t, lock.Unlock())
}
+
+func TestLockRefresh(t *testing.T) {
+ testLockRefresh(t, func(lock *restic.Lock) error {
+ return lock.Refresh(context.TODO())
+ })
+}
+
+func TestLockRefreshStale(t *testing.T) {
+ testLockRefresh(t, func(lock *restic.Lock) error {
+ return lock.RefreshStaleLock(context.TODO())
+ })
+}
+
+func TestLockRefreshStaleMissing(t *testing.T) {
+ repo := repository.TestRepository(t)
+ restic.TestSetLockTimeout(t, 5*time.Millisecond)
+
+ lock, err := restic.NewLock(context.TODO(), repo)
+ rtest.OK(t, err)
+ lockID := checkSingleLock(t, repo)
+
+ // refresh must fail if lock was removed
+ rtest.OK(t, repo.Backend().Remove(context.TODO(), restic.Handle{Type: restic.LockFile, Name: lockID.String()}))
+ time.Sleep(time.Millisecond)
+ err = lock.RefreshStaleLock(context.TODO())
+ rtest.Assert(t, err == restic.ErrRemovedLock, "unexpected error, expected %v, got %v", restic.ErrRemovedLock, err)
+}
diff --git a/internal/restic/node.go b/internal/restic/node.go
index a1aff18ac..edb49bfca 100644
--- a/internal/restic/node.go
+++ b/internal/restic/node.go
@@ -10,6 +10,7 @@ import (
"sync"
"syscall"
"time"
+ "unicode/utf8"
"github.com/restic/restic/internal/errors"
@@ -27,21 +28,26 @@ type ExtendedAttribute struct {
// Node is a file, directory or other item in a backup.
type Node struct {
- Name string `json:"name"`
- Type string `json:"type"`
- Mode os.FileMode `json:"mode,omitempty"`
- ModTime time.Time `json:"mtime,omitempty"`
- AccessTime time.Time `json:"atime,omitempty"`
- ChangeTime time.Time `json:"ctime,omitempty"`
- UID uint32 `json:"uid"`
- GID uint32 `json:"gid"`
- User string `json:"user,omitempty"`
- Group string `json:"group,omitempty"`
- Inode uint64 `json:"inode,omitempty"`
- DeviceID uint64 `json:"device_id,omitempty"` // device id of the file, stat.st_dev
- Size uint64 `json:"size,omitempty"`
- Links uint64 `json:"links,omitempty"`
- LinkTarget string `json:"linktarget,omitempty"`
+ Name string `json:"name"`
+ Type string `json:"type"`
+ Mode os.FileMode `json:"mode,omitempty"`
+ ModTime time.Time `json:"mtime,omitempty"`
+ AccessTime time.Time `json:"atime,omitempty"`
+ ChangeTime time.Time `json:"ctime,omitempty"`
+ UID uint32 `json:"uid"`
+ GID uint32 `json:"gid"`
+ User string `json:"user,omitempty"`
+ Group string `json:"group,omitempty"`
+ Inode uint64 `json:"inode,omitempty"`
+ DeviceID uint64 `json:"device_id,omitempty"` // device id of the file, stat.st_dev
+ Size uint64 `json:"size,omitempty"`
+ Links uint64 `json:"links,omitempty"`
+ LinkTarget string `json:"linktarget,omitempty"`
+ // implicitly base64-encoded field. Only used while encoding, `linktarget_raw` will overwrite LinkTarget if present.
+ // This allows storing arbitrary byte-sequences, which are possible as symlink targets on unix systems,
+ // as LinkTarget without breaking backwards-compatibility.
+	// Must only be set if the linktarget cannot be encoded as valid utf8.
+ LinkTargetRaw []byte `json:"linktarget_raw,omitempty"`
ExtendedAttributes []ExtendedAttribute `json:"extended_attributes,omitempty"`
Device uint64 `json:"device,omitempty"` // in case of Type == "dev", stat.st_rdev
Content IDs `json:"content"`
@@ -344,6 +350,13 @@ func (node Node) MarshalJSON() ([]byte, error) {
nj := nodeJSON(node)
name := strconv.Quote(node.Name)
nj.Name = name[1 : len(name)-1]
+ if nj.LinkTargetRaw != nil {
+ panic("LinkTargetRaw must not be set manually")
+ }
+ if !utf8.ValidString(node.LinkTarget) {
+ // store raw bytes if invalid utf8
+ nj.LinkTargetRaw = []byte(node.LinkTarget)
+ }
return json.Marshal(nj)
}
@@ -358,7 +371,14 @@ func (node *Node) UnmarshalJSON(data []byte) error {
}
nj.Name, err = strconv.Unquote(`"` + nj.Name + `"`)
- return errors.Wrap(err, "Unquote")
+ if err != nil {
+ return errors.Wrap(err, "Unquote")
+ }
+ if nj.LinkTargetRaw != nil {
+ nj.LinkTarget = string(nj.LinkTargetRaw)
+ nj.LinkTargetRaw = nil
+ }
+ return nil
}
func (node Node) Equals(other Node) bool {
@@ -605,18 +625,10 @@ func (node *Node) fillExtra(path string, fi os.FileInfo) error {
return errors.Errorf("invalid node type %q", node.Type)
}
- if err := node.fillExtendedAttributes(path); err != nil {
- return err
- }
-
- return nil
+ return node.fillExtendedAttributes(path)
}
func (node *Node) fillExtendedAttributes(path string) error {
- if node.Type == "symlink" {
- return nil
- }
-
xattrs, err := Listxattr(path)
debug.Log("fillExtendedAttributes(%v) %v %v", path, xattrs, err)
if err != nil {
diff --git a/internal/restic/node_test.go b/internal/restic/node_test.go
index 60342e9a4..aae010421 100644
--- a/internal/restic/node_test.go
+++ b/internal/restic/node_test.go
@@ -2,13 +2,17 @@ package restic_test
import (
"context"
+ "encoding/json"
+ "fmt"
"os"
"path/filepath"
+ "reflect"
"runtime"
"testing"
"time"
"github.com/restic/restic/internal/restic"
+ "github.com/restic/restic/internal/test"
rtest "github.com/restic/restic/internal/test"
)
@@ -163,58 +167,99 @@ var nodeTests = []restic.Node{
AccessTime: parseTime("2005-05-14 21:07:04.222"),
ChangeTime: parseTime("2005-05-14 21:07:05.333"),
},
+ {
+ Name: "testXattrFile",
+ Type: "file",
+ Content: restic.IDs{},
+ UID: uint32(os.Getuid()),
+ GID: uint32(os.Getgid()),
+ Mode: 0604,
+ ModTime: parseTime("2005-05-14 21:07:03.111"),
+ AccessTime: parseTime("2005-05-14 21:07:04.222"),
+ ChangeTime: parseTime("2005-05-14 21:07:05.333"),
+ ExtendedAttributes: []restic.ExtendedAttribute{
+ {"user.foo", []byte("bar")},
+ },
+ },
+ {
+ Name: "testXattrDir",
+ Type: "dir",
+ Subtree: nil,
+ UID: uint32(os.Getuid()),
+ GID: uint32(os.Getgid()),
+ Mode: 0750 | os.ModeDir,
+ ModTime: parseTime("2005-05-14 21:07:03.111"),
+ AccessTime: parseTime("2005-05-14 21:07:04.222"),
+ ChangeTime: parseTime("2005-05-14 21:07:05.333"),
+ ExtendedAttributes: []restic.ExtendedAttribute{
+ {"user.foo", []byte("bar")},
+ },
+ },
}
func TestNodeRestoreAt(t *testing.T) {
- tempdir, err := os.MkdirTemp(rtest.TestTempDir, "restic-test-")
- rtest.OK(t, err)
-
- defer func() {
- if rtest.TestCleanupTempDirs {
- rtest.RemoveAll(t, tempdir)
- } else {
- t.Logf("leaving tempdir at %v", tempdir)
- }
- }()
+ tempdir := t.TempDir()
for _, test := range nodeTests {
- nodePath := filepath.Join(tempdir, test.Name)
- rtest.OK(t, test.CreateAt(context.TODO(), nodePath, nil))
- rtest.OK(t, test.RestoreMetadata(nodePath))
-
- if test.Type == "dir" {
- rtest.OK(t, test.RestoreTimestamps(nodePath))
- }
+ t.Run("", func(t *testing.T) {
+ var nodePath string
+ if test.ExtendedAttributes != nil {
+ if runtime.GOOS == "windows" {
+ // restic does not support xattrs on windows
+ return
+ }
- fi, err := os.Lstat(nodePath)
- rtest.OK(t, err)
+ // tempdir might be backed by a filesystem that does not support
+ // extended attributes
+ nodePath = test.Name
+ defer func() {
+ _ = os.Remove(nodePath)
+ }()
+ } else {
+ nodePath = filepath.Join(tempdir, test.Name)
+ }
+ rtest.OK(t, test.CreateAt(context.TODO(), nodePath, nil))
+ rtest.OK(t, test.RestoreMetadata(nodePath))
- n2, err := restic.NodeFromFileInfo(nodePath, fi)
- rtest.OK(t, err)
+ if test.Type == "dir" {
+ rtest.OK(t, test.RestoreTimestamps(nodePath))
+ }
- rtest.Assert(t, test.Name == n2.Name,
- "%v: name doesn't match (%v != %v)", test.Type, test.Name, n2.Name)
- rtest.Assert(t, test.Type == n2.Type,
- "%v: type doesn't match (%v != %v)", test.Type, test.Type, n2.Type)
- rtest.Assert(t, test.Size == n2.Size,
- "%v: size doesn't match (%v != %v)", test.Size, test.Size, n2.Size)
-
- if runtime.GOOS != "windows" {
- rtest.Assert(t, test.UID == n2.UID,
- "%v: UID doesn't match (%v != %v)", test.Type, test.UID, n2.UID)
- rtest.Assert(t, test.GID == n2.GID,
- "%v: GID doesn't match (%v != %v)", test.Type, test.GID, n2.GID)
- if test.Type != "symlink" {
- // On OpenBSD only root can set sticky bit (see sticky(8)).
- if runtime.GOOS != "openbsd" && runtime.GOOS != "netbsd" && runtime.GOOS != "solaris" && test.Name == "testSticky" {
- rtest.Assert(t, test.Mode == n2.Mode,
- "%v: mode doesn't match (0%o != 0%o)", test.Type, test.Mode, n2.Mode)
+ fi, err := os.Lstat(nodePath)
+ rtest.OK(t, err)
+
+ n2, err := restic.NodeFromFileInfo(nodePath, fi)
+ rtest.OK(t, err)
+
+ rtest.Assert(t, test.Name == n2.Name,
+ "%v: name doesn't match (%v != %v)", test.Type, test.Name, n2.Name)
+ rtest.Assert(t, test.Type == n2.Type,
+ "%v: type doesn't match (%v != %v)", test.Type, test.Type, n2.Type)
+ rtest.Assert(t, test.Size == n2.Size,
+ "%v: size doesn't match (%v != %v)", test.Size, test.Size, n2.Size)
+
+ if runtime.GOOS != "windows" {
+ rtest.Assert(t, test.UID == n2.UID,
+ "%v: UID doesn't match (%v != %v)", test.Type, test.UID, n2.UID)
+ rtest.Assert(t, test.GID == n2.GID,
+ "%v: GID doesn't match (%v != %v)", test.Type, test.GID, n2.GID)
+ if test.Type != "symlink" {
+ // On OpenBSD only root can set sticky bit (see sticky(8)).
+ if runtime.GOOS != "openbsd" && runtime.GOOS != "netbsd" && runtime.GOOS != "solaris" && test.Name == "testSticky" {
+ rtest.Assert(t, test.Mode == n2.Mode,
+ "%v: mode doesn't match (0%o != 0%o)", test.Type, test.Mode, n2.Mode)
+ }
}
}
- }
- AssertFsTimeEqual(t, "AccessTime", test.Type, test.AccessTime, n2.AccessTime)
- AssertFsTimeEqual(t, "ModTime", test.Type, test.ModTime, n2.ModTime)
+ AssertFsTimeEqual(t, "AccessTime", test.Type, test.AccessTime, n2.AccessTime)
+ AssertFsTimeEqual(t, "ModTime", test.Type, test.ModTime, n2.ModTime)
+ if len(n2.ExtendedAttributes) == 0 {
+ n2.ExtendedAttributes = nil
+ }
+ rtest.Assert(t, reflect.DeepEqual(test.ExtendedAttributes, n2.ExtendedAttributes),
+ "%v: xattrs don't match (%v != %v)", test.Name, test.ExtendedAttributes, n2.ExtendedAttributes)
+ })
}
}
@@ -292,3 +337,38 @@ func TestFixTime(t *testing.T) {
})
}
}
+
+func TestSymlinkSerialization(t *testing.T) {
+ for _, link := range []string{
+ "válîd \t Üñi¢òde \n śẗŕinǵ",
+ string([]byte{0, 1, 2, 0xfa, 0xfb, 0xfc}),
+ } {
+ n := restic.Node{
+ LinkTarget: link,
+ }
+ ser, err := json.Marshal(n)
+ test.OK(t, err)
+ var n2 restic.Node
+ err = json.Unmarshal(ser, &n2)
+ test.OK(t, err)
+ fmt.Println(string(ser))
+
+ test.Equals(t, n.LinkTarget, n2.LinkTarget)
+ }
+}
+
+func TestSymlinkSerializationFormat(t *testing.T) {
+ for _, d := range []struct {
+ ser string
+ linkTarget string
+ }{
+ {`{"linktarget":"test"}`, "test"},
+ {`{"linktarget":"\u0000\u0001\u0002\ufffd\ufffd\ufffd","linktarget_raw":"AAEC+vv8"}`, string([]byte{0, 1, 2, 0xfa, 0xfb, 0xfc})},
+ } {
+ var n2 restic.Node
+ err := json.Unmarshal([]byte(d.ser), &n2)
+ test.OK(t, err)
+ test.Equals(t, d.linkTarget, n2.LinkTarget)
+ test.Assert(t, n2.LinkTargetRaw == nil, "quoted link target is just a helper field and must be unset after decoding")
+ }
+}
diff --git a/internal/restic/node_unix_test.go b/internal/restic/node_unix_test.go
index c4fef3710..374326bf7 100644
--- a/internal/restic/node_unix_test.go
+++ b/internal/restic/node_unix_test.go
@@ -5,10 +5,13 @@ package restic
import (
"os"
+ "path/filepath"
"runtime"
"syscall"
"testing"
"time"
+
+ rtest "github.com/restic/restic/internal/test"
)
func stat(t testing.TB, filename string) (fi os.FileInfo, ok bool) {
@@ -25,6 +28,7 @@ func stat(t testing.TB, filename string) (fi os.FileInfo, ok bool) {
}
func checkFile(t testing.TB, stat *syscall.Stat_t, node *Node) {
+ t.Helper()
if uint32(node.Mode.Perm()) != uint32(stat.Mode&0777) {
t.Errorf("Mode does not match, want %v, got %v", stat.Mode&0777, node.Mode)
}
@@ -37,7 +41,7 @@ func checkFile(t testing.TB, stat *syscall.Stat_t, node *Node) {
t.Errorf("Dev does not match, want %v, got %v", stat.Dev, node.DeviceID)
}
- if node.Size != uint64(stat.Size) {
+ if node.Size != uint64(stat.Size) && node.Type != "symlink" {
t.Errorf("Size does not match, want %v, got %v", stat.Size, node.Size)
}
@@ -83,6 +87,10 @@ func checkDevice(t testing.TB, stat *syscall.Stat_t, node *Node) {
}
func TestNodeFromFileInfo(t *testing.T) {
+ tmp := t.TempDir()
+ symlink := filepath.Join(tmp, "symlink")
+ rtest.OK(t, os.Symlink("target", symlink))
+
type Test struct {
filename string
canSkip bool
@@ -90,6 +98,7 @@ func TestNodeFromFileInfo(t *testing.T) {
var tests = []Test{
{"node_test.go", false},
{"/dev/sda", true},
+ {symlink, false},
}
// on darwin, users are not permitted to list the extended attributes of
@@ -125,7 +134,7 @@ func TestNodeFromFileInfo(t *testing.T) {
}
switch node.Type {
- case "file":
+ case "file", "symlink":
checkFile(t, s, node)
case "dev", "chardev":
checkFile(t, s, node)
diff --git a/internal/restic/node_xattr.go b/internal/restic/node_xattr.go
index a2eed39c0..ea9eafe94 100644
--- a/internal/restic/node_xattr.go
+++ b/internal/restic/node_xattr.go
@@ -13,20 +13,20 @@ import (
// Getxattr retrieves extended attribute data associated with path.
func Getxattr(path, name string) ([]byte, error) {
- b, err := xattr.Get(path, name)
+ b, err := xattr.LGet(path, name)
return b, handleXattrErr(err)
}
// Listxattr retrieves a list of names of extended attributes associated with the
// given path in the file system.
func Listxattr(path string) ([]string, error) {
- l, err := xattr.List(path)
+ l, err := xattr.LList(path)
return l, handleXattrErr(err)
}
// Setxattr associates name and data together as an attribute of path.
func Setxattr(path, name string, data []byte) error {
- return handleXattrErr(xattr.Set(path, name, data))
+ return handleXattrErr(xattr.LSet(path, name, data))
}
func handleXattrErr(err error) error {
diff --git a/internal/restic/parallel.go b/internal/restic/parallel.go
index df160f018..34a2a019c 100644
--- a/internal/restic/parallel.go
+++ b/internal/restic/parallel.go
@@ -41,7 +41,7 @@ func ParallelList(ctx context.Context, r Lister, t FileType, parallelism uint, f
// a worker receives an index ID from ch, loads the index, and sends it to indexCh
worker := func() error {
for fi := range ch {
- debug.Log("worker got file %v", fi.ID.Str())
+ debug.Log("worker got file %v/%v", t, fi.ID.Str())
err := fn(ctx, fi.ID, fi.Size)
if err != nil {
return err
diff --git a/internal/restic/repository.go b/internal/restic/repository.go
index e01d204e6..6990200e4 100644
--- a/internal/restic/repository.go
+++ b/internal/restic/repository.go
@@ -51,10 +51,8 @@ type Repository interface {
StartPackUploader(ctx context.Context, wg *errgroup.Group)
Flush(context.Context) error
- // LoadUnpacked loads and decrypts the file with the given type and ID,
- // using the supplied buffer (which must be empty). If the buffer is nil, a
- // new buffer will be allocated and returned.
- LoadUnpacked(ctx context.Context, t FileType, id ID, buf []byte) (data []byte, err error)
+ // LoadUnpacked loads and decrypts the file with the given type and ID.
+ LoadUnpacked(ctx context.Context, t FileType, id ID) (data []byte, err error)
SaveUnpacked(context.Context, FileType, []byte) (ID, error)
}
@@ -67,7 +65,7 @@ type Lister interface {
type LoaderUnpacked interface {
// Connections returns the maximum number of concurrent backend operations
Connections() uint
- LoadUnpacked(ctx context.Context, t FileType, id ID, buf []byte) (data []byte, err error)
+ LoadUnpacked(ctx context.Context, t FileType, id ID) (data []byte, err error)
}
// SaverUnpacked allows saving a blob not stored in a pack file
diff --git a/internal/restic/snapshot.go b/internal/restic/snapshot.go
index 58d863526..13e795ec8 100644
--- a/internal/restic/snapshot.go
+++ b/internal/restic/snapshot.go
@@ -25,6 +25,8 @@ type Snapshot struct {
Tags []string `json:"tags,omitempty"`
Original *ID `json:"original,omitempty"`
+ ProgramVersion string `json:"program_version,omitempty"`
+
id *ID // plaintext ID, used during restore
}
@@ -61,7 +63,7 @@ func LoadSnapshot(ctx context.Context, loader LoaderUnpacked, id ID) (*Snapshot,
sn := &Snapshot{id: &id}
err := LoadJSONUnpacked(ctx, loader, SnapshotFile, id, sn)
if err != nil {
- return nil, err
+ return nil, fmt.Errorf("failed to load snapshot %v: %w", id.Str(), err)
}
return sn, nil
diff --git a/internal/restic/snapshot_find.go b/internal/restic/snapshot_find.go
index 8d6f8c4b1..cb761aee3 100644
--- a/internal/restic/snapshot_find.go
+++ b/internal/restic/snapshot_find.go
@@ -4,6 +4,7 @@ import (
"context"
"fmt"
"path/filepath"
+ "strings"
"time"
"github.com/restic/restic/internal/errors"
@@ -82,37 +83,48 @@ func (f *SnapshotFilter) findLatest(ctx context.Context, be Lister, loader Loade
return latest, nil
}
+func splitSnapshotID(s string) (id, subfolder string) {
+ id, subfolder, _ = strings.Cut(s, ":")
+ return
+}
+
// FindSnapshot takes a string and tries to find a snapshot whose ID matches
// the string as closely as possible.
-func FindSnapshot(ctx context.Context, be Lister, loader LoaderUnpacked, s string) (*Snapshot, error) {
+func FindSnapshot(ctx context.Context, be Lister, loader LoaderUnpacked, s string) (*Snapshot, string, error) {
+ s, subfolder := splitSnapshotID(s)
+
// no need to list snapshots if `s` is already a full id
id, err := ParseID(s)
if err != nil {
// find snapshot id with prefix
id, err = Find(ctx, be, SnapshotFile, s)
if err != nil {
- return nil, err
+ return nil, "", err
}
}
- return LoadSnapshot(ctx, loader, id)
+ sn, err := LoadSnapshot(ctx, loader, id)
+ return sn, subfolder, err
}
// FindLatest returns either the latest of a filtered list of all snapshots
// or a snapshot specified by `snapshotID`.
-func (f *SnapshotFilter) FindLatest(ctx context.Context, be Lister, loader LoaderUnpacked, snapshotID string) (*Snapshot, error) {
- if snapshotID == "latest" {
+func (f *SnapshotFilter) FindLatest(ctx context.Context, be Lister, loader LoaderUnpacked, snapshotID string) (*Snapshot, string, error) {
+ id, subfolder := splitSnapshotID(snapshotID)
+ if id == "latest" {
sn, err := f.findLatest(ctx, be, loader)
if err == ErrNoSnapshotFound {
err = fmt.Errorf("snapshot filter (Paths:%v Tags:%v Hosts:%v): %w",
f.Paths, f.Tags, f.Hosts, err)
}
- return sn, err
+ return sn, subfolder, err
}
return FindSnapshot(ctx, be, loader, snapshotID)
}
type SnapshotFindCb func(string, *Snapshot, error) error
+var ErrInvalidSnapshotSyntax = errors.New("<snapshot>:<subfolder> syntax not allowed")
+
// FindAll yields Snapshots, either given explicitly by `snapshotIDs` or filtered from the list of all snapshots.
func (f *SnapshotFilter) FindAll(ctx context.Context, be Lister, loader LoaderUnpacked, snapshotIDs []string, fn SnapshotFindCb) error {
if len(snapshotIDs) != 0 {
@@ -138,15 +150,20 @@ func (f *SnapshotFilter) FindAll(ctx context.Context, be Lister, loader LoaderUn
if sn != nil {
ids.Insert(*sn.ID())
}
+ } else if strings.HasPrefix(s, "latest:") {
+ err = ErrInvalidSnapshotSyntax
} else {
- sn, err = FindSnapshot(ctx, be, loader, s)
- if err == nil {
+ var subfolder string
+ sn, subfolder, err = FindSnapshot(ctx, be, loader, s)
+ if err == nil && subfolder != "" {
+ err = ErrInvalidSnapshotSyntax
+ } else if err == nil {
if ids.Has(*sn.ID()) {
continue
- } else {
- ids.Insert(*sn.ID())
- s = sn.ID().String()
}
+
+ ids.Insert(*sn.ID())
+ s = sn.ID().String()
}
}
err = fn(s, sn, err)
diff --git a/internal/restic/snapshot_find_test.go b/internal/restic/snapshot_find_test.go
index d098b5224..2f16dcb2f 100644
--- a/internal/restic/snapshot_find_test.go
+++ b/internal/restic/snapshot_find_test.go
@@ -6,16 +6,17 @@ import (
"github.com/restic/restic/internal/repository"
"github.com/restic/restic/internal/restic"
+ "github.com/restic/restic/internal/test"
)
func TestFindLatestSnapshot(t *testing.T) {
repo := repository.TestRepository(t)
- restic.TestCreateSnapshot(t, repo, parseTimeUTC("2015-05-05 05:05:05"), 1, 0)
- restic.TestCreateSnapshot(t, repo, parseTimeUTC("2017-07-07 07:07:07"), 1, 0)
- latestSnapshot := restic.TestCreateSnapshot(t, repo, parseTimeUTC("2019-09-09 09:09:09"), 1, 0)
+ restic.TestCreateSnapshot(t, repo, parseTimeUTC("2015-05-05 05:05:05"), 1)
+ restic.TestCreateSnapshot(t, repo, parseTimeUTC("2017-07-07 07:07:07"), 1)
+ latestSnapshot := restic.TestCreateSnapshot(t, repo, parseTimeUTC("2019-09-09 09:09:09"), 1)
f := restic.SnapshotFilter{Hosts: []string{"foo"}}
- sn, err := f.FindLatest(context.TODO(), repo.Backend(), repo, "latest")
+ sn, _, err := f.FindLatest(context.TODO(), repo.Backend(), repo, "latest")
if err != nil {
t.Fatalf("FindLatest returned error: %v", err)
}
@@ -27,11 +28,11 @@ func TestFindLatestSnapshot(t *testing.T) {
func TestFindLatestSnapshotWithMaxTimestamp(t *testing.T) {
repo := repository.TestRepository(t)
- restic.TestCreateSnapshot(t, repo, parseTimeUTC("2015-05-05 05:05:05"), 1, 0)
- desiredSnapshot := restic.TestCreateSnapshot(t, repo, parseTimeUTC("2017-07-07 07:07:07"), 1, 0)
- restic.TestCreateSnapshot(t, repo, parseTimeUTC("2019-09-09 09:09:09"), 1, 0)
+ restic.TestCreateSnapshot(t, repo, parseTimeUTC("2015-05-05 05:05:05"), 1)
+ desiredSnapshot := restic.TestCreateSnapshot(t, repo, parseTimeUTC("2017-07-07 07:07:07"), 1)
+ restic.TestCreateSnapshot(t, repo, parseTimeUTC("2019-09-09 09:09:09"), 1)
- sn, err := (&restic.SnapshotFilter{
+ sn, _, err := (&restic.SnapshotFilter{
Hosts: []string{"foo"},
TimestampLimit: parseTimeUTC("2018-08-08 08:08:08"),
}).FindLatest(context.TODO(), repo.Backend(), repo, "latest")
@@ -43,3 +44,48 @@ func TestFindLatestSnapshotWithMaxTimestamp(t *testing.T) {
t.Errorf("FindLatest returned wrong snapshot ID: %v", *sn.ID())
}
}
+
+func TestFindLatestWithSubpath(t *testing.T) {
+ repo := repository.TestRepository(t)
+ restic.TestCreateSnapshot(t, repo, parseTimeUTC("2015-05-05 05:05:05"), 1)
+ desiredSnapshot := restic.TestCreateSnapshot(t, repo, parseTimeUTC("2017-07-07 07:07:07"), 1)
+
+ for _, exp := range []struct {
+ query string
+ subfolder string
+ }{
+ {"latest", ""},
+ {"latest:subfolder", "subfolder"},
+ {desiredSnapshot.ID().Str(), ""},
+ {desiredSnapshot.ID().Str() + ":subfolder", "subfolder"},
+ {desiredSnapshot.ID().String(), ""},
+ {desiredSnapshot.ID().String() + ":subfolder", "subfolder"},
+ } {
+ t.Run("", func(t *testing.T) {
+ sn, subfolder, err := (&restic.SnapshotFilter{}).FindLatest(context.TODO(), repo.Backend(), repo, exp.query)
+ if err != nil {
+ t.Fatalf("FindLatest returned error: %v", err)
+ }
+
+ test.Assert(t, *sn.ID() == *desiredSnapshot.ID(), "FindLatest returned wrong snapshot ID: %v", *sn.ID())
+ test.Assert(t, subfolder == exp.subfolder, "FindLatest returned wrong path in snapshot: %v", subfolder)
+ })
+ }
+}
+
+func TestFindAllSubpathError(t *testing.T) {
+ repo := repository.TestRepository(t)
+ desiredSnapshot := restic.TestCreateSnapshot(t, repo, parseTimeUTC("2017-07-07 07:07:07"), 1)
+
+ count := 0
+ test.OK(t, (&restic.SnapshotFilter{}).FindAll(context.TODO(), repo.Backend(), repo,
+ []string{"latest:subfolder", desiredSnapshot.ID().Str() + ":subfolder"},
+ func(id string, sn *restic.Snapshot, err error) error {
+ if err == restic.ErrInvalidSnapshotSyntax {
+ count++
+ return nil
+ }
+ return err
+ }))
+ test.Assert(t, count == 2, "unexpected number of subfolder errors: %v, wanted %v", count, 2)
+}
diff --git a/internal/restic/snapshot_group.go b/internal/restic/snapshot_group.go
index c8b1a5faa..964a230b3 100644
--- a/internal/restic/snapshot_group.go
+++ b/internal/restic/snapshot_group.go
@@ -2,12 +2,62 @@ package restic
import (
"encoding/json"
+ "fmt"
"sort"
"strings"
-
- "github.com/restic/restic/internal/errors"
)
+type SnapshotGroupByOptions struct {
+ Tag bool
+ Host bool
+ Path bool
+}
+
+func splitSnapshotGroupBy(s string) (SnapshotGroupByOptions, error) {
+ var l SnapshotGroupByOptions
+ for _, option := range strings.Split(s, ",") {
+ switch option {
+ case "host", "hosts":
+ l.Host = true
+ case "path", "paths":
+ l.Path = true
+ case "tag", "tags":
+ l.Tag = true
+ case "":
+ default:
+ return SnapshotGroupByOptions{}, fmt.Errorf("unknown grouping option: %q", option)
+ }
+ }
+ return l, nil
+}
+
+func (l SnapshotGroupByOptions) String() string {
+ var parts []string
+ if l.Host {
+ parts = append(parts, "host")
+ }
+ if l.Path {
+ parts = append(parts, "paths")
+ }
+ if l.Tag {
+ parts = append(parts, "tags")
+ }
+ return strings.Join(parts, ",")
+}
+
+func (l *SnapshotGroupByOptions) Set(s string) error {
+ parts, err := splitSnapshotGroupBy(s)
+ if err != nil {
+ return err
+ }
+ *l = parts
+ return nil
+}
+
+func (l *SnapshotGroupByOptions) Type() string {
+ return "group"
+}
+
// SnapshotGroupKey is the structure for identifying groups in a grouped
// snapshot list. This is used by GroupSnapshots()
type SnapshotGroupKey struct {
@@ -17,44 +67,25 @@ type SnapshotGroupKey struct {
}
// GroupSnapshots takes a list of snapshots and a grouping criteria and creates
-// a group list of snapshots.
-func GroupSnapshots(snapshots Snapshots, options string) (map[string]Snapshots, bool, error) {
+// a grouped list of snapshots.
+func GroupSnapshots(snapshots Snapshots, groupBy SnapshotGroupByOptions) (map[string]Snapshots, bool, error) {
// group by hostname and dirs
snapshotGroups := make(map[string]Snapshots)
- var GroupByTag bool
- var GroupByHost bool
- var GroupByPath bool
- GroupOptionList := strings.Split(options, ",")
-
- for _, option := range GroupOptionList {
- switch option {
- case "host", "hosts":
- GroupByHost = true
- case "path", "paths":
- GroupByPath = true
- case "tag", "tags":
- GroupByTag = true
- case "":
- default:
- return nil, false, errors.Fatal("unknown grouping option: '" + option + "'")
- }
- }
-
for _, sn := range snapshots {
// Determining grouping-keys
var tags []string
var hostname string
var paths []string
- if GroupByTag {
+ if groupBy.Tag {
tags = sn.Tags
sort.Strings(tags)
}
- if GroupByHost {
+ if groupBy.Host {
hostname = sn.Hostname
}
- if GroupByPath {
+ if groupBy.Path {
paths = sn.Paths
}
@@ -70,5 +101,5 @@ func GroupSnapshots(snapshots Snapshots, options string) (map[string]Snapshots,
snapshotGroups[string(k)] = append(snapshotGroups[string(k)], sn)
}
- return snapshotGroups, GroupByTag || GroupByHost || GroupByPath, nil
+ return snapshotGroups, groupBy.Tag || groupBy.Host || groupBy.Path, nil
}
diff --git a/internal/restic/snapshot_group_test.go b/internal/restic/snapshot_group_test.go
new file mode 100644
index 000000000..78ac99ab1
--- /dev/null
+++ b/internal/restic/snapshot_group_test.go
@@ -0,0 +1,50 @@
+package restic_test
+
+import (
+ "testing"
+
+ "github.com/google/go-cmp/cmp"
+ "github.com/restic/restic/internal/restic"
+ "github.com/restic/restic/internal/test"
+)
+
+func TestGroupByOptions(t *testing.T) {
+ for _, exp := range []struct {
+ from string
+ opts restic.SnapshotGroupByOptions
+ normalized string
+ }{
+ {
+ from: "",
+ opts: restic.SnapshotGroupByOptions{},
+ normalized: "",
+ },
+ {
+ from: "host,paths",
+ opts: restic.SnapshotGroupByOptions{Host: true, Path: true},
+ normalized: "host,paths",
+ },
+ {
+ from: "host,path,tag",
+ opts: restic.SnapshotGroupByOptions{Host: true, Path: true, Tag: true},
+ normalized: "host,paths,tags",
+ },
+ {
+ from: "hosts,paths,tags",
+ opts: restic.SnapshotGroupByOptions{Host: true, Path: true, Tag: true},
+ normalized: "host,paths,tags",
+ },
+ } {
+ var opts restic.SnapshotGroupByOptions
+ test.OK(t, opts.Set(exp.from))
+ if !cmp.Equal(opts, exp.opts) {
+ t.Errorf("unexpeted opts %s", cmp.Diff(opts, exp.opts))
+ }
+ test.Equals(t, opts.String(), exp.normalized)
+ }
+
+ var opts restic.SnapshotGroupByOptions
+ err := opts.Set("tags,invalid")
+ test.Assert(t, err != nil, "missing error on invalid tags")
+ test.Assert(t, !opts.Host && !opts.Path && !opts.Tag, "unexpected opts %s %s %s", opts.Host, opts.Path, opts.Tag)
+}
diff --git a/internal/restic/snapshot_policy.go b/internal/restic/snapshot_policy.go
index 3271140aa..0ff0c5ec8 100644
--- a/internal/restic/snapshot_policy.go
+++ b/internal/restic/snapshot_policy.go
@@ -31,23 +31,22 @@ func (e ExpirePolicy) String() (s string) {
var keeps []string
var keepw []string
- if e.Last > 0 {
- keeps = append(keeps, fmt.Sprintf("%d latest", e.Last))
- }
- if e.Hourly > 0 {
- keeps = append(keeps, fmt.Sprintf("%d hourly", e.Hourly))
- }
- if e.Daily > 0 {
- keeps = append(keeps, fmt.Sprintf("%d daily", e.Daily))
- }
- if e.Weekly > 0 {
- keeps = append(keeps, fmt.Sprintf("%d weekly", e.Weekly))
- }
- if e.Monthly > 0 {
- keeps = append(keeps, fmt.Sprintf("%d monthly", e.Monthly))
- }
- if e.Yearly > 0 {
- keeps = append(keeps, fmt.Sprintf("%d yearly", e.Yearly))
+ for _, opt := range []struct {
+ count int
+ descr string
+ }{
+ {e.Last, "latest"},
+ {e.Hourly, "hourly"},
+ {e.Daily, "daily"},
+ {e.Weekly, "weekly"},
+ {e.Monthly, "monthly"},
+ {e.Yearly, "yearly"},
+ } {
+ if opt.count > 0 {
+ keeps = append(keeps, fmt.Sprintf("%d %s", opt.count, opt.descr))
+ } else if opt.count == -1 {
+ keeps = append(keeps, fmt.Sprintf("all %s", opt.descr))
+ }
}
if !e.WithinHourly.Zero() {
@@ -100,13 +99,7 @@ func (e ExpirePolicy) String() (s string) {
return s
}
-// Sum returns the maximum number of snapshots to be kept according to this
-// policy.
-func (e ExpirePolicy) Sum() int {
- return e.Last + e.Hourly + e.Daily + e.Weekly + e.Monthly + e.Yearly
-}
-
-// Empty returns true iff no policy has been configured (all values zero).
+// Empty returns true if no policy has been configured (all values zero).
func (e ExpirePolicy) Empty() bool {
if len(e.Tags) != 0 {
return false
@@ -143,7 +136,7 @@ func y(d time.Time, _ int) int {
}
// always returns a unique number for d.
-func always(d time.Time, nr int) int {
+func always(_ time.Time, nr int) int {
return nr
}
@@ -190,6 +183,7 @@ type KeepReason struct {
// according to the policy p. list is sorted in the process. reasons contains
// the reasons to keep each snapshot, it is in the same order as keep.
func ApplyPolicy(list Snapshots, p ExpirePolicy) (keep, remove Snapshots, reasons []KeepReason) {
+ // sort newest snapshots first
sort.Stable(list)
if p.Empty() {
@@ -260,13 +254,18 @@ func ApplyPolicy(list Snapshots, p ExpirePolicy) (keep, remove Snapshots, reason
// Now update the other buckets and see if they have some counts left.
for i, b := range buckets {
- if b.Count > 0 {
+ // -1 means "keep all"
+ if b.Count > 0 || b.Count == -1 {
val := b.bucker(cur.Time, nr)
- if val != b.Last {
+ // also keep the oldest snapshot if the bucket has some counts left. This maximizes the
+ // the history length kept while some counts are left.
+ if val != b.Last || nr == len(list)-1 {
debug.Log("keep %v %v, bucker %v, val %v\n", cur.Time, cur.id.Str(), i, val)
keepSnap = true
buckets[i].Last = val
- buckets[i].Count--
+ if buckets[i].Count > 0 {
+ buckets[i].Count--
+ }
keepSnapReasons = append(keepSnapReasons, b.reason)
}
}
@@ -279,7 +278,7 @@ func ApplyPolicy(list Snapshots, p ExpirePolicy) (keep, remove Snapshots, reason
if cur.Time.After(t) {
val := b.bucker(cur.Time, nr)
- if val != b.Last {
+ if val != b.Last || nr == len(list)-1 {
debug.Log("keep %v, time %v, ID %v, bucker %v, val %v %v\n", b.reason, cur.Time, cur.id.Str(), i, val, b.Last)
keepSnap = true
bucketsWithin[i].Last = val
diff --git a/internal/restic/snapshot_policy_test.go b/internal/restic/snapshot_policy_test.go
index 918ea4ec7..75f0f18f4 100644
--- a/internal/restic/snapshot_policy_test.go
+++ b/internal/restic/snapshot_policy_test.go
@@ -22,13 +22,14 @@ func parseTimeUTC(s string) time.Time {
return t.UTC()
}
-func parseDuration(s string) restic.Duration {
- d, err := restic.ParseDuration(s)
- if err != nil {
- panic(err)
+// Returns the maximum number of snapshots to be kept according to this policy.
+// If any of the counts is -1 it will return 0.
+func policySum(e *restic.ExpirePolicy) int {
+ if e.Last == -1 || e.Hourly == -1 || e.Daily == -1 || e.Weekly == -1 || e.Monthly == -1 || e.Yearly == -1 {
+ return 0
}
- return d
+ return e.Last + e.Hourly + e.Daily + e.Weekly + e.Monthly + e.Yearly
}
func TestExpireSnapshotOps(t *testing.T) {
@@ -46,7 +47,7 @@ func TestExpireSnapshotOps(t *testing.T) {
if isEmpty != d.expectEmpty {
t.Errorf("empty test %v: wrong result, want:\n %#v\ngot:\n %#v", i, d.expectEmpty, isEmpty)
}
- hasSum := d.p.Sum()
+ hasSum := policySum(d.p)
if hasSum != d.expectSum {
t.Errorf("sum test %v: wrong result, want:\n %#v\ngot:\n %#v", i, d.expectSum, hasSum)
}
@@ -219,26 +220,30 @@ func TestApplyPolicy(t *testing.T) {
{Tags: []restic.TagList{{"foo"}}},
{Tags: []restic.TagList{{"foo", "bar"}}},
{Tags: []restic.TagList{{"foo"}, {"bar"}}},
- {Within: parseDuration("1d")},
- {Within: parseDuration("2d")},
- {Within: parseDuration("7d")},
- {Within: parseDuration("1m")},
- {Within: parseDuration("1m14d")},
- {Within: parseDuration("1y1d1m")},
- {Within: parseDuration("13d23h")},
- {Within: parseDuration("2m2h")},
- {Within: parseDuration("1y2m3d3h")},
- {WithinHourly: parseDuration("1y2m3d3h")},
- {WithinDaily: parseDuration("1y2m3d3h")},
- {WithinWeekly: parseDuration("1y2m3d3h")},
- {WithinMonthly: parseDuration("1y2m3d3h")},
- {WithinYearly: parseDuration("1y2m3d3h")},
- {Within: parseDuration("1h"),
- WithinHourly: parseDuration("1d"),
- WithinDaily: parseDuration("7d"),
- WithinWeekly: parseDuration("1m"),
- WithinMonthly: parseDuration("1y"),
- WithinYearly: parseDuration("9999y")},
+ {Within: restic.ParseDurationOrPanic("1d")},
+ {Within: restic.ParseDurationOrPanic("2d")},
+ {Within: restic.ParseDurationOrPanic("7d")},
+ {Within: restic.ParseDurationOrPanic("1m")},
+ {Within: restic.ParseDurationOrPanic("1m14d")},
+ {Within: restic.ParseDurationOrPanic("1y1d1m")},
+ {Within: restic.ParseDurationOrPanic("13d23h")},
+ {Within: restic.ParseDurationOrPanic("2m2h")},
+ {Within: restic.ParseDurationOrPanic("1y2m3d3h")},
+ {WithinHourly: restic.ParseDurationOrPanic("1y2m3d3h")},
+ {WithinDaily: restic.ParseDurationOrPanic("1y2m3d3h")},
+ {WithinWeekly: restic.ParseDurationOrPanic("1y2m3d3h")},
+ {WithinMonthly: restic.ParseDurationOrPanic("1y2m3d3h")},
+ {WithinYearly: restic.ParseDurationOrPanic("1y2m3d3h")},
+ {Within: restic.ParseDurationOrPanic("1h"),
+ WithinHourly: restic.ParseDurationOrPanic("1d"),
+ WithinDaily: restic.ParseDurationOrPanic("7d"),
+ WithinWeekly: restic.ParseDurationOrPanic("1m"),
+ WithinMonthly: restic.ParseDurationOrPanic("1y"),
+ WithinYearly: restic.ParseDurationOrPanic("9999y")},
+ {Last: -1}, // keep all
+ {Last: -1, Hourly: -1}, // keep all (Last overrides Hourly)
+ {Hourly: -1}, // keep all hourlies
+ {Daily: 3, Weekly: 2, Monthly: -1, Yearly: -1},
}
for i, p := range tests {
@@ -251,9 +256,9 @@ func TestApplyPolicy(t *testing.T) {
len(keep)+len(remove), len(testExpireSnapshots))
}
- if p.Sum() > 0 && len(keep) > p.Sum() {
+ if policySum(&p) > 0 && len(keep) > policySum(&p) {
t.Errorf("not enough snapshots removed: policy allows %v snapshots to remain, but ended up with %v",
- p.Sum(), len(keep))
+ policySum(&p), len(keep))
}
if len(keep) != len(reasons) {
diff --git a/internal/restic/testdata/policy_keep_snapshots_16 b/internal/restic/testdata/policy_keep_snapshots_16
index d0cae94b5..da6f43a1c 100644
--- a/internal/restic/testdata/policy_keep_snapshots_16
+++ b/internal/restic/testdata/policy_keep_snapshots_16
@@ -14,6 +14,11 @@
"time": "2014-11-22T10:20:30Z",
"tree": null,
"paths": null
+ },
+ {
+ "time": "2014-08-08T10:20:30Z",
+ "tree": null,
+ "paths": null
}
],
"reasons": [
@@ -55,6 +60,19 @@
"counters": {
"yearly": 7
}
+ },
+ {
+ "snapshot": {
+ "time": "2014-08-08T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "yearly snapshot"
+ ],
+ "counters": {
+ "yearly": 6
+ }
}
]
} \ No newline at end of file
diff --git a/internal/restic/testdata/policy_keep_snapshots_17 b/internal/restic/testdata/policy_keep_snapshots_17
index 742b8005b..ee728d4e0 100644
--- a/internal/restic/testdata/policy_keep_snapshots_17
+++ b/internal/restic/testdata/policy_keep_snapshots_17
@@ -49,6 +49,11 @@
"time": "2014-11-22T10:20:30Z",
"tree": null,
"paths": null
+ },
+ {
+ "time": "2014-08-08T10:20:30Z",
+ "tree": null,
+ "paths": null
}
],
"reasons": [
@@ -201,6 +206,19 @@
"counters": {
"yearly": 7
}
+ },
+ {
+ "snapshot": {
+ "time": "2014-08-08T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "yearly snapshot"
+ ],
+ "counters": {
+ "yearly": 6
+ }
}
]
} \ No newline at end of file
diff --git a/internal/restic/testdata/policy_keep_snapshots_35 b/internal/restic/testdata/policy_keep_snapshots_35
index a4def907a..ece4ddbd2 100644
--- a/internal/restic/testdata/policy_keep_snapshots_35
+++ b/internal/restic/testdata/policy_keep_snapshots_35
@@ -44,6 +44,11 @@
"time": "2014-11-22T10:20:30Z",
"tree": null,
"paths": null
+ },
+ {
+ "time": "2014-08-08T10:20:30Z",
+ "tree": null,
+ "paths": null
}
],
"reasons": [
@@ -152,6 +157,17 @@
"yearly within 9999y"
],
"counters": {}
+ },
+ {
+ "snapshot": {
+ "time": "2014-08-08T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "yearly within 9999y"
+ ],
+ "counters": {}
}
]
} \ No newline at end of file
diff --git a/internal/restic/testdata/policy_keep_snapshots_36 b/internal/restic/testdata/policy_keep_snapshots_36
new file mode 100644
index 000000000..75a3a5b46
--- /dev/null
+++ b/internal/restic/testdata/policy_keep_snapshots_36
@@ -0,0 +1,1782 @@
+{
+ "keep": [
+ {
+ "time": "2016-01-18T12:02:03Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2016-01-12T21:08:03Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2016-01-12T21:02:03Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2016-01-09T21:02:03Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2016-01-08T20:02:03Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2016-01-07T10:02:03Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2016-01-06T08:02:03Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2016-01-05T09:02:03Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2016-01-04T16:23:03Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2016-01-04T12:30:03Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2016-01-04T12:28:03Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2016-01-04T12:24:03Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2016-01-04T12:23:03Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2016-01-04T11:23:03Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2016-01-04T10:23:03Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2016-01-03T07:02:03Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2016-01-01T07:08:03Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2016-01-01T01:03:03Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2016-01-01T01:02:03Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-11-22T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-11-21T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-11-20T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-11-18T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-11-15T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-11-13T10:20:30.1Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-11-13T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-11-12T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-11-10T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-11-08T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-10-22T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-10-22T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-10-22T10:20:30Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "foo",
+ "bar"
+ ]
+ },
+ {
+ "time": "2015-10-22T10:20:30Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "foo",
+ "bar"
+ ]
+ },
+ {
+ "time": "2015-10-22T10:20:30Z",
+ "tree": null,
+ "paths": [
+ "path1",
+ "path2"
+ ],
+ "tags": [
+ "foo",
+ "bar"
+ ]
+ },
+ {
+ "time": "2015-10-20T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-10-11T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-10-10T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-10-09T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-10-08T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-10-06T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-10-05T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-10-02T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-10-01T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-09-22T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-09-20T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-09-11T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-09-10T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-09-09T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-09-08T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-09-06T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-09-05T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-09-02T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-09-01T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-08-22T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-08-21T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-08-20T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-08-18T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-08-15T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-08-13T10:20:30.1Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-08-13T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-08-12T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-08-10T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-08-08T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2014-11-22T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2014-11-21T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2014-11-20T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2014-11-18T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2014-11-15T10:20:30Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "foo",
+ "bar"
+ ]
+ },
+ {
+ "time": "2014-11-13T10:20:30.1Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "bar"
+ ]
+ },
+ {
+ "time": "2014-11-13T10:20:30Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "foo"
+ ]
+ },
+ {
+ "time": "2014-11-12T10:20:30Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "foo"
+ ]
+ },
+ {
+ "time": "2014-11-10T10:20:30Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "foo"
+ ]
+ },
+ {
+ "time": "2014-11-08T10:20:30Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "foo"
+ ]
+ },
+ {
+ "time": "2014-10-22T10:20:30Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "foo"
+ ]
+ },
+ {
+ "time": "2014-10-20T10:20:30Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "foo"
+ ]
+ },
+ {
+ "time": "2014-10-11T10:20:30Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "foo"
+ ]
+ },
+ {
+ "time": "2014-10-10T10:20:30Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "foo"
+ ]
+ },
+ {
+ "time": "2014-10-09T10:20:30Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "foo"
+ ]
+ },
+ {
+ "time": "2014-10-08T10:20:30Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "foo"
+ ]
+ },
+ {
+ "time": "2014-10-06T10:20:30Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "foo"
+ ]
+ },
+ {
+ "time": "2014-10-05T10:20:30Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "foo"
+ ]
+ },
+ {
+ "time": "2014-10-02T10:20:30Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "foo"
+ ]
+ },
+ {
+ "time": "2014-10-01T10:20:30Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "foo"
+ ]
+ },
+ {
+ "time": "2014-09-22T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2014-09-20T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2014-09-11T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2014-09-10T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2014-09-09T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2014-09-08T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2014-09-06T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2014-09-05T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2014-09-02T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2014-09-01T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2014-08-22T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2014-08-21T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2014-08-20T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2014-08-18T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2014-08-15T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2014-08-13T10:20:30.1Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2014-08-13T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2014-08-12T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2014-08-10T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2014-08-08T10:20:30Z",
+ "tree": null,
+ "paths": null
+ }
+ ],
+ "reasons": [
+ {
+ "snapshot": {
+ "time": "2016-01-18T12:02:03Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2016-01-12T21:08:03Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2016-01-12T21:02:03Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2016-01-09T21:02:03Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2016-01-08T20:02:03Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2016-01-07T10:02:03Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2016-01-06T08:02:03Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2016-01-05T09:02:03Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2016-01-04T16:23:03Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2016-01-04T12:30:03Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2016-01-04T12:28:03Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2016-01-04T12:24:03Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2016-01-04T12:23:03Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2016-01-04T11:23:03Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2016-01-04T10:23:03Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2016-01-03T07:02:03Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2016-01-01T07:08:03Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2016-01-01T01:03:03Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2016-01-01T01:02:03Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-11-22T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-11-21T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-11-20T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-11-18T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-11-15T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-11-13T10:20:30.1Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-11-13T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-11-12T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-11-10T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-11-08T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-10-22T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-10-22T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-10-22T10:20:30Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "foo",
+ "bar"
+ ]
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-10-22T10:20:30Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "foo",
+ "bar"
+ ]
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-10-22T10:20:30Z",
+ "tree": null,
+ "paths": [
+ "path1",
+ "path2"
+ ],
+ "tags": [
+ "foo",
+ "bar"
+ ]
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-10-20T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-10-11T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-10-10T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-10-09T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-10-08T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-10-06T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-10-05T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-10-02T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-10-01T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-09-22T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-09-20T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-09-11T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-09-10T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-09-09T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-09-08T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-09-06T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-09-05T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-09-02T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-09-01T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-08-22T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-08-21T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-08-20T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-08-18T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-08-15T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-08-13T10:20:30.1Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-08-13T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-08-12T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-08-10T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-08-08T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-11-22T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-11-21T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-11-20T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-11-18T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-11-15T10:20:30Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "foo",
+ "bar"
+ ]
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-11-13T10:20:30.1Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "bar"
+ ]
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-11-13T10:20:30Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "foo"
+ ]
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-11-12T10:20:30Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "foo"
+ ]
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-11-10T10:20:30Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "foo"
+ ]
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-11-08T10:20:30Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "foo"
+ ]
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-10-22T10:20:30Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "foo"
+ ]
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-10-20T10:20:30Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "foo"
+ ]
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-10-11T10:20:30Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "foo"
+ ]
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-10-10T10:20:30Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "foo"
+ ]
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-10-09T10:20:30Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "foo"
+ ]
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-10-08T10:20:30Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "foo"
+ ]
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-10-06T10:20:30Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "foo"
+ ]
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-10-05T10:20:30Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "foo"
+ ]
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-10-02T10:20:30Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "foo"
+ ]
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-10-01T10:20:30Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "foo"
+ ]
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-09-22T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-09-20T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-09-11T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-09-10T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-09-09T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-09-08T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-09-06T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-09-05T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-09-02T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-09-01T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-08-22T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-08-21T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-08-20T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-08-18T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-08-15T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-08-13T10:20:30.1Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-08-13T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-08-12T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-08-10T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-08-08T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1}
+ }
+ ]
+} \ No newline at end of file
diff --git a/internal/restic/testdata/policy_keep_snapshots_37 b/internal/restic/testdata/policy_keep_snapshots_37
new file mode 100644
index 000000000..f6ffa40ea
--- /dev/null
+++ b/internal/restic/testdata/policy_keep_snapshots_37
@@ -0,0 +1,1872 @@
+{
+ "keep": [
+ {
+ "time": "2016-01-18T12:02:03Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2016-01-12T21:08:03Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2016-01-12T21:02:03Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2016-01-09T21:02:03Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2016-01-08T20:02:03Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2016-01-07T10:02:03Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2016-01-06T08:02:03Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2016-01-05T09:02:03Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2016-01-04T16:23:03Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2016-01-04T12:30:03Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2016-01-04T12:28:03Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2016-01-04T12:24:03Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2016-01-04T12:23:03Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2016-01-04T11:23:03Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2016-01-04T10:23:03Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2016-01-03T07:02:03Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2016-01-01T07:08:03Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2016-01-01T01:03:03Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2016-01-01T01:02:03Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-11-22T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-11-21T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-11-20T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-11-18T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-11-15T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-11-13T10:20:30.1Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-11-13T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-11-12T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-11-10T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-11-08T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-10-22T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-10-22T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-10-22T10:20:30Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "foo",
+ "bar"
+ ]
+ },
+ {
+ "time": "2015-10-22T10:20:30Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "foo",
+ "bar"
+ ]
+ },
+ {
+ "time": "2015-10-22T10:20:30Z",
+ "tree": null,
+ "paths": [
+ "path1",
+ "path2"
+ ],
+ "tags": [
+ "foo",
+ "bar"
+ ]
+ },
+ {
+ "time": "2015-10-20T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-10-11T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-10-10T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-10-09T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-10-08T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-10-06T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-10-05T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-10-02T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-10-01T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-09-22T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-09-20T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-09-11T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-09-10T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-09-09T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-09-08T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-09-06T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-09-05T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-09-02T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-09-01T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-08-22T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-08-21T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-08-20T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-08-18T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-08-15T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-08-13T10:20:30.1Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-08-13T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-08-12T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-08-10T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-08-08T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2014-11-22T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2014-11-21T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2014-11-20T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2014-11-18T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2014-11-15T10:20:30Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "foo",
+ "bar"
+ ]
+ },
+ {
+ "time": "2014-11-13T10:20:30.1Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "bar"
+ ]
+ },
+ {
+ "time": "2014-11-13T10:20:30Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "foo"
+ ]
+ },
+ {
+ "time": "2014-11-12T10:20:30Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "foo"
+ ]
+ },
+ {
+ "time": "2014-11-10T10:20:30Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "foo"
+ ]
+ },
+ {
+ "time": "2014-11-08T10:20:30Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "foo"
+ ]
+ },
+ {
+ "time": "2014-10-22T10:20:30Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "foo"
+ ]
+ },
+ {
+ "time": "2014-10-20T10:20:30Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "foo"
+ ]
+ },
+ {
+ "time": "2014-10-11T10:20:30Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "foo"
+ ]
+ },
+ {
+ "time": "2014-10-10T10:20:30Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "foo"
+ ]
+ },
+ {
+ "time": "2014-10-09T10:20:30Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "foo"
+ ]
+ },
+ {
+ "time": "2014-10-08T10:20:30Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "foo"
+ ]
+ },
+ {
+ "time": "2014-10-06T10:20:30Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "foo"
+ ]
+ },
+ {
+ "time": "2014-10-05T10:20:30Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "foo"
+ ]
+ },
+ {
+ "time": "2014-10-02T10:20:30Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "foo"
+ ]
+ },
+ {
+ "time": "2014-10-01T10:20:30Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "foo"
+ ]
+ },
+ {
+ "time": "2014-09-22T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2014-09-20T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2014-09-11T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2014-09-10T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2014-09-09T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2014-09-08T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2014-09-06T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2014-09-05T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2014-09-02T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2014-09-01T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2014-08-22T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2014-08-21T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2014-08-20T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2014-08-18T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2014-08-15T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2014-08-13T10:20:30.1Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2014-08-13T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2014-08-12T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2014-08-10T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2014-08-08T10:20:30Z",
+ "tree": null,
+ "paths": null
+ }
+ ],
+ "reasons": [
+ {
+ "snapshot": {
+ "time": "2016-01-18T12:02:03Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot",
+ "hourly snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2016-01-12T21:08:03Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot",
+ "hourly snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2016-01-12T21:02:03Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2016-01-09T21:02:03Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot",
+ "hourly snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2016-01-08T20:02:03Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot",
+ "hourly snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2016-01-07T10:02:03Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot",
+ "hourly snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2016-01-06T08:02:03Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot",
+ "hourly snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2016-01-05T09:02:03Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot",
+ "hourly snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2016-01-04T16:23:03Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot",
+ "hourly snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2016-01-04T12:30:03Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot",
+ "hourly snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2016-01-04T12:28:03Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2016-01-04T12:24:03Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2016-01-04T12:23:03Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2016-01-04T11:23:03Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot",
+ "hourly snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2016-01-04T10:23:03Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot",
+ "hourly snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2016-01-03T07:02:03Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot",
+ "hourly snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2016-01-01T07:08:03Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot",
+ "hourly snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2016-01-01T01:03:03Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot",
+ "hourly snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2016-01-01T01:02:03Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-11-22T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot",
+ "hourly snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-11-21T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot",
+ "hourly snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-11-20T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot",
+ "hourly snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-11-18T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot",
+ "hourly snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-11-15T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot",
+ "hourly snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-11-13T10:20:30.1Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot",
+ "hourly snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-11-13T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-11-12T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot",
+ "hourly snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-11-10T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot",
+ "hourly snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-11-08T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot",
+ "hourly snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-10-22T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot",
+ "hourly snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-10-22T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-10-22T10:20:30Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "foo",
+ "bar"
+ ]
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-10-22T10:20:30Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "foo",
+ "bar"
+ ]
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-10-22T10:20:30Z",
+ "tree": null,
+ "paths": [
+ "path1",
+ "path2"
+ ],
+ "tags": [
+ "foo",
+ "bar"
+ ]
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-10-20T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot",
+ "hourly snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-10-11T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot",
+ "hourly snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-10-10T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot",
+ "hourly snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-10-09T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot",
+ "hourly snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-10-08T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot",
+ "hourly snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-10-06T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot",
+ "hourly snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-10-05T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot",
+ "hourly snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-10-02T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot",
+ "hourly snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-10-01T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot",
+ "hourly snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-09-22T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot",
+ "hourly snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-09-20T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot",
+ "hourly snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-09-11T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot",
+ "hourly snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-09-10T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot",
+ "hourly snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-09-09T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot",
+ "hourly snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-09-08T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot",
+ "hourly snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-09-06T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot",
+ "hourly snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-09-05T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot",
+ "hourly snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-09-02T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot",
+ "hourly snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-09-01T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot",
+ "hourly snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-08-22T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot",
+ "hourly snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-08-21T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot",
+ "hourly snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-08-20T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot",
+ "hourly snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-08-18T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot",
+ "hourly snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-08-15T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot",
+ "hourly snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-08-13T10:20:30.1Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot",
+ "hourly snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-08-13T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-08-12T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot",
+ "hourly snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-08-10T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot",
+ "hourly snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-08-08T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot",
+ "hourly snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-11-22T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot",
+ "hourly snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-11-21T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot",
+ "hourly snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-11-20T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot",
+ "hourly snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-11-18T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot",
+ "hourly snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-11-15T10:20:30Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "foo",
+ "bar"
+ ]
+ },
+ "matches": [
+ "last snapshot",
+ "hourly snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-11-13T10:20:30.1Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "bar"
+ ]
+ },
+ "matches": [
+ "last snapshot",
+ "hourly snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-11-13T10:20:30Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "foo"
+ ]
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-11-12T10:20:30Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "foo"
+ ]
+ },
+ "matches": [
+ "last snapshot",
+ "hourly snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-11-10T10:20:30Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "foo"
+ ]
+ },
+ "matches": [
+ "last snapshot",
+ "hourly snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-11-08T10:20:30Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "foo"
+ ]
+ },
+ "matches": [
+ "last snapshot",
+ "hourly snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-10-22T10:20:30Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "foo"
+ ]
+ },
+ "matches": [
+ "last snapshot",
+ "hourly snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-10-20T10:20:30Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "foo"
+ ]
+ },
+ "matches": [
+ "last snapshot",
+ "hourly snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-10-11T10:20:30Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "foo"
+ ]
+ },
+ "matches": [
+ "last snapshot",
+ "hourly snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-10-10T10:20:30Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "foo"
+ ]
+ },
+ "matches": [
+ "last snapshot",
+ "hourly snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-10-09T10:20:30Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "foo"
+ ]
+ },
+ "matches": [
+ "last snapshot",
+ "hourly snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-10-08T10:20:30Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "foo"
+ ]
+ },
+ "matches": [
+ "last snapshot",
+ "hourly snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-10-06T10:20:30Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "foo"
+ ]
+ },
+ "matches": [
+ "last snapshot",
+ "hourly snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-10-05T10:20:30Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "foo"
+ ]
+ },
+ "matches": [
+ "last snapshot",
+ "hourly snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-10-02T10:20:30Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "foo"
+ ]
+ },
+ "matches": [
+ "last snapshot",
+ "hourly snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-10-01T10:20:30Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "foo"
+ ]
+ },
+ "matches": [
+ "last snapshot",
+ "hourly snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-09-22T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot",
+ "hourly snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-09-20T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot",
+ "hourly snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-09-11T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot",
+ "hourly snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-09-10T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot",
+ "hourly snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-09-09T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot",
+ "hourly snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-09-08T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot",
+ "hourly snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-09-06T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot",
+ "hourly snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-09-05T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot",
+ "hourly snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-09-02T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot",
+ "hourly snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-09-01T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot",
+ "hourly snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-08-22T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot",
+ "hourly snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-08-21T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot",
+ "hourly snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-08-20T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot",
+ "hourly snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-08-18T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot",
+ "hourly snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-08-15T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot",
+ "hourly snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-08-13T10:20:30.1Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot",
+ "hourly snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-08-13T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-08-12T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot",
+ "hourly snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-08-10T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot",
+ "hourly snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-08-08T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "last snapshot",
+ "hourly snapshot"
+ ],
+ "counters": {"Last": -1, "Hourly": -1}
+ }
+ ]
+} \ No newline at end of file
diff --git a/internal/restic/testdata/policy_keep_snapshots_38 b/internal/restic/testdata/policy_keep_snapshots_38
new file mode 100644
index 000000000..6bfdd57f1
--- /dev/null
+++ b/internal/restic/testdata/policy_keep_snapshots_38
@@ -0,0 +1,1538 @@
+{
+ "keep": [
+ {
+ "time": "2016-01-18T12:02:03Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2016-01-12T21:08:03Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2016-01-09T21:02:03Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2016-01-08T20:02:03Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2016-01-07T10:02:03Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2016-01-06T08:02:03Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2016-01-05T09:02:03Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2016-01-04T16:23:03Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2016-01-04T12:30:03Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2016-01-04T11:23:03Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2016-01-04T10:23:03Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2016-01-03T07:02:03Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2016-01-01T07:08:03Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2016-01-01T01:03:03Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-11-22T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-11-21T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-11-20T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-11-18T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-11-15T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-11-13T10:20:30.1Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-11-12T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-11-10T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-11-08T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-10-22T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-10-20T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-10-11T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-10-10T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-10-09T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-10-08T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-10-06T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-10-05T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-10-02T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-10-01T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-09-22T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-09-20T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-09-11T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-09-10T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-09-09T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-09-08T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-09-06T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-09-05T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-09-02T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-09-01T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-08-22T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-08-21T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-08-20T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-08-18T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-08-15T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-08-13T10:20:30.1Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-08-12T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-08-10T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-08-08T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2014-11-22T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2014-11-21T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2014-11-20T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2014-11-18T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2014-11-15T10:20:30Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "foo",
+ "bar"
+ ]
+ },
+ {
+ "time": "2014-11-13T10:20:30.1Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "bar"
+ ]
+ },
+ {
+ "time": "2014-11-12T10:20:30Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "foo"
+ ]
+ },
+ {
+ "time": "2014-11-10T10:20:30Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "foo"
+ ]
+ },
+ {
+ "time": "2014-11-08T10:20:30Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "foo"
+ ]
+ },
+ {
+ "time": "2014-10-22T10:20:30Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "foo"
+ ]
+ },
+ {
+ "time": "2014-10-20T10:20:30Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "foo"
+ ]
+ },
+ {
+ "time": "2014-10-11T10:20:30Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "foo"
+ ]
+ },
+ {
+ "time": "2014-10-10T10:20:30Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "foo"
+ ]
+ },
+ {
+ "time": "2014-10-09T10:20:30Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "foo"
+ ]
+ },
+ {
+ "time": "2014-10-08T10:20:30Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "foo"
+ ]
+ },
+ {
+ "time": "2014-10-06T10:20:30Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "foo"
+ ]
+ },
+ {
+ "time": "2014-10-05T10:20:30Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "foo"
+ ]
+ },
+ {
+ "time": "2014-10-02T10:20:30Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "foo"
+ ]
+ },
+ {
+ "time": "2014-10-01T10:20:30Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "foo"
+ ]
+ },
+ {
+ "time": "2014-09-22T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2014-09-20T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2014-09-11T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2014-09-10T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2014-09-09T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2014-09-08T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2014-09-06T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2014-09-05T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2014-09-02T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2014-09-01T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2014-08-22T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2014-08-21T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2014-08-20T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2014-08-18T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2014-08-15T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2014-08-13T10:20:30.1Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2014-08-12T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2014-08-10T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2014-08-08T10:20:30Z",
+ "tree": null,
+ "paths": null
+ }
+ ],
+ "reasons": [
+ {
+ "snapshot": {
+ "time": "2016-01-18T12:02:03Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "hourly snapshot"
+ ],
+ "counters": {"Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2016-01-12T21:08:03Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "hourly snapshot"
+ ],
+ "counters": {"Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2016-01-09T21:02:03Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "hourly snapshot"
+ ],
+ "counters": {"Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2016-01-08T20:02:03Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "hourly snapshot"
+ ],
+ "counters": {"Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2016-01-07T10:02:03Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "hourly snapshot"
+ ],
+ "counters": {"Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2016-01-06T08:02:03Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "hourly snapshot"
+ ],
+ "counters": {"Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2016-01-05T09:02:03Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "hourly snapshot"
+ ],
+ "counters": {"Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2016-01-04T16:23:03Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "hourly snapshot"
+ ],
+ "counters": {"Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2016-01-04T12:30:03Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "hourly snapshot"
+ ],
+ "counters": {"Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2016-01-04T11:23:03Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "hourly snapshot"
+ ],
+ "counters": {"Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2016-01-04T10:23:03Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "hourly snapshot"
+ ],
+ "counters": {"Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2016-01-03T07:02:03Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "hourly snapshot"
+ ],
+ "counters": {"Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2016-01-01T07:08:03Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "hourly snapshot"
+ ],
+ "counters": {"Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2016-01-01T01:03:03Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "hourly snapshot"
+ ],
+ "counters": {"Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-11-22T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "hourly snapshot"
+ ],
+ "counters": {"Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-11-21T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "hourly snapshot"
+ ],
+ "counters": {"Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-11-20T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "hourly snapshot"
+ ],
+ "counters": {"Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-11-18T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "hourly snapshot"
+ ],
+ "counters": {"Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-11-15T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "hourly snapshot"
+ ],
+ "counters": {"Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-11-13T10:20:30.1Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "hourly snapshot"
+ ],
+ "counters": {"Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-11-12T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "hourly snapshot"
+ ],
+ "counters": {"Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-11-10T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "hourly snapshot"
+ ],
+ "counters": {"Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-11-08T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "hourly snapshot"
+ ],
+ "counters": {"Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-10-22T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "hourly snapshot"
+ ],
+ "counters": {"Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-10-20T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "hourly snapshot"
+ ],
+ "counters": {"Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-10-11T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "hourly snapshot"
+ ],
+ "counters": {"Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-10-10T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "hourly snapshot"
+ ],
+ "counters": {"Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-10-09T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "hourly snapshot"
+ ],
+ "counters": {"Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-10-08T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "hourly snapshot"
+ ],
+ "counters": {"Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-10-06T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "hourly snapshot"
+ ],
+ "counters": {"Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-10-05T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "hourly snapshot"
+ ],
+ "counters": {"Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-10-02T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "hourly snapshot"
+ ],
+ "counters": {"Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-10-01T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "hourly snapshot"
+ ],
+ "counters": {"Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-09-22T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "hourly snapshot"
+ ],
+ "counters": {"Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-09-20T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "hourly snapshot"
+ ],
+ "counters": {"Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-09-11T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "hourly snapshot"
+ ],
+ "counters": {"Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-09-10T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "hourly snapshot"
+ ],
+ "counters": {"Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-09-09T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "hourly snapshot"
+ ],
+ "counters": {"Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-09-08T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "hourly snapshot"
+ ],
+ "counters": {"Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-09-06T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "hourly snapshot"
+ ],
+ "counters": {"Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-09-05T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "hourly snapshot"
+ ],
+ "counters": {"Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-09-02T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "hourly snapshot"
+ ],
+ "counters": {"Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-09-01T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "hourly snapshot"
+ ],
+ "counters": {"Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-08-22T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "hourly snapshot"
+ ],
+ "counters": {"Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-08-21T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "hourly snapshot"
+ ],
+ "counters": {"Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-08-20T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "hourly snapshot"
+ ],
+ "counters": {"Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-08-18T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "hourly snapshot"
+ ],
+ "counters": {"Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-08-15T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "hourly snapshot"
+ ],
+ "counters": {"Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-08-13T10:20:30.1Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "hourly snapshot"
+ ],
+ "counters": {"Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-08-12T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "hourly snapshot"
+ ],
+ "counters": {"Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-08-10T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "hourly snapshot"
+ ],
+ "counters": {"Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-08-08T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "hourly snapshot"
+ ],
+ "counters": {"Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-11-22T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "hourly snapshot"
+ ],
+ "counters": {"Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-11-21T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "hourly snapshot"
+ ],
+ "counters": {"Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-11-20T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "hourly snapshot"
+ ],
+ "counters": {"Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-11-18T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "hourly snapshot"
+ ],
+ "counters": {"Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-11-15T10:20:30Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "foo",
+ "bar"
+ ]
+ },
+ "matches": [
+ "hourly snapshot"
+ ],
+ "counters": {"Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-11-13T10:20:30.1Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "bar"
+ ]
+ },
+ "matches": [
+ "hourly snapshot"
+ ],
+ "counters": {"Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-11-12T10:20:30Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "foo"
+ ]
+ },
+ "matches": [
+ "hourly snapshot"
+ ],
+ "counters": {"Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-11-10T10:20:30Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "foo"
+ ]
+ },
+ "matches": [
+ "hourly snapshot"
+ ],
+ "counters": {"Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-11-08T10:20:30Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "foo"
+ ]
+ },
+ "matches": [
+ "hourly snapshot"
+ ],
+ "counters": {"Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-10-22T10:20:30Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "foo"
+ ]
+ },
+ "matches": [
+ "hourly snapshot"
+ ],
+ "counters": {"Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-10-20T10:20:30Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "foo"
+ ]
+ },
+ "matches": [
+ "hourly snapshot"
+ ],
+ "counters": {"Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-10-11T10:20:30Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "foo"
+ ]
+ },
+ "matches": [
+ "hourly snapshot"
+ ],
+ "counters": {"Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-10-10T10:20:30Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "foo"
+ ]
+ },
+ "matches": [
+ "hourly snapshot"
+ ],
+ "counters": {"Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-10-09T10:20:30Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "foo"
+ ]
+ },
+ "matches": [
+ "hourly snapshot"
+ ],
+ "counters": {"Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-10-08T10:20:30Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "foo"
+ ]
+ },
+ "matches": [
+ "hourly snapshot"
+ ],
+ "counters": {"Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-10-06T10:20:30Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "foo"
+ ]
+ },
+ "matches": [
+ "hourly snapshot"
+ ],
+ "counters": {"Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-10-05T10:20:30Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "foo"
+ ]
+ },
+ "matches": [
+ "hourly snapshot"
+ ],
+ "counters": {"Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-10-02T10:20:30Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "foo"
+ ]
+ },
+ "matches": [
+ "hourly snapshot"
+ ],
+ "counters": {"Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-10-01T10:20:30Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "foo"
+ ]
+ },
+ "matches": [
+ "hourly snapshot"
+ ],
+ "counters": {"Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-09-22T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "hourly snapshot"
+ ],
+ "counters": {"Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-09-20T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "hourly snapshot"
+ ],
+ "counters": {"Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-09-11T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "hourly snapshot"
+ ],
+ "counters": {"Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-09-10T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "hourly snapshot"
+ ],
+ "counters": {"Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-09-09T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "hourly snapshot"
+ ],
+ "counters": {"Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-09-08T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "hourly snapshot"
+ ],
+ "counters": {"Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-09-06T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "hourly snapshot"
+ ],
+ "counters": {"Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-09-05T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "hourly snapshot"
+ ],
+ "counters": {"Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-09-02T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "hourly snapshot"
+ ],
+ "counters": {"Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-09-01T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "hourly snapshot"
+ ],
+ "counters": {"Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-08-22T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "hourly snapshot"
+ ],
+ "counters": {"Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-08-21T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "hourly snapshot"
+ ],
+ "counters": {"Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-08-20T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "hourly snapshot"
+ ],
+ "counters": {"Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-08-18T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "hourly snapshot"
+ ],
+ "counters": {"Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-08-15T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "hourly snapshot"
+ ],
+ "counters": {"Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-08-13T10:20:30.1Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "hourly snapshot"
+ ],
+ "counters": {"Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-08-12T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "hourly snapshot"
+ ],
+ "counters": {"Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-08-10T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "hourly snapshot"
+ ],
+ "counters": {"Hourly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-08-08T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "hourly snapshot"
+ ],
+ "counters": {"Hourly": -1}
+ }
+ ]
+}
diff --git a/internal/restic/testdata/policy_keep_snapshots_39 b/internal/restic/testdata/policy_keep_snapshots_39
new file mode 100644
index 000000000..4b111503b
--- /dev/null
+++ b/internal/restic/testdata/policy_keep_snapshots_39
@@ -0,0 +1,211 @@
+{
+ "keep": [
+ {
+ "time": "2016-01-18T12:02:03Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2016-01-12T21:08:03Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2016-01-09T21:02:03Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-11-22T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-10-22T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-09-22T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2015-08-22T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2014-11-22T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2014-10-22T10:20:30Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "foo"
+ ]
+ },
+ {
+ "time": "2014-09-22T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2014-08-22T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ {
+ "time": "2014-08-08T10:20:30Z",
+ "tree": null,
+ "paths": null
+ }
+ ],
+ "reasons": [
+ {
+ "snapshot": {
+ "time": "2016-01-18T12:02:03Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "daily snapshot",
+ "weekly snapshot",
+ "monthly snapshot",
+ "yearly snapshot"
+ ],
+ "counters": {"Daily": 2, "Weekly": 1, "Monthly": -1, "Yearly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2016-01-12T21:08:03Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "daily snapshot",
+ "weekly snapshot"
+ ],
+ "counters": {"Daily": 1, "Monthly": -1, "Yearly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2016-01-09T21:02:03Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "daily snapshot"
+ ],
+ "counters": {"Monthly": -1, "Yearly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-11-22T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "monthly snapshot",
+ "yearly snapshot"
+ ],
+ "counters": {"Monthly": -1, "Yearly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-10-22T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "monthly snapshot"
+ ],
+ "counters": {"Monthly": -1, "Yearly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-09-22T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "monthly snapshot"
+ ],
+ "counters": {"Monthly": -1, "Yearly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2015-08-22T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "monthly snapshot"
+ ],
+ "counters": {"Monthly": -1, "Yearly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-11-22T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "monthly snapshot",
+ "yearly snapshot"
+ ],
+ "counters": {"Monthly": -1, "Yearly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-10-22T10:20:30Z",
+ "tree": null,
+ "paths": null,
+ "tags": [
+ "foo"
+ ]
+ },
+ "matches": [
+ "monthly snapshot"
+ ],
+ "counters": {"Monthly": -1, "Yearly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-09-22T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "monthly snapshot"
+ ],
+ "counters": {"Monthly": -1, "Yearly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-08-22T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "monthly snapshot"
+ ],
+ "counters": {"Monthly": -1, "Yearly": -1}
+ },
+ {
+ "snapshot": {
+ "time": "2014-08-08T10:20:30Z",
+ "tree": null,
+ "paths": null
+ },
+ "matches": [
+ "monthly snapshot",
+ "yearly snapshot"
+ ],
+ "counters": {"Monthly": -1, "Yearly": -1}
+ }
+ ]
+} \ No newline at end of file
diff --git a/internal/restic/testdata/used_blobs_snapshot0 b/internal/restic/testdata/used_blobs_snapshot0
index 667ad34db..cc789f043 100644
--- a/internal/restic/testdata/used_blobs_snapshot0
+++ b/internal/restic/testdata/used_blobs_snapshot0
@@ -1,7 +1,8 @@
{"ID":"05bddd650a800f83f7c0d844cecb1e02f99ce962df5652a53842be50386078e1","Type":"data"}
{"ID":"087040b12f129e89e4eab2b86aa14467404366a17a6082efb0d11fa7e2f9f58e","Type":"data"}
+{"ID":"08a650e4d7575177ddeabf6a96896b76fa7e621aa3dd75e77293f22ce6c0c420","Type":"tree"}
{"ID":"1e0f0e5799b9d711e07883050366c7eee6b7481c0d884694093149f6c4e9789a","Type":"data"}
-{"ID":"229eac8e4e6c2e8d7b1d9f9627ab5d1a59cb17c5744c1e3634215116e7a92e7d","Type":"tree"}
+{"ID":"435b9207cd489b41a7d119e0d75eab2a861e2b3c8d4d12ac51873ff76be0cf73","Type":"tree"}
{"ID":"4719f8a039f5b745e16cf90e5b84c9255c290d500da716f7dd25909cdabb85b6","Type":"data"}
{"ID":"4e352975938a29711c3003c498185972235af261a6cf8cf700a8a6ee4f914b05","Type":"data"}
{"ID":"606772eacb7fe1a79267088dcadd13431914854faf1d39d47fe99a26b9fecdcb","Type":"data"}
@@ -9,7 +10,6 @@
{"ID":"72b6eb0fd0d87e00392f8b91efc1a4c3f7f5c0c76f861b38aea054bc9d43463b","Type":"data"}
{"ID":"77ab53b52e0cf13b300d1b7f6dac89287c8d86769d85e8a273311006ce6359be","Type":"data"}
{"ID":"99dab094430d3c1be22c801a6ad7364d490a8d2ce3f9dfa3d2677431446925f4","Type":"data"}
-{"ID":"9face1b278a49ef8819fbc1855ce573a85077453bbf6683488cad7767c3a38a7","Type":"tree"}
{"ID":"a4c97189465344038584e76c965dd59100eaed051db1fa5ba0e143897e2c87f1","Type":"data"}
{"ID":"a69c8621776ca8bb34c6c90e5ad811ddc8e2e5cfd6bb0cec5e75cca70e0b9ade","Type":"data"}
{"ID":"b11f4dd9d2722b3325186f57cd13a71a3af7791118477f355b49d101104e4c22","Type":"data"}
@@ -19,5 +19,5 @@
{"ID":"b9e634143719742fe77feed78b61f09573d59d2efa23d6d54afe6c159d220503","Type":"data"}
{"ID":"ca896fc9ebf95fcffd7c768b07b92110b21e332a47fef7e382bf15363b0ece1a","Type":"data"}
{"ID":"e6fe3512ea23a4ebf040d30958c669f7ffe724400f155a756467a9f3cafc27c5","Type":"data"}
-{"ID":"e96774ac5abfbb59940939f614d65a397fb7b5abba76c29bfe14479c6616eea0","Type":"tree"}
{"ID":"ed00928ce97ac5acd27c862d9097e606536e9063af1c47481257811f66260f3a","Type":"data"}
+{"ID":"fb62dd9093c4958b019b90e591b2d36320ff381a24bdc9c5db3b8960ff94d174","Type":"tree"}
diff --git a/internal/restic/testdata/used_blobs_snapshot1 b/internal/restic/testdata/used_blobs_snapshot1
index a5e8caedf..aa840294a 100644
--- a/internal/restic/testdata/used_blobs_snapshot1
+++ b/internal/restic/testdata/used_blobs_snapshot1
@@ -1,4 +1,3 @@
-{"ID":"04ff190aea26dae65ba4c782926cdfb700b484a8b802a5ffd58e3fadcf70b797","Type":"tree"}
{"ID":"05bddd650a800f83f7c0d844cecb1e02f99ce962df5652a53842be50386078e1","Type":"data"}
{"ID":"18dcaa1a676823c909aafabbb909652591915eebdde4f9a65cee955157583494","Type":"data"}
{"ID":"4719f8a039f5b745e16cf90e5b84c9255c290d500da716f7dd25909cdabb85b6","Type":"data"}
@@ -8,8 +7,9 @@
{"ID":"a69c8621776ca8bb34c6c90e5ad811ddc8e2e5cfd6bb0cec5e75cca70e0b9ade","Type":"data"}
{"ID":"b1f2ae9d748035e5bd9a87f2579405166d150c6560d8919496f02855e1c36cf9","Type":"data"}
{"ID":"b9e634143719742fe77feed78b61f09573d59d2efa23d6d54afe6c159d220503","Type":"data"}
-{"ID":"bdd5a029dd295e5998c518022547d185794e72d8f8c38709a638c5841284daef","Type":"tree"}
{"ID":"ca896fc9ebf95fcffd7c768b07b92110b21e332a47fef7e382bf15363b0ece1a","Type":"data"}
{"ID":"cc4cab5b20a3a88995f8cdb8b0698d67a32dbc5b54487f03cb612c30a626af39","Type":"data"}
{"ID":"e6fe3512ea23a4ebf040d30958c669f7ffe724400f155a756467a9f3cafc27c5","Type":"data"}
+{"ID":"e9f3c4fe78e903cba60d310a9668c42232c8274b3f29b5ecebb6ff1aaeabd7e3","Type":"tree"}
{"ID":"ed00928ce97ac5acd27c862d9097e606536e9063af1c47481257811f66260f3a","Type":"data"}
+{"ID":"ff58f76c2313e68aa9aaaece855183855ac4ff682910404c2ae33dc999ebaca2","Type":"tree"}
diff --git a/internal/restic/testdata/used_blobs_snapshot2 b/internal/restic/testdata/used_blobs_snapshot2
index f6404737e..3ed193f53 100644
--- a/internal/restic/testdata/used_blobs_snapshot2
+++ b/internal/restic/testdata/used_blobs_snapshot2
@@ -1,6 +1,7 @@
{"ID":"05bddd650a800f83f7c0d844cecb1e02f99ce962df5652a53842be50386078e1","Type":"data"}
{"ID":"087040b12f129e89e4eab2b86aa14467404366a17a6082efb0d11fa7e2f9f58e","Type":"data"}
{"ID":"0b88f99abc5ac71c54b3e8263c52ecb7d8903462779afdb3c8176ec5c4bb04fb","Type":"data"}
+{"ID":"0e1a817fca83f569d1733b11eba14b6c9b176e41bca3644eed8b29cb907d84d3","Type":"tree"}
{"ID":"1e0f0e5799b9d711e07883050366c7eee6b7481c0d884694093149f6c4e9789a","Type":"data"}
{"ID":"27917462f89cecae77a4c8fb65a094b9b75a917f13794c628b1640b17f4c4981","Type":"data"}
{"ID":"32745e4b26a5883ecec272c9fbfe7f3c9835c9ab41c9a2baa4d06f319697a0bd","Type":"data"}
@@ -10,15 +11,14 @@
{"ID":"6b5fd3a9baf615489c82a99a71f9917bf9a2d82d5f640d7f47d175412c4b8d19","Type":"data"}
{"ID":"95c97192efa810ccb1cee112238dca28673fbffce205d75ce8cc990a31005a51","Type":"data"}
{"ID":"99dab094430d3c1be22c801a6ad7364d490a8d2ce3f9dfa3d2677431446925f4","Type":"data"}
-{"ID":"9face1b278a49ef8819fbc1855ce573a85077453bbf6683488cad7767c3a38a7","Type":"tree"}
{"ID":"a4c97189465344038584e76c965dd59100eaed051db1fa5ba0e143897e2c87f1","Type":"data"}
-{"ID":"a5f2ffcd54e28e2ef3089c35b72aafda66161125e23dad581087ccd050c111c3","Type":"tree"}
{"ID":"a69c8621776ca8bb34c6c90e5ad811ddc8e2e5cfd6bb0cec5e75cca70e0b9ade","Type":"data"}
-{"ID":"ab5205525de94e564e3a00f634fcf9ebc397debd567734c68da7b406e612aae4","Type":"tree"}
{"ID":"b6a7e8d2aa717e0a6bd68abab512c6b566074b5a6ca2edf4cd446edc5857d732","Type":"data"}
-{"ID":"be2055b7125ccf824fcfa8faa4eb3985119012bac26643944eee46218e71306e","Type":"tree"}
+{"ID":"bad84ed273c5fbfb40aa839a171675b7f16f5e67f3eaf4448730caa0ee27297c","Type":"tree"}
{"ID":"bfc2fdb527b0c9f66bbb8d4ff1c44023cc2414efcc7f0831c10debab06bb4388","Type":"tree"}
{"ID":"ca896fc9ebf95fcffd7c768b07b92110b21e332a47fef7e382bf15363b0ece1a","Type":"data"}
+{"ID":"d1d3137eb08de6d8c5d9f44788c45a9fea9bb082e173bed29a0945b3347f2661","Type":"tree"}
{"ID":"e6fe3512ea23a4ebf040d30958c669f7ffe724400f155a756467a9f3cafc27c5","Type":"data"}
{"ID":"ed00928ce97ac5acd27c862d9097e606536e9063af1c47481257811f66260f3a","Type":"data"}
{"ID":"f3cd67d9c14d2a81663d63522ab914e465b021a3b65e2f1ea6caf7478f2ec139","Type":"data"}
+{"ID":"fb62dd9093c4958b019b90e591b2d36320ff381a24bdc9c5db3b8960ff94d174","Type":"tree"}
diff --git a/internal/restic/testing.go b/internal/restic/testing.go
index ebafdf651..004df627c 100644
--- a/internal/restic/testing.go
+++ b/internal/restic/testing.go
@@ -2,7 +2,6 @@ package restic
import (
"context"
- "encoding/json"
"fmt"
"io"
"math/rand"
@@ -19,12 +18,11 @@ func fakeFile(seed, size int64) io.Reader {
}
type fakeFileSystem struct {
- t testing.TB
- repo Repository
- duplication float32
- buf []byte
- chunker *chunker.Chunker
- rand *rand.Rand
+ t testing.TB
+ repo Repository
+ buf []byte
+ chunker *chunker.Chunker
+ rand *rand.Rand
}
// saveFile reads from rd and saves the blobs in the repository. The list of
@@ -51,13 +49,9 @@ func (fs *fakeFileSystem) saveFile(ctx context.Context, rd io.Reader) (blobs IDs
fs.t.Fatalf("unable to save chunk in repo: %v", err)
}
- id := Hash(chunk.Data)
- if !fs.blobIsKnown(BlobHandle{ID: id, Type: DataBlob}) {
- _, _, _, err := fs.repo.SaveBlob(ctx, DataBlob, chunk.Data, id, true)
- if err != nil {
- fs.t.Fatalf("error saving chunk: %v", err)
- }
-
+ id, _, _, err := fs.repo.SaveBlob(ctx, DataBlob, chunk.Data, ID{}, false)
+ if err != nil {
+ fs.t.Fatalf("error saving chunk: %v", err)
}
blobs = append(blobs, id)
@@ -72,30 +66,6 @@ const (
maxNodes = 15
)
-func (fs *fakeFileSystem) treeIsKnown(tree *Tree) (bool, []byte, ID) {
- data, err := json.Marshal(tree)
- if err != nil {
- fs.t.Fatalf("json.Marshal(tree) returned error: %v", err)
- return false, nil, ID{}
- }
- data = append(data, '\n')
-
- id := Hash(data)
- return fs.blobIsKnown(BlobHandle{ID: id, Type: TreeBlob}), data, id
-}
-
-func (fs *fakeFileSystem) blobIsKnown(bh BlobHandle) bool {
- if fs.rand.Float32() < fs.duplication {
- return false
- }
-
- if fs.repo.Index().Has(bh) {
- return true
- }
-
- return false
-}
-
// saveTree saves a tree of fake files in the repo and returns the ID.
func (fs *fakeFileSystem) saveTree(ctx context.Context, seed int64, depth int) ID {
rnd := rand.NewSource(seed)
@@ -134,16 +104,12 @@ func (fs *fakeFileSystem) saveTree(ctx context.Context, seed int64, depth int) I
tree.Nodes = append(tree.Nodes, node)
}
- known, buf, id := fs.treeIsKnown(&tree)
- if known {
- return id
- }
+ tree.Sort()
- _, _, _, err := fs.repo.SaveBlob(ctx, TreeBlob, buf, id, false)
+ id, err := SaveTree(ctx, fs.repo, &tree)
if err != nil {
- fs.t.Fatal(err)
+ fs.t.Fatalf("SaveTree returned error: %v", err)
}
-
return id
}
@@ -152,22 +118,20 @@ func (fs *fakeFileSystem) saveTree(ctx context.Context, seed int64, depth int) I
// also used as the snapshot's timestamp. The tree's depth can be specified
// with the parameter depth. The parameter duplication is a probability that
// the same blob will saved again.
-func TestCreateSnapshot(t testing.TB, repo Repository, at time.Time, depth int, duplication float32) *Snapshot {
+func TestCreateSnapshot(t testing.TB, repo Repository, at time.Time, depth int) *Snapshot {
seed := at.Unix()
t.Logf("create fake snapshot at %s with seed %d", at, seed)
fakedir := fmt.Sprintf("fakedir-at-%v", at.Format("2006-01-02 15:04:05"))
- snapshot, err := NewSnapshot([]string{fakedir}, []string{"test"}, "foo", time.Now())
+ snapshot, err := NewSnapshot([]string{fakedir}, []string{"test"}, "foo", at)
if err != nil {
t.Fatal(err)
}
- snapshot.Time = at
fs := fakeFileSystem{
- t: t,
- repo: repo,
- duplication: duplication,
- rand: rand.New(rand.NewSource(seed)),
+ t: t,
+ repo: repo,
+ rand: rand.New(rand.NewSource(seed)),
}
var wg errgroup.Group
@@ -209,6 +173,17 @@ func TestParseHandle(s string, t BlobType) BlobHandle {
}
// TestSetSnapshotID sets the snapshot's ID.
-func TestSetSnapshotID(t testing.TB, sn *Snapshot, id ID) {
+func TestSetSnapshotID(_ testing.TB, sn *Snapshot, id ID) {
sn.id = &id
}
+
+// ParseDurationOrPanic parses a duration from a string or panics if string is invalid.
+// The format is `6y5m234d37h`.
+func ParseDurationOrPanic(s string) Duration {
+ d, err := ParseDuration(s)
+ if err != nil {
+ panic(err)
+ }
+
+ return d
+}
diff --git a/internal/restic/testing_test.go b/internal/restic/testing_test.go
index 2af5c607e..760a53a52 100644
--- a/internal/restic/testing_test.go
+++ b/internal/restic/testing_test.go
@@ -39,7 +39,7 @@ func loadAllSnapshots(ctx context.Context, repo restic.Repository, excludeIDs re
func TestCreateSnapshot(t *testing.T) {
repo := repository.TestRepository(t)
for i := 0; i < testCreateSnapshots; i++ {
- restic.TestCreateSnapshot(t, repo, testSnapshotTime.Add(time.Duration(i)*time.Second), testDepth, 0)
+ restic.TestCreateSnapshot(t, repo, testSnapshotTime.Add(time.Duration(i)*time.Second), testDepth)
}
snapshots, err := loadAllSnapshots(context.TODO(), repo, restic.NewIDSet())
@@ -73,6 +73,6 @@ func BenchmarkTestCreateSnapshot(t *testing.B) {
t.ResetTimer()
for i := 0; i < t.N; i++ {
- restic.TestCreateSnapshot(t, repo, testSnapshotTime.Add(time.Duration(i)*time.Second), testDepth, 0)
+ restic.TestCreateSnapshot(t, repo, testSnapshotTime.Add(time.Duration(i)*time.Second), testDepth)
}
}
diff --git a/internal/restic/tree.go b/internal/restic/tree.go
index 373b36746..3c3e3ab56 100644
--- a/internal/restic/tree.go
+++ b/internal/restic/tree.go
@@ -5,7 +5,9 @@ import (
"context"
"encoding/json"
"fmt"
+ "path"
"sort"
+ "strings"
"github.com/restic/restic/internal/errors"
@@ -184,3 +186,32 @@ func (builder *TreeJSONBuilder) Finalize() ([]byte, error) {
builder.buf = bytes.Buffer{}
return buf, nil
}
+
+func FindTreeDirectory(ctx context.Context, repo BlobLoader, id *ID, dir string) (*ID, error) {
+ if id == nil {
+ return nil, errors.New("tree id is null")
+ }
+
+ dirs := strings.Split(path.Clean(dir), "/")
+ subfolder := ""
+
+ for _, name := range dirs {
+ if name == "" || name == "." {
+ continue
+ }
+ subfolder = path.Join(subfolder, name)
+ tree, err := LoadTree(ctx, repo, *id)
+ if err != nil {
+ return nil, fmt.Errorf("path %s: %w", subfolder, err)
+ }
+ node := tree.Find(name)
+ if node == nil {
+ return nil, fmt.Errorf("path %s: not found", subfolder)
+ }
+ if node.Type != "dir" || node.Subtree == nil {
+ return nil, fmt.Errorf("path %s: not a directory", subfolder)
+ }
+ id = node.Subtree
+ }
+ return id, nil
+}
diff --git a/internal/restic/tree_test.go b/internal/restic/tree_test.go
index fb25ca373..da674eb1c 100644
--- a/internal/restic/tree_test.go
+++ b/internal/restic/tree_test.go
@@ -210,3 +210,37 @@ func benchmarkLoadTree(t *testing.B, version uint) {
rtest.OK(t, err)
}
}
+
+func TestFindTreeDirectory(t *testing.T) {
+ repo := repository.TestRepository(t)
+ sn := restic.TestCreateSnapshot(t, repo, parseTimeUTC("2017-07-07 07:07:08"), 3)
+
+ for _, exp := range []struct {
+ subfolder string
+ id restic.ID
+ err error
+ }{
+ {"", restic.TestParseID("c25199703a67455b34cc0c6e49a8ac8861b268a5dd09dc5b2e31e7380973fc97"), nil},
+ {"/", restic.TestParseID("c25199703a67455b34cc0c6e49a8ac8861b268a5dd09dc5b2e31e7380973fc97"), nil},
+ {".", restic.TestParseID("c25199703a67455b34cc0c6e49a8ac8861b268a5dd09dc5b2e31e7380973fc97"), nil},
+ {"..", restic.ID{}, errors.New("path ..: not found")},
+ {"file-1", restic.ID{}, errors.New("path file-1: not a directory")},
+ {"dir-21", restic.TestParseID("76172f9dec15d7e4cb98d2993032e99f06b73b2f02ffea3b7cfd9e6b4d762712"), nil},
+ {"/dir-21", restic.TestParseID("76172f9dec15d7e4cb98d2993032e99f06b73b2f02ffea3b7cfd9e6b4d762712"), nil},
+ {"dir-21/", restic.TestParseID("76172f9dec15d7e4cb98d2993032e99f06b73b2f02ffea3b7cfd9e6b4d762712"), nil},
+ {"dir-21/dir-24", restic.TestParseID("74626b3fb2bd4b3e572b81a4059b3e912bcf2a8f69fecd9c187613b7173f13b1"), nil},
+ } {
+ t.Run("", func(t *testing.T) {
+ id, err := restic.FindTreeDirectory(context.TODO(), repo, sn.Tree, exp.subfolder)
+ if exp.err == nil {
+ rtest.OK(t, err)
+ rtest.Assert(t, exp.id == *id, "unexpected id, expected %v, got %v", exp.id, id)
+ } else {
+ rtest.Assert(t, exp.err.Error() == err.Error(), "unexpected err, expected %v, got %v", exp.err, err)
+ }
+ })
+ }
+
+ _, err := restic.FindTreeDirectory(context.TODO(), repo, nil, "")
+ rtest.Assert(t, err != nil, "missing error on null tree id")
+}
diff --git a/internal/restorer/filerestorer.go b/internal/restorer/filerestorer.go
index 2deef1cd2..3bb7489ba 100644
--- a/internal/restorer/filerestorer.go
+++ b/internal/restorer/filerestorer.go
@@ -12,6 +12,7 @@ import (
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/repository"
"github.com/restic/restic/internal/restic"
+ "github.com/restic/restic/internal/ui/restore"
)
// TODO if a blob is corrupt, there may be good blob copies in other packs
@@ -54,6 +55,7 @@ type fileRestorer struct {
filesWriter *filesWriter
zeroChunk restic.ID
sparse bool
+ progress *restore.Progress
dst string
files []*fileInfo
@@ -65,7 +67,8 @@ func newFileRestorer(dst string,
key *crypto.Key,
idx func(restic.BlobHandle) []restic.PackedBlob,
connections uint,
- sparse bool) *fileRestorer {
+ sparse bool,
+ progress *restore.Progress) *fileRestorer {
// as packs are streamed the concurrency is limited by IO
workerCount := int(connections)
@@ -77,6 +80,7 @@ func newFileRestorer(dst string,
filesWriter: newFilesWriter(workerCount),
zeroChunk: repository.ZeroChunk(),
sparse: sparse,
+ progress: progress,
workerCount: workerCount,
dst: dst,
Error: restorerAbortOnAllErrors,
@@ -177,6 +181,8 @@ func (r *fileRestorer) restoreFiles(ctx context.Context) error {
wg.Go(func() error {
for _, id := range packOrder {
pack := packs[id]
+ // allow garbage collection of packInfo
+ delete(packs, id)
select {
case <-ctx.Done():
return ctx.Err()
@@ -268,7 +274,13 @@ func (r *fileRestorer) downloadPack(ctx context.Context, pack *packInfo) error {
file.inProgress = true
createSize = file.size
}
- return r.filesWriter.writeToFile(r.targetPath(file.location), blobData, offset, createSize, file.sparse)
+ writeErr := r.filesWriter.writeToFile(r.targetPath(file.location), blobData, offset, createSize, file.sparse)
+
+ if r.progress != nil {
+ r.progress.AddProgress(file.location, uint64(len(blobData)), uint64(file.size))
+ }
+
+ return writeErr
}
err := sanitizeError(file, writeToFile())
if err != nil {
diff --git a/internal/restorer/filerestorer_test.go b/internal/restorer/filerestorer_test.go
index b39afa249..e798f2b8b 100644
--- a/internal/restorer/filerestorer_test.go
+++ b/internal/restorer/filerestorer_test.go
@@ -150,7 +150,7 @@ func newTestRepo(content []TestFile) *TestRepo {
func restoreAndVerify(t *testing.T, tempdir string, content []TestFile, files map[string]bool, sparse bool) {
repo := newTestRepo(content)
- r := newFileRestorer(tempdir, repo.loader, repo.key, repo.Lookup, 2, sparse)
+ r := newFileRestorer(tempdir, repo.loader, repo.key, repo.Lookup, 2, sparse, nil)
if files == nil {
r.files = repo.files
@@ -265,7 +265,7 @@ func TestErrorRestoreFiles(t *testing.T) {
return loadError
}
- r := newFileRestorer(tempdir, repo.loader, repo.key, repo.Lookup, 2, false)
+ r := newFileRestorer(tempdir, repo.loader, repo.key, repo.Lookup, 2, false, nil)
r.files = repo.files
err := r.restoreFiles(context.TODO())
@@ -304,7 +304,7 @@ func testPartialDownloadError(t *testing.T, part int) {
return loader(ctx, h, length, offset, fn)
}
- r := newFileRestorer(tempdir, repo.loader, repo.key, repo.Lookup, 2, false)
+ r := newFileRestorer(tempdir, repo.loader, repo.key, repo.Lookup, 2, false, nil)
r.files = repo.files
r.Error = func(s string, e error) error {
// ignore errors as in the `restore` command
diff --git a/internal/restorer/fileswriter.go b/internal/restorer/fileswriter.go
index 0a26101f4..589aa502a 100644
--- a/internal/restorer/fileswriter.go
+++ b/internal/restorer/fileswriter.go
@@ -6,6 +6,7 @@ import (
"github.com/cespare/xxhash/v2"
"github.com/restic/restic/internal/debug"
+ "github.com/restic/restic/internal/fs"
)
// writes blobs to target files.
@@ -72,7 +73,7 @@ func (w *filesWriter) writeToFile(path string, blob []byte, offset int64, create
return nil, err
}
} else {
- err := preallocateFile(wr.File, createSize)
+ err := fs.PreallocateFile(wr.File, createSize)
if err != nil {
// Just log the preallocate error but don't let it cause the restore process to fail.
// Preallocate might return an error if the filesystem (implementation) does not
diff --git a/internal/restorer/restorer.go b/internal/restorer/restorer.go
index 4dfe3c3a8..3c60aca1b 100644
--- a/internal/restorer/restorer.go
+++ b/internal/restorer/restorer.go
@@ -10,6 +10,7 @@ import (
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/fs"
"github.com/restic/restic/internal/restic"
+ restoreui "github.com/restic/restic/internal/ui/restore"
"golang.org/x/sync/errgroup"
)
@@ -20,6 +21,8 @@ type Restorer struct {
sn *restic.Snapshot
sparse bool
+ progress *restoreui.Progress
+
Error func(location string, err error) error
SelectFilter func(item string, dstpath string, node *restic.Node) (selectedForRestore bool, childMayBeSelected bool)
}
@@ -27,12 +30,14 @@ type Restorer struct {
var restorerAbortOnAllErrors = func(location string, err error) error { return err }
// NewRestorer creates a restorer preloaded with the content from the snapshot id.
-func NewRestorer(ctx context.Context, repo restic.Repository, sn *restic.Snapshot, sparse bool) *Restorer {
+func NewRestorer(repo restic.Repository, sn *restic.Snapshot, sparse bool,
+ progress *restoreui.Progress) *Restorer {
r := &Restorer{
repo: repo,
sparse: sparse,
Error: restorerAbortOnAllErrors,
SelectFilter: func(string, string, *restic.Node) (bool, bool) { return true, true },
+ progress: progress,
sn: sn,
}
@@ -161,12 +166,14 @@ func (res *Restorer) restoreNodeTo(ctx context.Context, node *restic.Node, targe
err := node.CreateAt(ctx, target, res.repo)
if err != nil {
debug.Log("node.CreateAt(%s) error %v", target, err)
+ return err
}
- if err == nil {
- err = res.restoreNodeMetadataTo(node, target, location)
+
+ if res.progress != nil {
+ res.progress.AddProgress(location, 0, 0)
}
- return err
+ return res.restoreNodeMetadataTo(node, target, location)
}
func (res *Restorer) restoreNodeMetadataTo(node *restic.Node, target, location string) error {
@@ -186,6 +193,11 @@ func (res *Restorer) restoreHardlinkAt(node *restic.Node, target, path, location
if err != nil {
return errors.WithStack(err)
}
+
+ if res.progress != nil {
+ res.progress.AddProgress(location, 0, 0)
+ }
+
// TODO investigate if hardlinks have separate metadata on any supported system
return res.restoreNodeMetadataTo(node, path, location)
}
@@ -200,6 +212,10 @@ func (res *Restorer) restoreEmptyFileAt(node *restic.Node, target, location stri
return err
}
+ if res.progress != nil {
+ res.progress.AddProgress(location, 0, 0)
+ }
+
return res.restoreNodeMetadataTo(node, target, location)
}
@@ -215,7 +231,8 @@ func (res *Restorer) RestoreTo(ctx context.Context, dst string) error {
}
idx := NewHardlinkIndex()
- filerestorer := newFileRestorer(dst, res.repo.Backend().Load, res.repo.Key(), res.repo.Index().Lookup, res.repo.Connections(), res.sparse)
+ filerestorer := newFileRestorer(dst, res.repo.Backend().Load, res.repo.Key(), res.repo.Index().Lookup,
+ res.repo.Connections(), res.sparse, res.progress)
filerestorer.Error = res.Error
debug.Log("first pass for %q", dst)
@@ -224,6 +241,9 @@ func (res *Restorer) RestoreTo(ctx context.Context, dst string) error {
_, err = res.traverseTree(ctx, dst, string(filepath.Separator), *res.sn.Tree, treeVisitor{
enterDir: func(node *restic.Node, target, location string) error {
debug.Log("first pass, enterDir: mkdir %q, leaveDir should restore metadata", location)
+ if res.progress != nil {
+ res.progress.AddFile(0)
+ }
// create dir with default permissions
// #leaveDir restores dir metadata after visiting all children
return fs.MkdirAll(target, 0700)
@@ -239,20 +259,34 @@ func (res *Restorer) RestoreTo(ctx context.Context, dst string) error {
}
if node.Type != "file" {
+ if res.progress != nil {
+ res.progress.AddFile(0)
+ }
return nil
}
if node.Size == 0 {
+ if res.progress != nil {
+ res.progress.AddFile(node.Size)
+ }
return nil // deal with empty files later
}
if node.Links > 1 {
if idx.Has(node.Inode, node.DeviceID) {
+ if res.progress != nil {
+ // a hardlinked file does not increase the restore size
+ res.progress.AddFile(0)
+ }
return nil
}
idx.Add(node.Inode, node.DeviceID, location)
}
+ if res.progress != nil {
+ res.progress.AddFile(node.Size)
+ }
+
filerestorer.addFile(location, node.Content, int64(node.Size))
return nil
@@ -291,7 +325,13 @@ func (res *Restorer) RestoreTo(ctx context.Context, dst string) error {
return res.restoreNodeMetadataTo(node, target, location)
},
- leaveDir: res.restoreNodeMetadataTo,
+ leaveDir: func(node *restic.Node, target, location string) error {
+ err := res.restoreNodeMetadataTo(node, target, location)
+ if err == nil && res.progress != nil {
+ res.progress.AddProgress(location, 0, 0)
+ }
+ return err
+ },
})
return err
}
diff --git a/internal/restorer/restorer_test.go b/internal/restorer/restorer_test.go
index d6cd0c80a..6c45d5556 100644
--- a/internal/restorer/restorer_test.go
+++ b/internal/restorer/restorer_test.go
@@ -325,7 +325,7 @@ func TestRestorer(t *testing.T) {
sn, id := saveSnapshot(t, repo, test.Snapshot)
t.Logf("snapshot saved as %v", id.Str())
- res := NewRestorer(context.TODO(), repo, sn, false)
+ res := NewRestorer(repo, sn, false, nil)
tempdir := rtest.TempDir(t)
// make sure we're creating a new subdir of the tempdir
@@ -442,7 +442,7 @@ func TestRestorerRelative(t *testing.T) {
sn, id := saveSnapshot(t, repo, test.Snapshot)
t.Logf("snapshot saved as %v", id.Str())
- res := NewRestorer(context.TODO(), repo, sn, false)
+ res := NewRestorer(repo, sn, false, nil)
tempdir := rtest.TempDir(t)
cleanup := rtest.Chdir(t, tempdir)
@@ -671,7 +671,7 @@ func TestRestorerTraverseTree(t *testing.T) {
repo := repository.TestRepository(t)
sn, _ := saveSnapshot(t, repo, test.Snapshot)
- res := NewRestorer(context.TODO(), repo, sn, false)
+ res := NewRestorer(repo, sn, false, nil)
res.SelectFilter = test.Select
@@ -747,7 +747,7 @@ func TestRestorerConsistentTimestampsAndPermissions(t *testing.T) {
},
})
- res := NewRestorer(context.TODO(), repo, sn, false)
+ res := NewRestorer(repo, sn, false, nil)
res.SelectFilter = func(item string, dstpath string, node *restic.Node) (selectedForRestore bool, childMayBeSelected bool) {
switch filepath.ToSlash(item) {
@@ -802,7 +802,7 @@ func TestVerifyCancel(t *testing.T) {
repo := repository.TestRepository(t)
sn, _ := saveSnapshot(t, repo, snapshot)
- res := NewRestorer(context.TODO(), repo, sn, false)
+ res := NewRestorer(repo, sn, false, nil)
tempdir := rtest.TempDir(t)
ctx, cancel := context.WithCancel(context.Background())
@@ -844,7 +844,7 @@ func TestRestorerSparseFiles(t *testing.T) {
archiver.SnapshotOptions{})
rtest.OK(t, err)
- res := NewRestorer(context.TODO(), repo, sn, true)
+ res := NewRestorer(repo, sn, true, nil)
tempdir := rtest.TempDir(t)
ctx, cancel := context.WithCancel(context.Background())
diff --git a/internal/restorer/restorer_unix_test.go b/internal/restorer/restorer_unix_test.go
index dc327a9c9..2c30a6b64 100644
--- a/internal/restorer/restorer_unix_test.go
+++ b/internal/restorer/restorer_unix_test.go
@@ -9,10 +9,12 @@ import (
"path/filepath"
"syscall"
"testing"
+ "time"
"github.com/restic/restic/internal/repository"
"github.com/restic/restic/internal/restic"
rtest "github.com/restic/restic/internal/test"
+ restoreui "github.com/restic/restic/internal/ui/restore"
)
func TestRestorerRestoreEmptyHardlinkedFileds(t *testing.T) {
@@ -29,7 +31,7 @@ func TestRestorerRestoreEmptyHardlinkedFileds(t *testing.T) {
},
})
- res := NewRestorer(context.TODO(), repo, sn, false)
+ res := NewRestorer(repo, sn, false, nil)
res.SelectFilter = func(item string, dstpath string, node *restic.Node) (selectedForRestore bool, childMayBeSelected bool) {
return true, true
@@ -66,3 +68,56 @@ func getBlockCount(t *testing.T, filename string) int64 {
}
return st.Blocks
}
+
+type printerMock struct {
+ filesFinished, filesTotal, allBytesWritten, allBytesTotal uint64
+}
+
+func (p *printerMock) Update(_, _, _, _ uint64, _ time.Duration) {
+}
+func (p *printerMock) Finish(filesFinished, filesTotal, allBytesWritten, allBytesTotal uint64, _ time.Duration) {
+ p.filesFinished = filesFinished
+ p.filesTotal = filesTotal
+ p.allBytesWritten = allBytesWritten
+ p.allBytesTotal = allBytesTotal
+}
+
+func TestRestorerProgressBar(t *testing.T) {
+ repo := repository.TestRepository(t)
+
+ sn, _ := saveSnapshot(t, repo, Snapshot{
+ Nodes: map[string]Node{
+ "dirtest": Dir{
+ Nodes: map[string]Node{
+ "file1": File{Links: 2, Inode: 1, Data: "foo"},
+ "file2": File{Links: 2, Inode: 1, Data: "foo"},
+ },
+ },
+ "file2": File{Links: 1, Inode: 2, Data: "example"},
+ },
+ })
+
+ mock := &printerMock{}
+ progress := restoreui.NewProgress(mock, 0)
+ res := NewRestorer(repo, sn, false, progress)
+ res.SelectFilter = func(item string, dstpath string, node *restic.Node) (selectedForRestore bool, childMayBeSelected bool) {
+ return true, true
+ }
+
+ tempdir := rtest.TempDir(t)
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ err := res.RestoreTo(ctx, tempdir)
+ rtest.OK(t, err)
+ progress.Finish()
+
+ const filesFinished = 4
+ const filesTotal = filesFinished
+ const allBytesWritten = 10
+ const allBytesTotal = allBytesWritten
+ rtest.Assert(t, mock.filesFinished == filesFinished, "filesFinished: expected %v, got %v", filesFinished, mock.filesFinished)
+ rtest.Assert(t, mock.filesTotal == filesTotal, "filesTotal: expected %v, got %v", filesTotal, mock.filesTotal)
+ rtest.Assert(t, mock.allBytesWritten == allBytesWritten, "allBytesWritten: expected %v, got %v", allBytesWritten, mock.allBytesWritten)
+ rtest.Assert(t, mock.allBytesTotal == allBytesTotal, "allBytesTotal: expected %v, got %v", allBytesTotal, mock.allBytesTotal)
+}
diff --git a/internal/selfupdate/download.go b/internal/selfupdate/download.go
index 2c7441e3e..271383d86 100644
--- a/internal/selfupdate/download.go
+++ b/internal/selfupdate/download.go
@@ -68,21 +68,21 @@ func extractToFile(buf []byte, filename, target string, printf func(string, ...i
// Write everything to a temp file
dir := filepath.Dir(target)
- new, err := os.CreateTemp(dir, "restic")
+ newFile, err := os.CreateTemp(dir, "restic")
if err != nil {
return err
}
- n, err := io.Copy(new, rd)
+ n, err := io.Copy(newFile, rd)
if err != nil {
- _ = new.Close()
- _ = os.Remove(new.Name())
+ _ = newFile.Close()
+ _ = os.Remove(newFile.Name())
return err
}
- if err = new.Sync(); err != nil {
+ if err = newFile.Sync(); err != nil {
return err
}
- if err = new.Close(); err != nil {
+ if err = newFile.Close(); err != nil {
return err
}
@@ -98,7 +98,7 @@ func extractToFile(buf []byte, filename, target string, printf func(string, ...i
}
// Rename the temp file to the final location atomically.
- if err := os.Rename(new.Name(), target); err != nil {
+ if err := os.Rename(newFile.Name(), target); err != nil {
return err
}
diff --git a/internal/selfupdate/download_unix.go b/internal/selfupdate/download_unix.go
index c6189e9d9..bc1762948 100644
--- a/internal/selfupdate/download_unix.go
+++ b/internal/selfupdate/download_unix.go
@@ -4,7 +4,7 @@
package selfupdate
// Remove the target binary.
-func removeResticBinary(dir, target string) error {
+func removeResticBinary(_, _ string) error {
// removed on rename on this platform
return nil
}
diff --git a/internal/test/helpers.go b/internal/test/helpers.go
index 93178ae10..65e3e36ec 100644
--- a/internal/test/helpers.go
+++ b/internal/test/helpers.go
@@ -3,13 +3,11 @@ package test
import (
"compress/bzip2"
"compress/gzip"
- "fmt"
"io"
"os"
"os/exec"
"path/filepath"
"reflect"
- "runtime"
"testing"
"github.com/restic/restic/internal/errors"
@@ -19,30 +17,28 @@ import (
// Assert fails the test if the condition is false.
func Assert(tb testing.TB, condition bool, msg string, v ...interface{}) {
+ tb.Helper()
if !condition {
- _, file, line, _ := runtime.Caller(1)
- fmt.Printf("\033[31m%s:%d: "+msg+"\033[39m\n\n", append([]interface{}{filepath.Base(file), line}, v...)...)
- tb.FailNow()
+ tb.Fatalf("\033[31m"+msg+"\033[39m\n\n", v...)
}
}
// OK fails the test if an err is not nil.
func OK(tb testing.TB, err error) {
+ tb.Helper()
if err != nil {
- _, file, line, _ := runtime.Caller(1)
- fmt.Printf("\033[31m%s:%d: unexpected error: %+v\033[39m\n\n", filepath.Base(file), line, err)
- tb.FailNow()
+ tb.Fatalf("\033[31munexpected error: %+v\033[39m\n\n", err)
}
}
// OKs fails the test if any error from errs is not nil.
func OKs(tb testing.TB, errs []error) {
+ tb.Helper()
errFound := false
for _, err := range errs {
if err != nil {
errFound = true
- _, file, line, _ := runtime.Caller(1)
- fmt.Printf("\033[31m%s:%d: unexpected error: %+v\033[39m\n\n", filepath.Base(file), line, err.Error())
+ tb.Logf("\033[31munexpected error: %+v\033[39m\n\n", err.Error())
}
}
if errFound {
@@ -52,10 +48,9 @@ func OKs(tb testing.TB, errs []error) {
// Equals fails the test if exp is not equal to act.
func Equals(tb testing.TB, exp, act interface{}) {
+ tb.Helper()
if !reflect.DeepEqual(exp, act) {
- _, file, line, _ := runtime.Caller(1)
- fmt.Printf("\033[31m%s:%d:\n\n\texp: %#v\n\n\tgot: %#v\033[39m\n\n", filepath.Base(file), line, exp, act)
- tb.FailNow()
+ tb.Fatalf("\033[31m\n\n\texp: %#v\n\n\tgot: %#v\033[39m\n\n", exp, act)
}
}
@@ -92,6 +87,7 @@ func Random(seed, count int) []byte {
// SetupTarTestFixture extracts the tarFile to outputDir.
func SetupTarTestFixture(t testing.TB, outputDir, tarFile string) {
+ t.Helper()
input, err := os.Open(tarFile)
OK(t, err)
defer func() {
@@ -130,6 +126,7 @@ func SetupTarTestFixture(t testing.TB, outputDir, tarFile string) {
// Env creates a test environment and extracts the repository fixture.
// Returned is the repo path and a cleanup function.
func Env(t testing.TB, repoFixture string) (repodir string, cleanup func()) {
+ t.Helper()
tempdir, err := os.MkdirTemp(TestTempDir, "restic-test-env-")
OK(t, err)
@@ -159,6 +156,7 @@ func isFile(fi os.FileInfo) bool {
// This is mainly used for tests on Windows, which is unable to delete a file
// set read-only.
func ResetReadOnly(t testing.TB, dir string) {
+ t.Helper()
err := filepath.Walk(dir, func(path string, fi os.FileInfo, err error) error {
if fi == nil {
return err
@@ -183,6 +181,7 @@ func ResetReadOnly(t testing.TB, dir string) {
// RemoveAll recursively resets the read-only flag of all files and dirs and
// afterwards uses os.RemoveAll() to remove the path.
func RemoveAll(t testing.TB, path string) {
+ t.Helper()
ResetReadOnly(t, path)
err := os.RemoveAll(path)
if errors.Is(err, os.ErrNotExist) {
@@ -194,6 +193,7 @@ func RemoveAll(t testing.TB, path string) {
// TempDir returns a temporary directory that is removed by t.Cleanup,
// except if TestCleanupTempDirs is set to false.
func TempDir(t testing.TB) string {
+ t.Helper()
tempdir, err := os.MkdirTemp(TestTempDir, "restic-test-")
if err != nil {
t.Fatal(err)
diff --git a/internal/ui/backup/json.go b/internal/ui/backup/json.go
index 85076b3bb..10f0e91fa 100644
--- a/internal/ui/backup/json.go
+++ b/internal/ui/backup/json.go
@@ -1,8 +1,6 @@
package backup
import (
- "bytes"
- "encoding/json"
"sort"
"time"
@@ -32,21 +30,12 @@ func NewJSONProgress(term *termstatus.Terminal, verbosity uint) *JSONProgress {
}
}
-func toJSONString(status interface{}) string {
- buf := new(bytes.Buffer)
- err := json.NewEncoder(buf).Encode(status)
- if err != nil {
- panic(err)
- }
- return buf.String()
-}
-
func (b *JSONProgress) print(status interface{}) {
- b.term.Print(toJSONString(status))
+ b.term.Print(ui.ToJSONString(status))
}
func (b *JSONProgress) error(status interface{}) {
- b.term.Error(toJSONString(status))
+ b.term.Error(ui.ToJSONString(status))
}
// Update updates the status lines.
@@ -99,7 +88,7 @@ func (b *JSONProgress) Error(item string, err error) error {
// CompleteItem is the status callback function for the archiver when a
// file/dir has been saved successfully.
-func (b *JSONProgress) CompleteItem(messageType, item string, previous, current *restic.Node, s archiver.ItemStats, d time.Duration) {
+func (b *JSONProgress) CompleteItem(messageType, item string, s archiver.ItemStats, d time.Duration) {
if b.v < 2 {
return
}
@@ -161,10 +150,10 @@ func (b *JSONProgress) CompleteItem(messageType, item string, previous, current
}
// ReportTotal sets the total stats up to now
-func (b *JSONProgress) ReportTotal(item string, start time.Time, s archiver.ScanStats) {
+func (b *JSONProgress) ReportTotal(start time.Time, s archiver.ScanStats) {
if b.v >= 2 {
b.print(verboseUpdate{
- MessageType: "status",
+ MessageType: "verbose_status",
Action: "scan_finished",
Duration: time.Since(start).Seconds(),
DataSize: s.Bytes,
diff --git a/internal/ui/backup/progress.go b/internal/ui/backup/progress.go
index 720a2a58f..4362a8c83 100644
--- a/internal/ui/backup/progress.go
+++ b/internal/ui/backup/progress.go
@@ -15,8 +15,8 @@ type ProgressPrinter interface {
Update(total, processed Counter, errors uint, currentFiles map[string]struct{}, start time.Time, secs uint64)
Error(item string, err error) error
ScannerError(item string, err error) error
- CompleteItem(messageType string, item string, previous, current *restic.Node, s archiver.ItemStats, d time.Duration)
- ReportTotal(item string, start time.Time, s archiver.ScanStats)
+ CompleteItem(messageType string, item string, s archiver.ItemStats, d time.Duration)
+ ReportTotal(start time.Time, s archiver.ScanStats)
Finish(snapshotID restic.ID, start time.Time, summary *Summary, dryRun bool)
Reset()
@@ -43,7 +43,8 @@ type Progress struct {
progress.Updater
mu sync.Mutex
- start time.Time
+ start time.Time
+ estimator rateEstimator
scanStarted, scanFinished bool
@@ -60,6 +61,7 @@ func NewProgress(printer ProgressPrinter, interval time.Duration) *Progress {
start: time.Now(),
currentFiles: make(map[string]struct{}),
printer: printer,
+ estimator: *newRateEstimator(time.Now()),
}
p.Updater = *progress.NewUpdater(interval, func(runtime time.Duration, final bool) {
if final {
@@ -73,9 +75,14 @@ func NewProgress(printer ProgressPrinter, interval time.Duration) *Progress {
var secondsRemaining uint64
if p.scanFinished {
- secs := float64(runtime / time.Second)
- todo := float64(p.total.Bytes - p.processed.Bytes)
- secondsRemaining = uint64(secs / float64(p.processed.Bytes) * todo)
+ rate := p.estimator.rate(time.Now())
+ tooSlowCutoff := 1024.
+ if rate <= tooSlowCutoff {
+ secondsRemaining = 0
+ } else {
+ todo := float64(p.total.Bytes - p.processed.Bytes)
+ secondsRemaining = uint64(todo / rate)
+ }
}
p.printer.Update(p.total, p.processed, p.errors, p.currentFiles, p.start, secondsRemaining)
@@ -105,6 +112,7 @@ func (p *Progress) addProcessed(c Counter) {
p.processed.Files += c.Files
p.processed.Dirs += c.Dirs
p.processed.Bytes += c.Bytes
+ p.estimator.recordBytes(time.Now(), c.Bytes)
p.scanStarted = true
}
@@ -144,19 +152,19 @@ func (p *Progress) CompleteItem(item string, previous, current *restic.Node, s a
switch {
case previous == nil:
- p.printer.CompleteItem("dir new", item, previous, current, s, d)
+ p.printer.CompleteItem("dir new", item, s, d)
p.mu.Lock()
p.summary.Dirs.New++
p.mu.Unlock()
case previous.Equals(*current):
- p.printer.CompleteItem("dir unchanged", item, previous, current, s, d)
+ p.printer.CompleteItem("dir unchanged", item, s, d)
p.mu.Lock()
p.summary.Dirs.Unchanged++
p.mu.Unlock()
default:
- p.printer.CompleteItem("dir modified", item, previous, current, s, d)
+ p.printer.CompleteItem("dir modified", item, s, d)
p.mu.Lock()
p.summary.Dirs.Changed++
p.mu.Unlock()
@@ -170,19 +178,19 @@ func (p *Progress) CompleteItem(item string, previous, current *restic.Node, s a
switch {
case previous == nil:
- p.printer.CompleteItem("file new", item, previous, current, s, d)
+ p.printer.CompleteItem("file new", item, s, d)
p.mu.Lock()
p.summary.Files.New++
p.mu.Unlock()
case previous.Equals(*current):
- p.printer.CompleteItem("file unchanged", item, previous, current, s, d)
+ p.printer.CompleteItem("file unchanged", item, s, d)
p.mu.Lock()
p.summary.Files.Unchanged++
p.mu.Unlock()
default:
- p.printer.CompleteItem("file modified", item, previous, current, s, d)
+ p.printer.CompleteItem("file modified", item, s, d)
p.mu.Lock()
p.summary.Files.Changed++
p.mu.Unlock()
@@ -200,7 +208,7 @@ func (p *Progress) ReportTotal(item string, s archiver.ScanStats) {
if item == "" {
p.scanFinished = true
- p.printer.ReportTotal(item, p.start, s)
+ p.printer.ReportTotal(p.start, s)
}
}
diff --git a/internal/ui/backup/progress_test.go b/internal/ui/backup/progress_test.go
index a7282c7da..79a56c91e 100644
--- a/internal/ui/backup/progress_test.go
+++ b/internal/ui/backup/progress_test.go
@@ -15,12 +15,12 @@ type mockPrinter struct {
id restic.ID
}
-func (p *mockPrinter) Update(total, processed Counter, errors uint, currentFiles map[string]struct{}, start time.Time, secs uint64) {
+func (p *mockPrinter) Update(_, _ Counter, _ uint, _ map[string]struct{}, _ time.Time, _ uint64) {
}
-func (p *mockPrinter) Error(item string, err error) error { return err }
-func (p *mockPrinter) ScannerError(item string, err error) error { return err }
+func (p *mockPrinter) Error(_ string, err error) error { return err }
+func (p *mockPrinter) ScannerError(_ string, err error) error { return err }
-func (p *mockPrinter) CompleteItem(messageType string, item string, previous, current *restic.Node, s archiver.ItemStats, d time.Duration) {
+func (p *mockPrinter) CompleteItem(messageType string, _ string, _ archiver.ItemStats, _ time.Duration) {
p.Lock()
defer p.Unlock()
@@ -32,8 +32,8 @@ func (p *mockPrinter) CompleteItem(messageType string, item string, previous, cu
}
}
-func (p *mockPrinter) ReportTotal(_ string, _ time.Time, _ archiver.ScanStats) {}
-func (p *mockPrinter) Finish(id restic.ID, _ time.Time, summary *Summary, dryRun bool) {
+func (p *mockPrinter) ReportTotal(_ time.Time, _ archiver.ScanStats) {}
+func (p *mockPrinter) Finish(id restic.ID, _ time.Time, summary *Summary, _ bool) {
p.Lock()
defer p.Unlock()
@@ -43,8 +43,8 @@ func (p *mockPrinter) Finish(id restic.ID, _ time.Time, summary *Summary, dryRun
func (p *mockPrinter) Reset() {}
-func (p *mockPrinter) P(msg string, args ...interface{}) {}
-func (p *mockPrinter) V(msg string, args ...interface{}) {}
+func (p *mockPrinter) P(_ string, _ ...interface{}) {}
+func (p *mockPrinter) V(_ string, _ ...interface{}) {}
func TestProgress(t *testing.T) {
t.Parallel()
diff --git a/internal/ui/backup/rate_estimator.go b/internal/ui/backup/rate_estimator.go
new file mode 100644
index 000000000..5291fbae1
--- /dev/null
+++ b/internal/ui/backup/rate_estimator.go
@@ -0,0 +1,98 @@
+package backup
+
+import (
+ "container/list"
+ "time"
+)
+
+// rateBucket represents a one second window of recorded progress.
+type rateBucket struct {
+ totalBytes uint64
+ end time.Time // the end of the time window, exclusive
+}
+
+// rateEstimator represents an estimate of the time to complete an operation.
+type rateEstimator struct {
+ buckets *list.List
+ start time.Time
+ totalBytes uint64
+}
+
+// newRateEstimator returns an estimator initialized to a presumed start time.
+func newRateEstimator(start time.Time) *rateEstimator {
+ return &rateEstimator{buckets: list.New(), start: start}
+}
+
+// See trim(), below.
+const (
+ bucketWidth = time.Second
+ minRateEstimatorBytes = 100 * 1000 * 1000
+ minRateEstimatorBuckets = 20
+ minRateEstimatorMinutes = 2
+)
+
+// trim removes the oldest history from the estimator assuming a given
+// current time.
+func (r *rateEstimator) trim(now time.Time) {
+ // The estimator retains byte transfer counts over a two minute window.
+ // However, to avoid removing too much history when transfer rates are
+ // low, the estimator also retains a minimum number of processed bytes
+ // across a minimum number of buckets. An operation that is processing a
+ // significant number of bytes per second will typically retain only a
+ // two minute window's worth of information. One that is making slow
+ // progress, such as one being over a rate limited connection, typically
+ // observes bursts of updates as infrequently as every ten or twenty
+ // seconds, in which case the other limiters will kick in. This heuristic
+ // avoids wildly fluctuating estimates over rate limited connections.
+ start := now.Add(-minRateEstimatorMinutes * time.Minute)
+
+ for e := r.buckets.Front(); e != nil; e = r.buckets.Front() {
+ if r.buckets.Len() <= minRateEstimatorBuckets {
+ break
+ }
+ b := e.Value.(*rateBucket)
+ if b.end.After(start) {
+ break
+ }
+ total := r.totalBytes - b.totalBytes
+ if total < minRateEstimatorBytes {
+ break
+ }
+ r.start = b.end
+ r.totalBytes = total
+ r.buckets.Remove(e)
+ }
+}
+
+// recordBytes records the transfer of a number of bytes at a given
+// time. Times passed in successive calls should advance monotonically (as
+// is the case with time.Now().
+func (r *rateEstimator) recordBytes(now time.Time, bytes uint64) {
+ if bytes == 0 {
+ return
+ }
+ var tail *rateBucket
+ if r.buckets.Len() > 0 {
+ tail = r.buckets.Back().Value.(*rateBucket)
+ }
+ if tail == nil || !tail.end.After(now) {
+ // The new bucket holds measurements in the time range [now .. now+1sec).
+ tail = &rateBucket{end: now.Add(bucketWidth)}
+ r.buckets.PushBack(tail)
+ }
+ tail.totalBytes += bytes
+ r.totalBytes += bytes
+ r.trim(now)
+}
+
+// rate returns an estimated bytes per second rate at a given time, or zero
+// if there is not enough data to compute a rate.
+func (r *rateEstimator) rate(now time.Time) float64 {
+ r.trim(now)
+ if !r.start.Before(now) {
+ return 0
+ }
+ elapsed := float64(now.Sub(r.start)) / float64(time.Second)
+ rate := float64(r.totalBytes) / elapsed
+ return rate
+}
diff --git a/internal/ui/backup/rate_estimator_test.go b/internal/ui/backup/rate_estimator_test.go
new file mode 100644
index 000000000..0ebc6972b
--- /dev/null
+++ b/internal/ui/backup/rate_estimator_test.go
@@ -0,0 +1,213 @@
+package backup
+
+import (
+ "fmt"
+ "math"
+ "testing"
+ "time"
+
+ rtest "github.com/restic/restic/internal/test"
+)
+
+const float64EqualityThreshold = 1e-6
+
+func almostEqual(a, b float64) bool {
+ if math.IsNaN(a) || math.IsNaN(b) {
+ panic("almostEqual passed a NaN")
+ }
+ return math.Abs(a-b) <= float64EqualityThreshold
+}
+
+func TestEstimatorDefault(t *testing.T) {
+ var start time.Time
+ e := newRateEstimator(start)
+ r := e.rate(start)
+ rtest.Assert(t, r == 0, "e.Rate == %v, want zero", r)
+ r = e.rate(start.Add(time.Hour))
+ rtest.Assert(t, r == 0, "e.Rate == %v, want zero", r)
+}
+
+func TestEstimatorSimple(t *testing.T) {
+ var start time.Time
+ type testcase struct {
+ bytes uint64
+ when time.Duration
+ rate float64
+ }
+
+ cases := []testcase{
+ {0, 0, 0},
+ {1, time.Second, 1},
+ {60, time.Second, 60},
+ {60, time.Minute, 1},
+ }
+ for _, c := range cases {
+ name := fmt.Sprintf("%+v", c)
+ t.Run(name, func(t *testing.T) {
+ e := newRateEstimator(start)
+ e.recordBytes(start.Add(time.Second), c.bytes)
+ rate := e.rate(start.Add(c.when))
+ rtest.Assert(t, almostEqual(rate, c.rate), "e.Rate == %v, want %v", rate, c.rate)
+ })
+ }
+}
+
+func TestBucketWidth(t *testing.T) {
+ var when time.Time
+
+ // Recording byte transfers within a bucket width's time window uses one
+ // bucket.
+ e := newRateEstimator(when)
+ e.recordBytes(when, 1)
+ e.recordBytes(when.Add(bucketWidth-time.Nanosecond), 1)
+ rtest.Assert(t, e.buckets.Len() == 1, "e.buckets.Len() is %d, want 1", e.buckets.Len())
+
+ b := e.buckets.Back().Value.(*rateBucket)
+ rtest.Assert(t, b.totalBytes == 2, "b.totalBytes is %d, want 2", b.totalBytes)
+ rtest.Assert(t, b.end == when.Add(bucketWidth), "b.end is %v, want %v", b.end, when.Add(bucketWidth))
+
+ // Recording a byte outside the bucket width causes another bucket.
+ e.recordBytes(when.Add(bucketWidth), 1)
+ rtest.Assert(t, e.buckets.Len() == 2, "e.buckets.Len() is %d, want 2", e.buckets.Len())
+
+ b = e.buckets.Back().Value.(*rateBucket)
+ rtest.Assert(t, b.totalBytes == 1, "b.totalBytes is %d, want 1", b.totalBytes)
+ rtest.Assert(t, b.end == when.Add(2*bucketWidth), "b.end is %v, want %v", b.end, when.Add(bucketWidth))
+
+ // Recording a byte after a longer delay creates a sparse bucket list.
+ e.recordBytes(when.Add(time.Hour+time.Millisecond), 7)
+ rtest.Assert(t, e.buckets.Len() == 3, "e.buckets.Len() is %d, want 3", e.buckets.Len())
+
+ b = e.buckets.Back().Value.(*rateBucket)
+ rtest.Assert(t, b.totalBytes == 7, "b.totalBytes is %d, want 7", b.totalBytes)
+ rtest.Equals(t, when.Add(time.Hour+time.Millisecond+time.Second), b.end)
+}
+
+type chunk struct {
+ repetitions uint64 // repetition count
+ bytes uint64 // byte count (every second)
+}
+
+func applyChunks(chunks []chunk, t time.Time, e *rateEstimator) time.Time {
+ for _, c := range chunks {
+ for i := uint64(0); i < c.repetitions; i++ {
+ e.recordBytes(t, c.bytes)
+ t = t.Add(time.Second)
+ }
+ }
+ return t
+}
+
+func TestEstimatorResponsiveness(t *testing.T) {
+ type testcase struct {
+ description string
+ chunks []chunk
+ rate float64
+ }
+
+ cases := []testcase{
+ {
+ "1000 bytes/sec over one second",
+ []chunk{
+ {1, 1000},
+ },
+ 1000,
+ },
+ {
+ "1000 bytes/sec over one minute",
+ []chunk{
+ {60, 1000},
+ },
+ 1000,
+ },
+ {
+ "1000 bytes/sec for 10 seconds, then 2000 bytes/sec for 10 seconds",
+ []chunk{
+ {10, 1000},
+ {10, 2000},
+ },
+ 1500,
+ },
+ {
+ "1000 bytes/sec for one minute, then 2000 bytes/sec for one minute",
+ []chunk{
+ {60, 1000},
+ {60, 2000},
+ },
+ 1500,
+ },
+ {
+ "rate doubles after 30 seconds",
+ []chunk{
+ {30, minRateEstimatorBytes},
+ {90, 2 * minRateEstimatorBytes},
+ },
+ minRateEstimatorBytes * 1.75,
+ },
+ {
+ "rate doubles after 31 seconds",
+ []chunk{
+ {31, minRateEstimatorBytes},
+ {90, 2 * minRateEstimatorBytes},
+ },
+ // The expected rate is the same as the prior test case because the
+ // first second has rolled off the estimator.
+ minRateEstimatorBytes * 1.75,
+ },
+ {
+ "rate doubles after 90 seconds",
+ []chunk{
+ {90, minRateEstimatorBytes},
+ {90, 2 * minRateEstimatorBytes},
+ },
+ // The expected rate is the same as the prior test case because the
+ // first 60 seconds have rolled off the estimator.
+ minRateEstimatorBytes * 1.75,
+ },
+ {
+ "rate doubles for two full minutes",
+ []chunk{
+ {60, minRateEstimatorBytes},
+ {120, 2 * minRateEstimatorBytes},
+ },
+ 2 * minRateEstimatorBytes,
+ },
+ {
+ "rate falls to zero",
+ []chunk{
+ {30, minRateEstimatorBytes},
+ {30, 0},
+ },
+ minRateEstimatorBytes / 2,
+ },
+ {
+ "rate falls to zero for extended time",
+ []chunk{
+ {60, 1000},
+ {300, 0},
+ },
+ 1000 * 60 / (60 + 300.0),
+ },
+ {
+ "rate falls to zero for extended time (from high rate)",
+ []chunk{
+ {2 * minRateEstimatorBuckets, minRateEstimatorBytes},
+ {300, 0},
+ },
+ // Expect that only minRateEstimatorBuckets buckets are used in the
+ // rate estimate.
+ minRateEstimatorBytes * minRateEstimatorBuckets /
+ (minRateEstimatorBuckets + 300.0),
+ },
+ }
+
+ for _, c := range cases {
+ t.Run(c.description, func(t *testing.T) {
+ var w time.Time
+ e := newRateEstimator(w)
+ w = applyChunks(c.chunks, w, e)
+ r := e.rate(w)
+ rtest.Assert(t, almostEqual(r, c.rate), "e.Rate == %f, want %f", r, c.rate)
+ })
+ }
+}
diff --git a/internal/ui/backup/text.go b/internal/ui/backup/text.go
index acb2a8d3a..215982cd4 100644
--- a/internal/ui/backup/text.go
+++ b/internal/ui/backup/text.go
@@ -72,20 +72,20 @@ func (b *TextProgress) Update(total, processed Counter, errors uint, currentFile
// ScannerError is the error callback function for the scanner, it prints the
// error in verbose mode and returns nil.
-func (b *TextProgress) ScannerError(item string, err error) error {
+func (b *TextProgress) ScannerError(_ string, err error) error {
b.V("scan: %v\n", err)
return nil
}
// Error is the error callback function for the archiver, it prints the error and returns nil.
-func (b *TextProgress) Error(item string, err error) error {
+func (b *TextProgress) Error(_ string, err error) error {
b.E("error: %v\n", err)
return nil
}
// CompleteItem is the status callback function for the archiver when a
// file/dir has been saved successfully.
-func (b *TextProgress) CompleteItem(messageType, item string, previous, current *restic.Node, s archiver.ItemStats, d time.Duration) {
+func (b *TextProgress) CompleteItem(messageType, item string, s archiver.ItemStats, d time.Duration) {
item = termstatus.Quote(item)
switch messageType {
@@ -111,7 +111,7 @@ func (b *TextProgress) CompleteItem(messageType, item string, previous, current
}
// ReportTotal sets the total stats up to now
-func (b *TextProgress) ReportTotal(item string, start time.Time, s archiver.ScanStats) {
+func (b *TextProgress) ReportTotal(start time.Time, s archiver.ScanStats) {
b.V("scan finished in %.3fs: %v files, %s",
time.Since(start).Seconds(),
s.Files, ui.FormatBytes(s.Bytes),
@@ -126,7 +126,7 @@ func (b *TextProgress) Reset() {
}
// Finish prints the finishing messages.
-func (b *TextProgress) Finish(snapshotID restic.ID, start time.Time, summary *Summary, dryRun bool) {
+func (b *TextProgress) Finish(_ restic.ID, start time.Time, summary *Summary, dryRun bool) {
b.P("\n")
b.P("Files: %5d new, %5d changed, %5d unmodified\n", summary.Files.New, summary.Files.Changed, summary.Files.Unchanged)
b.P("Dirs: %5d new, %5d changed, %5d unmodified\n", summary.Dirs.New, summary.Dirs.Changed, summary.Dirs.Unchanged)
diff --git a/internal/ui/format.go b/internal/ui/format.go
index 13d02f9e3..d2e0a4d2b 100644
--- a/internal/ui/format.go
+++ b/internal/ui/format.go
@@ -1,7 +1,12 @@
package ui
import (
+ "bytes"
+ "encoding/json"
+ "errors"
"fmt"
+ "math/bits"
+ "strconv"
"time"
)
@@ -53,3 +58,50 @@ func FormatSeconds(sec uint64) string {
}
return fmt.Sprintf("%d:%02d", min, sec)
}
+
+// ParseBytes parses a size in bytes from s. It understands the suffixes
+// B, K, M, G and T for powers of 1024.
+func ParseBytes(s string) (int64, error) {
+ if s == "" {
+ return 0, errors.New("expected size, got empty string")
+ }
+
+ numStr := s[:len(s)-1]
+ var unit uint64 = 1
+
+ switch s[len(s)-1] {
+ case 'b', 'B':
+ // use initialized values, do nothing here
+ case 'k', 'K':
+ unit = 1024
+ case 'm', 'M':
+ unit = 1024 * 1024
+ case 'g', 'G':
+ unit = 1024 * 1024 * 1024
+ case 't', 'T':
+ unit = 1024 * 1024 * 1024 * 1024
+ default:
+ numStr = s
+ }
+ value, err := strconv.ParseInt(numStr, 10, 64)
+ if err != nil {
+ return 0, err
+ }
+
+ hi, lo := bits.Mul64(uint64(value), unit)
+ value = int64(lo)
+ if hi != 0 || value < 0 {
+ return 0, fmt.Errorf("ParseSize: %q: %w", numStr, strconv.ErrRange)
+ }
+
+ return value, nil
+}
+
+func ToJSONString(status interface{}) string {
+ buf := new(bytes.Buffer)
+ err := json.NewEncoder(buf).Encode(status)
+ if err != nil {
+ panic(err)
+ }
+ return buf.String()
+}
diff --git a/internal/ui/format_test.go b/internal/ui/format_test.go
index b6a1c13d1..4223d4e20 100644
--- a/internal/ui/format_test.go
+++ b/internal/ui/format_test.go
@@ -1,6 +1,10 @@
package ui
-import "testing"
+import (
+ "testing"
+
+ "github.com/restic/restic/internal/test"
+)
func TestFormatBytes(t *testing.T) {
for _, c := range []struct {
@@ -36,3 +40,47 @@ func TestFormatPercent(t *testing.T) {
}
}
}
+
+func TestParseBytes(t *testing.T) {
+ for _, tt := range []struct {
+ in string
+ expected int64
+ }{
+ {"1024", 1024},
+ {"1024b", 1024},
+ {"1024B", 1024},
+ {"1k", 1024},
+ {"100k", 102400},
+ {"100K", 102400},
+ {"10M", 10485760},
+ {"100m", 104857600},
+ {"20G", 21474836480},
+ {"10g", 10737418240},
+ {"2T", 2199023255552},
+ {"2t", 2199023255552},
+ {"9223372036854775807", 1<<63 - 1},
+ } {
+ actual, err := ParseBytes(tt.in)
+ test.OK(t, err)
+ test.Equals(t, tt.expected, actual)
+ }
+}
+
+func TestParseBytesInvalid(t *testing.T) {
+ for _, s := range []string{
+ "",
+ " ",
+ "foobar",
+ "zzz",
+ "18446744073709551615", // 1<<64-1.
+ "9223372036854775807k", // 1<<63-1 kiB.
+ "9999999999999M",
+ "99999999999999999999",
+ } {
+ v, err := ParseBytes(s)
+ if err == nil {
+ t.Errorf("wanted error for invalid value %q, got nil", s)
+ }
+ test.Equals(t, int64(0), v)
+ }
+}
diff --git a/internal/ui/progress/counter_test.go b/internal/ui/progress/counter_test.go
index 49c694e06..a0bb22d5a 100644
--- a/internal/ui/progress/counter_test.go
+++ b/internal/ui/progress/counter_test.go
@@ -59,7 +59,7 @@ func TestCounter(t *testing.T) {
t.Log("number of calls:", ncalls)
}
-func TestCounterNil(t *testing.T) {
+func TestCounterNil(_ *testing.T) {
// Shouldn't panic.
var c *progress.Counter
c.Add(1)
diff --git a/internal/ui/progress/updater_test.go b/internal/ui/progress/updater_test.go
index 5b5207dd5..45db019ba 100644
--- a/internal/ui/progress/updater_test.go
+++ b/internal/ui/progress/updater_test.go
@@ -26,7 +26,8 @@ func TestUpdater(t *testing.T) {
test.Assert(t, ncalls > 0, "no progress was reported")
}
-func TestUpdaterStopTwice(t *testing.T) {
+func TestUpdaterStopTwice(_ *testing.T) {
+ // must not panic
c := progress.NewUpdater(0, func(runtime time.Duration, final bool) {})
c.Done()
c.Done()
diff --git a/internal/ui/restore/json.go b/internal/ui/restore/json.go
new file mode 100644
index 000000000..c1b95b00b
--- /dev/null
+++ b/internal/ui/restore/json.go
@@ -0,0 +1,69 @@
+package restore
+
+import (
+ "time"
+
+ "github.com/restic/restic/internal/ui"
+)
+
+type jsonPrinter struct {
+ terminal term
+}
+
+func NewJSONProgress(terminal term) ProgressPrinter {
+ return &jsonPrinter{
+ terminal: terminal,
+ }
+}
+
+func (t *jsonPrinter) print(status interface{}) {
+ t.terminal.Print(ui.ToJSONString(status))
+}
+
+func (t *jsonPrinter) Update(filesFinished, filesTotal, allBytesWritten, allBytesTotal uint64, duration time.Duration) {
+ status := statusUpdate{
+ MessageType: "status",
+ SecondsElapsed: uint64(duration / time.Second),
+ TotalFiles: filesTotal,
+ FilesRestored: filesFinished,
+ TotalBytes: allBytesTotal,
+ BytesRestored: allBytesWritten,
+ }
+
+ if allBytesTotal > 0 {
+ status.PercentDone = float64(allBytesWritten) / float64(allBytesTotal)
+ }
+
+ t.print(status)
+}
+
+func (t *jsonPrinter) Finish(filesFinished, filesTotal, allBytesWritten, allBytesTotal uint64, duration time.Duration) {
+ status := summaryOutput{
+ MessageType: "summary",
+ SecondsElapsed: uint64(duration / time.Second),
+ TotalFiles: filesTotal,
+ FilesRestored: filesFinished,
+ TotalBytes: allBytesTotal,
+ BytesRestored: allBytesWritten,
+ }
+ t.print(status)
+}
+
+type statusUpdate struct {
+ MessageType string `json:"message_type"` // "status"
+ SecondsElapsed uint64 `json:"seconds_elapsed,omitempty"`
+ PercentDone float64 `json:"percent_done"`
+ TotalFiles uint64 `json:"total_files,omitempty"`
+ FilesRestored uint64 `json:"files_restored,omitempty"`
+ TotalBytes uint64 `json:"total_bytes,omitempty"`
+ BytesRestored uint64 `json:"bytes_restored,omitempty"`
+}
+
+type summaryOutput struct {
+ MessageType string `json:"message_type"` // "summary"
+ SecondsElapsed uint64 `json:"seconds_elapsed,omitempty"`
+ TotalFiles uint64 `json:"total_files,omitempty"`
+ FilesRestored uint64 `json:"files_restored,omitempty"`
+ TotalBytes uint64 `json:"total_bytes,omitempty"`
+ BytesRestored uint64 `json:"bytes_restored,omitempty"`
+}
diff --git a/internal/ui/restore/json_test.go b/internal/ui/restore/json_test.go
new file mode 100644
index 000000000..7bcabb4d7
--- /dev/null
+++ b/internal/ui/restore/json_test.go
@@ -0,0 +1,29 @@
+package restore
+
+import (
+ "testing"
+ "time"
+
+ "github.com/restic/restic/internal/test"
+)
+
+func TestJSONPrintUpdate(t *testing.T) {
+ term := &mockTerm{}
+ printer := NewJSONProgress(term)
+ printer.Update(3, 11, 29, 47, 5*time.Second)
+ test.Equals(t, []string{"{\"message_type\":\"status\",\"seconds_elapsed\":5,\"percent_done\":0.6170212765957447,\"total_files\":11,\"files_restored\":3,\"total_bytes\":47,\"bytes_restored\":29}\n"}, term.output)
+}
+
+func TestJSONPrintSummaryOnSuccess(t *testing.T) {
+ term := &mockTerm{}
+ printer := NewJSONProgress(term)
+ printer.Finish(11, 11, 47, 47, 5*time.Second)
+ test.Equals(t, []string{"{\"message_type\":\"summary\",\"seconds_elapsed\":5,\"total_files\":11,\"files_restored\":11,\"total_bytes\":47,\"bytes_restored\":47}\n"}, term.output)
+}
+
+func TestJSONPrintSummaryOnErrors(t *testing.T) {
+ term := &mockTerm{}
+ printer := NewJSONProgress(term)
+ printer.Finish(3, 11, 29, 47, 5*time.Second)
+ test.Equals(t, []string{"{\"message_type\":\"summary\",\"seconds_elapsed\":5,\"total_files\":11,\"files_restored\":3,\"total_bytes\":47,\"bytes_restored\":29}\n"}, term.output)
+}
diff --git a/internal/ui/restore/progress.go b/internal/ui/restore/progress.go
new file mode 100644
index 000000000..f2bd5d38b
--- /dev/null
+++ b/internal/ui/restore/progress.go
@@ -0,0 +1,90 @@
+package restore
+
+import (
+ "sync"
+ "time"
+
+ "github.com/restic/restic/internal/ui/progress"
+)
+
+type Progress struct {
+ updater progress.Updater
+ m sync.Mutex
+
+ progressInfoMap map[string]progressInfoEntry
+ filesFinished uint64
+ filesTotal uint64
+ allBytesWritten uint64
+ allBytesTotal uint64
+ started time.Time
+
+ printer ProgressPrinter
+}
+
+type progressInfoEntry struct {
+ bytesWritten uint64
+ bytesTotal uint64
+}
+
+type term interface {
+ Print(line string)
+ SetStatus(lines []string)
+}
+
+type ProgressPrinter interface {
+ Update(filesFinished, filesTotal, allBytesWritten, allBytesTotal uint64, duration time.Duration)
+ Finish(filesFinished, filesTotal, allBytesWritten, allBytesTotal uint64, duration time.Duration)
+}
+
+func NewProgress(printer ProgressPrinter, interval time.Duration) *Progress {
+ p := &Progress{
+ progressInfoMap: make(map[string]progressInfoEntry),
+ started: time.Now(),
+ printer: printer,
+ }
+ p.updater = *progress.NewUpdater(interval, p.update)
+ return p
+}
+
+func (p *Progress) update(runtime time.Duration, final bool) {
+ p.m.Lock()
+ defer p.m.Unlock()
+
+ if !final {
+ p.printer.Update(p.filesFinished, p.filesTotal, p.allBytesWritten, p.allBytesTotal, runtime)
+ } else {
+ p.printer.Finish(p.filesFinished, p.filesTotal, p.allBytesWritten, p.allBytesTotal, runtime)
+ }
+}
+
+// AddFile starts tracking a new file with the given size
+func (p *Progress) AddFile(size uint64) {
+ p.m.Lock()
+ defer p.m.Unlock()
+
+ p.filesTotal++
+ p.allBytesTotal += size
+}
+
+// AddProgress accumulates the number of bytes written for a file
+func (p *Progress) AddProgress(name string, bytesWrittenPortion uint64, bytesTotal uint64) {
+ p.m.Lock()
+ defer p.m.Unlock()
+
+ entry, exists := p.progressInfoMap[name]
+ if !exists {
+ entry.bytesTotal = bytesTotal
+ }
+ entry.bytesWritten += bytesWrittenPortion
+ p.progressInfoMap[name] = entry
+
+ p.allBytesWritten += bytesWrittenPortion
+ if entry.bytesWritten == entry.bytesTotal {
+ delete(p.progressInfoMap, name)
+ p.filesFinished++
+ }
+}
+
+func (p *Progress) Finish() {
+ p.updater.Done()
+}
diff --git a/internal/ui/restore/progress_test.go b/internal/ui/restore/progress_test.go
new file mode 100644
index 000000000..9e625aa20
--- /dev/null
+++ b/internal/ui/restore/progress_test.go
@@ -0,0 +1,137 @@
+package restore
+
+import (
+ "testing"
+ "time"
+
+ "github.com/restic/restic/internal/test"
+)
+
+type printerTraceEntry struct {
+ filesFinished, filesTotal, allBytesWritten, allBytesTotal uint64
+
+ duration time.Duration
+ isFinished bool
+}
+
+type printerTrace []printerTraceEntry
+
+type mockPrinter struct {
+ trace printerTrace
+}
+
+const mockFinishDuration = 42 * time.Second
+
+func (p *mockPrinter) Update(filesFinished, filesTotal, allBytesWritten, allBytesTotal uint64, duration time.Duration) {
+ p.trace = append(p.trace, printerTraceEntry{filesFinished, filesTotal, allBytesWritten, allBytesTotal, duration, false})
+}
+func (p *mockPrinter) Finish(filesFinished, filesTotal, allBytesWritten, allBytesTotal uint64, _ time.Duration) {
+ p.trace = append(p.trace, printerTraceEntry{filesFinished, filesTotal, allBytesWritten, allBytesTotal, mockFinishDuration, true})
+}
+
+func testProgress(fn func(progress *Progress) bool) printerTrace {
+ printer := &mockPrinter{}
+ progress := NewProgress(printer, 0)
+ final := fn(progress)
+ progress.update(0, final)
+ trace := append(printerTrace{}, printer.trace...)
+ // cleanup to avoid goroutine leak, but copy trace first
+ progress.Finish()
+ return trace
+}
+
+func TestNew(t *testing.T) {
+ result := testProgress(func(progress *Progress) bool {
+ return false
+ })
+ test.Equals(t, printerTrace{
+ printerTraceEntry{0, 0, 0, 0, 0, false},
+ }, result)
+}
+
+func TestAddFile(t *testing.T) {
+ fileSize := uint64(100)
+
+ result := testProgress(func(progress *Progress) bool {
+ progress.AddFile(fileSize)
+ return false
+ })
+ test.Equals(t, printerTrace{
+ printerTraceEntry{0, 1, 0, fileSize, 0, false},
+ }, result)
+}
+
+func TestFirstProgressOnAFile(t *testing.T) {
+ expectedBytesWritten := uint64(5)
+ expectedBytesTotal := uint64(100)
+
+ result := testProgress(func(progress *Progress) bool {
+ progress.AddFile(expectedBytesTotal)
+ progress.AddProgress("test", expectedBytesWritten, expectedBytesTotal)
+ return false
+ })
+ test.Equals(t, printerTrace{
+ printerTraceEntry{0, 1, expectedBytesWritten, expectedBytesTotal, 0, false},
+ }, result)
+}
+
+func TestLastProgressOnAFile(t *testing.T) {
+ fileSize := uint64(100)
+
+ result := testProgress(func(progress *Progress) bool {
+ progress.AddFile(fileSize)
+ progress.AddProgress("test", 30, fileSize)
+ progress.AddProgress("test", 35, fileSize)
+ progress.AddProgress("test", 35, fileSize)
+ return false
+ })
+ test.Equals(t, printerTrace{
+ printerTraceEntry{1, 1, fileSize, fileSize, 0, false},
+ }, result)
+}
+
+func TestLastProgressOnLastFile(t *testing.T) {
+ fileSize := uint64(100)
+
+ result := testProgress(func(progress *Progress) bool {
+ progress.AddFile(fileSize)
+ progress.AddFile(50)
+ progress.AddProgress("test1", 50, 50)
+ progress.AddProgress("test2", 50, fileSize)
+ progress.AddProgress("test2", 50, fileSize)
+ return false
+ })
+ test.Equals(t, printerTrace{
+ printerTraceEntry{2, 2, 50 + fileSize, 50 + fileSize, 0, false},
+ }, result)
+}
+
+func TestSummaryOnSuccess(t *testing.T) {
+ fileSize := uint64(100)
+
+ result := testProgress(func(progress *Progress) bool {
+ progress.AddFile(fileSize)
+ progress.AddFile(50)
+ progress.AddProgress("test1", 50, 50)
+ progress.AddProgress("test2", fileSize, fileSize)
+ return true
+ })
+ test.Equals(t, printerTrace{
+ printerTraceEntry{2, 2, 50 + fileSize, 50 + fileSize, mockFinishDuration, true},
+ }, result)
+}
+
+func TestSummaryOnErrors(t *testing.T) {
+ fileSize := uint64(100)
+
+ result := testProgress(func(progress *Progress) bool {
+ progress.AddFile(fileSize)
+ progress.AddFile(50)
+ progress.AddProgress("test1", 50, 50)
+ progress.AddProgress("test2", fileSize/2, fileSize)
+ return true
+ })
+ test.Equals(t, printerTrace{
+ printerTraceEntry{1, 2, 50 + fileSize/2, 50 + fileSize, mockFinishDuration, true},
+ }, result)
+}
diff --git a/internal/ui/restore/text.go b/internal/ui/restore/text.go
new file mode 100644
index 000000000..e6465eed0
--- /dev/null
+++ b/internal/ui/restore/text.go
@@ -0,0 +1,47 @@
+package restore
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/restic/restic/internal/ui"
+)
+
+type textPrinter struct {
+ terminal term
+}
+
+func NewTextProgress(terminal term) ProgressPrinter {
+ return &textPrinter{
+ terminal: terminal,
+ }
+}
+
+func (t *textPrinter) Update(filesFinished, filesTotal, allBytesWritten, allBytesTotal uint64, duration time.Duration) {
+ timeLeft := ui.FormatDuration(duration)
+ formattedAllBytesWritten := ui.FormatBytes(allBytesWritten)
+ formattedAllBytesTotal := ui.FormatBytes(allBytesTotal)
+ allPercent := ui.FormatPercent(allBytesWritten, allBytesTotal)
+ progress := fmt.Sprintf("[%s] %s %v files %s, total %v files %v",
+ timeLeft, allPercent, filesFinished, formattedAllBytesWritten, filesTotal, formattedAllBytesTotal)
+
+ t.terminal.SetStatus([]string{progress})
+}
+
+func (t *textPrinter) Finish(filesFinished, filesTotal, allBytesWritten, allBytesTotal uint64, duration time.Duration) {
+ t.terminal.SetStatus([]string{})
+
+ timeLeft := ui.FormatDuration(duration)
+ formattedAllBytesTotal := ui.FormatBytes(allBytesTotal)
+
+ var summary string
+ if filesFinished == filesTotal && allBytesWritten == allBytesTotal {
+ summary = fmt.Sprintf("Summary: Restored %d Files (%s) in %s", filesTotal, formattedAllBytesTotal, timeLeft)
+ } else {
+ formattedAllBytesWritten := ui.FormatBytes(allBytesWritten)
+ summary = fmt.Sprintf("Summary: Restored %d / %d Files (%s / %s) in %s",
+ filesFinished, filesTotal, formattedAllBytesWritten, formattedAllBytesTotal, timeLeft)
+ }
+
+ t.terminal.Print(summary)
+}
diff --git a/internal/ui/restore/text_test.go b/internal/ui/restore/text_test.go
new file mode 100644
index 000000000..2a8c90878
--- /dev/null
+++ b/internal/ui/restore/text_test.go
@@ -0,0 +1,41 @@
+package restore
+
+import (
+ "testing"
+ "time"
+
+ "github.com/restic/restic/internal/test"
+)
+
+type mockTerm struct {
+ output []string
+}
+
+func (m *mockTerm) Print(line string) {
+ m.output = append(m.output, line)
+}
+
+func (m *mockTerm) SetStatus(lines []string) {
+ m.output = append([]string{}, lines...)
+}
+
+func TestPrintUpdate(t *testing.T) {
+ term := &mockTerm{}
+ printer := NewTextProgress(term)
+ printer.Update(3, 11, 29, 47, 5*time.Second)
+ test.Equals(t, []string{"[0:05] 61.70% 3 files 29 B, total 11 files 47 B"}, term.output)
+}
+
+func TestPrintSummaryOnSuccess(t *testing.T) {
+ term := &mockTerm{}
+ printer := NewTextProgress(term)
+ printer.Finish(11, 11, 47, 47, 5*time.Second)
+ test.Equals(t, []string{"Summary: Restored 11 Files (47 B) in 0:05"}, term.output)
+}
+
+func TestPrintSummaryOnErrors(t *testing.T) {
+ term := &mockTerm{}
+ printer := NewTextProgress(term)
+ printer.Finish(3, 11, 29, 47, 5*time.Second)
+ test.Equals(t, []string{"Summary: Restored 3 / 11 Files (29 B / 47 B) in 0:05"}, term.output)
+}
diff --git a/internal/ui/table/table.go b/internal/ui/table/table.go
index e14ea263c..c3ae47f54 100644
--- a/internal/ui/table/table.go
+++ b/internal/ui/table/table.go
@@ -177,10 +177,10 @@ func (t *Table) Write(w io.Writer) error {
// write all the lines
for i, line := range lines {
- print := func(w io.Writer, s string) error {
+ printer := func(w io.Writer, s string) error {
return t.PrintData(w, i, s)
}
- err := printLine(w, print, t.CellSeparator, line, columnWidths)
+ err := printLine(w, printer, t.CellSeparator, line, columnWidths)
if err != nil {
return err
}
diff --git a/internal/ui/termstatus/background_linux.go b/internal/ui/termstatus/background_linux.go
index 2c32faf17..db96c2c53 100644
--- a/internal/ui/termstatus/background_linux.go
+++ b/internal/ui/termstatus/background_linux.go
@@ -18,6 +18,10 @@ func IsProcessBackground(fd uintptr) bool {
}
func isProcessBackground(fd uintptr) (bool, error) {
- pid, err := unix.IoctlGetInt(int(fd), unix.TIOCGPGRP)
- return pid != unix.Getpgrp(), err
+ // We need to use IoctlGetUint32 here, because pid_t is 32-bit even on
+ // 64-bit Linux. IoctlGetInt doesn't work on big-endian platforms:
+ // https://github.com/golang/go/issues/45585
+ // https://github.com/golang/go/issues/60429
+ pid, err := unix.IoctlGetUint32(int(fd), unix.TIOCGPGRP)
+ return int(pid) != unix.Getpgrp(), err
}
diff --git a/internal/ui/termstatus/status.go b/internal/ui/termstatus/status.go
index a1b7a5fcc..6e8ddfe7c 100644
--- a/internal/ui/termstatus/status.go
+++ b/internal/ui/termstatus/status.go
@@ -74,8 +74,8 @@ func New(wr io.Writer, errWriter io.Writer, disableStatus bool) *Terminal {
// only use the fancy status code when we're running on a real terminal.
t.canUpdateStatus = true
t.fd = d.Fd()
- t.clearCurrentLine = clearCurrentLine(wr, t.fd)
- t.moveCursorUp = moveCursorUp(wr, t.fd)
+ t.clearCurrentLine = clearCurrentLine(t.fd)
+ t.moveCursorUp = moveCursorUp(t.fd)
}
return t
@@ -303,26 +303,50 @@ func Truncate(s string, w int) string {
return s
}
- for i, r := range s {
+ for i := uint(0); i < uint(len(s)); {
+ utfsize := uint(1) // UTF-8 encoding size of first rune in s.
w--
- if r > unicode.MaxASCII && wideRune(r) {
- w--
+
+ if s[i] > unicode.MaxASCII {
+ var wide bool
+ if wide, utfsize = wideRune(s[i:]); wide {
+ w--
+ }
}
if w < 0 {
return s[:i]
}
+ i += utfsize
}
return s
}
-// Guess whether r would occupy two terminal cells instead of one.
-// This cannot be determined exactly without knowing the terminal font,
-// so we treat all ambigous runes as full-width, i.e., two cells.
-func wideRune(r rune) bool {
- kind := width.LookupRune(r).Kind()
- return kind != width.Neutral && kind != width.EastAsianNarrow
+// Guess whether the first rune in s would occupy two terminal cells
+// instead of one. This cannot be determined exactly without knowing
+// the terminal font, so we treat all ambiguous runes as full-width,
+// i.e., two cells.
+func wideRune(s string) (wide bool, utfsize uint) {
+ prop, size := width.LookupString(s)
+ kind := prop.Kind()
+ wide = kind != width.Neutral && kind != width.EastAsianNarrow
+ return wide, uint(size)
+}
+
+func sanitizeLines(lines []string, width int) []string {
+ // Sanitize lines and truncate them if they're too long.
+ for i, line := range lines {
+ line = Quote(line)
+ if width > 0 {
+ line = Truncate(line, width-2)
+ }
+ if i < len(lines)-1 { // Last line gets no line break.
+ line += "\n"
+ }
+ lines[i] = line
+ }
+ return lines
}
// SetStatus updates the status lines.
@@ -343,16 +367,7 @@ func (t *Terminal) SetStatus(lines []string) {
}
}
- // Sanitize lines and truncate them if they're too long.
- for i, line := range lines {
- line = Quote(line)
- if width > 0 {
- line = Truncate(line, width-2)
- }
- if i < len(lines)-1 { // Last line gets no line break.
- lines[i] = line + "\n"
- }
- }
+ sanitizeLines(lines, width)
select {
case t.status <- status{lines: lines}:
diff --git a/internal/ui/termstatus/status_test.go b/internal/ui/termstatus/status_test.go
index 40a908deb..b59063076 100644
--- a/internal/ui/termstatus/status_test.go
+++ b/internal/ui/termstatus/status_test.go
@@ -1,12 +1,54 @@
package termstatus
import (
+ "bytes"
+ "context"
+ "fmt"
+ "io"
"strconv"
"testing"
rtest "github.com/restic/restic/internal/test"
)
+func TestSetStatus(t *testing.T) {
+ var buf bytes.Buffer
+ term := New(&buf, io.Discard, false)
+
+ term.canUpdateStatus = true
+ term.fd = ^uintptr(0)
+ term.clearCurrentLine = posixClearCurrentLine
+ term.moveCursorUp = posixMoveCursorUp
+
+ ctx, cancel := context.WithCancel(context.Background())
+ go term.Run(ctx)
+
+ const (
+ clear = posixControlClearLine
+ home = posixControlMoveCursorHome
+ up = posixControlMoveCursorUp
+ )
+
+ term.SetStatus([]string{"first"})
+ exp := home + clear + "first" + home
+
+ term.SetStatus([]string{"foo", "bar", "baz"})
+ exp += home + clear + "foo\n" + home + clear + "bar\n" +
+ home + clear + "baz" + home + up + up
+
+ term.SetStatus([]string{"quux", "needs\nquote"})
+ exp += home + clear + "quux\n" +
+ home + clear + "\"needs\\nquote\"\n" +
+ home + clear + home + up + up // Third line implicit.
+
+ cancel()
+ exp += home + clear + "\n" + home + clear + "\n" +
+ home + up + up // Status cleared.
+
+ <-term.closed
+ rtest.Equals(t, exp, buf.String())
+}
+
func TestQuote(t *testing.T) {
for _, c := range []struct {
in string
@@ -79,11 +121,35 @@ func BenchmarkTruncateASCII(b *testing.B) {
func BenchmarkTruncateUnicode(b *testing.B) {
s := "Hello World or Καλημέρα κόσμε or こんにちは 世界"
w := 0
- for _, r := range s {
+ for i := 0; i < len(s); {
w++
- if wideRune(r) {
+ wide, utfsize := wideRune(s[i:])
+ if wide {
w++
}
+ i += int(utfsize)
}
+ b.ResetTimer()
+
benchmarkTruncate(b, s, w-1)
}
+
+func TestSanitizeLines(t *testing.T) {
+ var tests = []struct {
+ input []string
+ width int
+ output []string
+ }{
+ {[]string{""}, 80, []string{""}},
+ {[]string{"too long test line"}, 10, []string{"too long"}},
+ {[]string{"too long test line", "text"}, 10, []string{"too long\n", "text"}},
+ {[]string{"too long test line", "second long test line"}, 10, []string{"too long\n", "second l"}},
+ }
+
+ for _, test := range tests {
+ t.Run(fmt.Sprintf("%s %d", test.input, test.width), func(t *testing.T) {
+ out := sanitizeLines(test.input, test.width)
+ rtest.Equals(t, test.output, out)
+ })
+ }
+}
diff --git a/internal/ui/termstatus/terminal_posix.go b/internal/ui/termstatus/terminal_posix.go
index c16a2d989..ca5468f45 100644
--- a/internal/ui/termstatus/terminal_posix.go
+++ b/internal/ui/termstatus/terminal_posix.go
@@ -15,7 +15,7 @@ const (
// posixClearCurrentLine removes all characters from the current line and resets the
// cursor position to the first column.
-func posixClearCurrentLine(wr io.Writer, fd uintptr) {
+func posixClearCurrentLine(wr io.Writer, _ uintptr) {
// clear current line
_, err := wr.Write([]byte(posixControlMoveCursorHome + posixControlClearLine))
if err != nil {
@@ -25,7 +25,7 @@ func posixClearCurrentLine(wr io.Writer, fd uintptr) {
}
// posixMoveCursorUp moves the cursor to the line n lines above the current one.
-func posixMoveCursorUp(wr io.Writer, fd uintptr, n int) {
+func posixMoveCursorUp(wr io.Writer, _ uintptr, n int) {
data := []byte(posixControlMoveCursorHome)
data = append(data, bytes.Repeat([]byte(posixControlMoveCursorUp), n)...)
_, err := wr.Write(data)
diff --git a/internal/ui/termstatus/terminal_unix.go b/internal/ui/termstatus/terminal_unix.go
index 719016939..e112be233 100644
--- a/internal/ui/termstatus/terminal_unix.go
+++ b/internal/ui/termstatus/terminal_unix.go
@@ -12,12 +12,12 @@ import (
// clearCurrentLine removes all characters from the current line and resets the
// cursor position to the first column.
-func clearCurrentLine(wr io.Writer, fd uintptr) func(io.Writer, uintptr) {
+func clearCurrentLine(_ uintptr) func(io.Writer, uintptr) {
return posixClearCurrentLine
}
// moveCursorUp moves the cursor to the line n lines above the current one.
-func moveCursorUp(wr io.Writer, fd uintptr) func(io.Writer, uintptr, int) {
+func moveCursorUp(_ uintptr) func(io.Writer, uintptr, int) {
return posixMoveCursorUp
}
diff --git a/internal/ui/termstatus/terminal_windows.go b/internal/ui/termstatus/terminal_windows.go
index d1358c022..7bf5b0a37 100644
--- a/internal/ui/termstatus/terminal_windows.go
+++ b/internal/ui/termstatus/terminal_windows.go
@@ -15,7 +15,7 @@ import (
// clearCurrentLine removes all characters from the current line and resets the
// cursor position to the first column.
-func clearCurrentLine(wr io.Writer, fd uintptr) func(io.Writer, uintptr) {
+func clearCurrentLine(fd uintptr) func(io.Writer, uintptr) {
// easy case, the terminal is cmd or psh, without redirection
if isWindowsTerminal(fd) {
return windowsClearCurrentLine
@@ -26,7 +26,7 @@ func clearCurrentLine(wr io.Writer, fd uintptr) func(io.Writer, uintptr) {
}
// moveCursorUp moves the cursor to the line n lines above the current one.
-func moveCursorUp(wr io.Writer, fd uintptr) func(io.Writer, uintptr, int) {
+func moveCursorUp(fd uintptr) func(io.Writer, uintptr, int) {
// easy case, the terminal is cmd or psh, without redirection
if isWindowsTerminal(fd) {
return windowsMoveCursorUp
@@ -45,7 +45,7 @@ var (
// windowsClearCurrentLine removes all characters from the current line and
// resets the cursor position to the first column.
-func windowsClearCurrentLine(wr io.Writer, fd uintptr) {
+func windowsClearCurrentLine(_ io.Writer, fd uintptr) {
var info windows.ConsoleScreenBufferInfo
windows.GetConsoleScreenBufferInfo(windows.Handle(fd), &info)
@@ -61,7 +61,7 @@ func windowsClearCurrentLine(wr io.Writer, fd uintptr) {
}
// windowsMoveCursorUp moves the cursor to the line n lines above the current one.
-func windowsMoveCursorUp(wr io.Writer, fd uintptr, n int) {
+func windowsMoveCursorUp(_ io.Writer, fd uintptr, n int) {
var info windows.ConsoleScreenBufferInfo
windows.GetConsoleScreenBufferInfo(windows.Handle(fd), &info)
diff --git a/internal/walker/rewriter.go b/internal/walker/rewriter.go
index 6f063831e..649857032 100644
--- a/internal/walker/rewriter.go
+++ b/internal/walker/rewriter.go
@@ -9,13 +9,47 @@ import (
"github.com/restic/restic/internal/restic"
)
-// SelectByNameFunc returns true for all items that should be included (files and
-// dirs). If false is returned, files are ignored and dirs are not even walked.
-type SelectByNameFunc func(item string) bool
+type NodeRewriteFunc func(node *restic.Node, path string) *restic.Node
+type FailedTreeRewriteFunc func(nodeID restic.ID, path string, err error) (restic.ID, error)
-type TreeFilterVisitor struct {
- SelectByName SelectByNameFunc
- PrintExclude func(string)
+type RewriteOpts struct {
+ // return nil to remove the node
+ RewriteNode NodeRewriteFunc
+ // decide what to do with a tree that could not be loaded. Return nil to remove the node. By default the load error is returned which causes the operation to fail.
+ RewriteFailedTree FailedTreeRewriteFunc
+
+ AllowUnstableSerialization bool
+ DisableNodeCache bool
+}
+
+type idMap map[restic.ID]restic.ID
+
+type TreeRewriter struct {
+ opts RewriteOpts
+
+ replaces idMap
+}
+
+func NewTreeRewriter(opts RewriteOpts) *TreeRewriter {
+ rw := &TreeRewriter{
+ opts: opts,
+ }
+ if !opts.DisableNodeCache {
+ rw.replaces = make(idMap)
+ }
+ // setup default implementations
+ if rw.opts.RewriteNode == nil {
+ rw.opts.RewriteNode = func(node *restic.Node, path string) *restic.Node {
+ return node
+ }
+ }
+ if rw.opts.RewriteFailedTree == nil {
+ // fail with error by default
+ rw.opts.RewriteFailedTree = func(nodeID restic.ID, path string, err error) (restic.ID, error) {
+ return restic.ID{}, err
+ }
+ }
+ return rw
}
type BlobLoadSaver interface {
@@ -23,51 +57,58 @@ type BlobLoadSaver interface {
restic.BlobLoader
}
-func FilterTree(ctx context.Context, repo BlobLoadSaver, nodepath string, nodeID restic.ID, visitor *TreeFilterVisitor) (newNodeID restic.ID, err error) {
- curTree, err := restic.LoadTree(ctx, repo, nodeID)
- if err != nil {
- return restic.ID{}, err
+func (t *TreeRewriter) RewriteTree(ctx context.Context, repo BlobLoadSaver, nodepath string, nodeID restic.ID) (newNodeID restic.ID, err error) {
+ // check if tree was already changed
+ newID, ok := t.replaces[nodeID]
+ if ok {
+ return newID, nil
}
- // check that we can properly encode this tree without losing information
- // The alternative of using json/Decoder.DisallowUnknownFields() doesn't work as we use
- // a custom UnmarshalJSON to decode trees, see also https://github.com/golang/go/issues/41144
- testID, err := restic.SaveTree(ctx, repo, curTree)
+ // a nil nodeID will lead to a load error
+ curTree, err := restic.LoadTree(ctx, repo, nodeID)
if err != nil {
- return restic.ID{}, err
+ return t.opts.RewriteFailedTree(nodeID, nodepath, err)
}
- if nodeID != testID {
- return restic.ID{}, fmt.Errorf("cannot encode tree at %q without loosing information", nodepath)
+
+ if !t.opts.AllowUnstableSerialization {
+ // check that we can properly encode this tree without losing information
+ // The alternative of using json/Decoder.DisallowUnknownFields() doesn't work as we use
+ // a custom UnmarshalJSON to decode trees, see also https://github.com/golang/go/issues/41144
+ testID, err := restic.SaveTree(ctx, repo, curTree)
+ if err != nil {
+ return restic.ID{}, err
+ }
+ if nodeID != testID {
+ return restic.ID{}, fmt.Errorf("cannot encode tree at %q without losing information", nodepath)
+ }
}
debug.Log("filterTree: %s, nodeId: %s\n", nodepath, nodeID.Str())
- changed := false
tb := restic.NewTreeJSONBuilder()
for _, node := range curTree.Nodes {
path := path.Join(nodepath, node.Name)
- if !visitor.SelectByName(path) {
- if visitor.PrintExclude != nil {
- visitor.PrintExclude(path)
- }
- changed = true
+ node = t.opts.RewriteNode(node, path)
+ if node == nil {
continue
}
- if node.Subtree == nil {
+ if node.Type != "dir" {
err = tb.AddNode(node)
if err != nil {
return restic.ID{}, err
}
continue
}
- newID, err := FilterTree(ctx, repo, path, *node.Subtree, visitor)
+ // treat nil as null id
+ var subtree restic.ID
+ if node.Subtree != nil {
+ subtree = *node.Subtree
+ }
+ newID, err := t.RewriteTree(ctx, repo, path, subtree)
if err != nil {
return restic.ID{}, err
}
- if !node.Subtree.Equal(newID) {
- changed = true
- }
node.Subtree = &newID
err = tb.AddNode(node)
if err != nil {
@@ -75,17 +116,18 @@ func FilterTree(ctx context.Context, repo BlobLoadSaver, nodepath string, nodeID
}
}
- if changed {
- tree, err := tb.Finalize()
- if err != nil {
- return restic.ID{}, err
- }
+ tree, err := tb.Finalize()
+ if err != nil {
+ return restic.ID{}, err
+ }
- // Save new tree
- newTreeID, _, _, err := repo.SaveBlob(ctx, restic.TreeBlob, tree, restic.ID{}, false)
+ // Save new tree
+ newTreeID, _, _, err := repo.SaveBlob(ctx, restic.TreeBlob, tree, restic.ID{}, false)
+ if t.replaces != nil {
+ t.replaces[nodeID] = newTreeID
+ }
+ if !newTreeID.Equal(nodeID) {
debug.Log("filterTree: save new tree for %s as %v\n", nodepath, newTreeID)
- return newTreeID, err
}
-
- return nodeID, nil
+ return newTreeID, err
}
diff --git a/internal/walker/rewriter_test.go b/internal/walker/rewriter_test.go
index 3dcf0ac9e..716217ac6 100644
--- a/internal/walker/rewriter_test.go
+++ b/internal/walker/rewriter_test.go
@@ -5,9 +5,9 @@ import (
"fmt"
"testing"
- "github.com/google/go-cmp/cmp"
"github.com/pkg/errors"
"github.com/restic/restic/internal/restic"
+ "github.com/restic/restic/internal/test"
)
// WritableTreeMap also support saving
@@ -15,7 +15,7 @@ type WritableTreeMap struct {
TreeMap
}
-func (t WritableTreeMap) SaveBlob(ctx context.Context, tpe restic.BlobType, buf []byte, id restic.ID, storeDuplicate bool) (newID restic.ID, known bool, size int, err error) {
+func (t WritableTreeMap) SaveBlob(_ context.Context, tpe restic.BlobType, buf []byte, id restic.ID, _ bool) (newID restic.ID, known bool, size int, err error) {
if tpe != restic.TreeBlob {
return restic.ID{}, false, 0, errors.New("can only save trees")
}
@@ -38,26 +38,26 @@ func (t WritableTreeMap) Dump() {
}
}
-type checkRewriteFunc func(t testing.TB) (visitor TreeFilterVisitor, final func(testing.TB))
+type checkRewriteFunc func(t testing.TB) (rewriter *TreeRewriter, final func(testing.TB))
// checkRewriteItemOrder ensures that the order of the 'path' arguments is the one passed in as 'want'.
func checkRewriteItemOrder(want []string) checkRewriteFunc {
pos := 0
- return func(t testing.TB) (visitor TreeFilterVisitor, final func(testing.TB)) {
- vis := TreeFilterVisitor{
- SelectByName: func(path string) bool {
+ return func(t testing.TB) (rewriter *TreeRewriter, final func(testing.TB)) {
+ rewriter = NewTreeRewriter(RewriteOpts{
+ RewriteNode: func(node *restic.Node, path string) *restic.Node {
if pos >= len(want) {
t.Errorf("additional unexpected path found: %v", path)
- return false
+ return nil
}
if path != want[pos] {
t.Errorf("wrong path found, want %q, got %q", want[pos], path)
}
pos++
- return true
+ return node
},
- }
+ })
final = func(t testing.TB) {
if pos != len(want) {
@@ -65,21 +65,20 @@ func checkRewriteItemOrder(want []string) checkRewriteFunc {
}
}
- return vis, final
+ return rewriter, final
}
}
-// checkRewriteSkips excludes nodes if path is in skipFor, it checks that all excluded entries are printed.
-func checkRewriteSkips(skipFor map[string]struct{}, want []string) checkRewriteFunc {
+// checkRewriteSkips excludes nodes if path is in skipFor, it checks that rewriting proceeds in the correct order.
+func checkRewriteSkips(skipFor map[string]struct{}, want []string, disableCache bool) checkRewriteFunc {
var pos int
- printed := make(map[string]struct{})
- return func(t testing.TB) (visitor TreeFilterVisitor, final func(testing.TB)) {
- vis := TreeFilterVisitor{
- SelectByName: func(path string) bool {
+ return func(t testing.TB) (rewriter *TreeRewriter, final func(testing.TB)) {
+ rewriter = NewTreeRewriter(RewriteOpts{
+ RewriteNode: func(node *restic.Node, path string) *restic.Node {
if pos >= len(want) {
t.Errorf("additional unexpected path found: %v", path)
- return false
+ return nil
}
if path != want[pos] {
@@ -87,27 +86,40 @@ func checkRewriteSkips(skipFor map[string]struct{}, want []string) checkRewriteF
}
pos++
- _, ok := skipFor[path]
- return !ok
- },
- PrintExclude: func(s string) {
- if _, ok := printed[s]; ok {
- t.Errorf("path was already printed %v", s)
+ _, skip := skipFor[path]
+ if skip {
+ return nil
}
- printed[s] = struct{}{}
+ return node
},
- }
+ DisableNodeCache: disableCache,
+ })
final = func(t testing.TB) {
- if !cmp.Equal(skipFor, printed) {
- t.Errorf("unexpected paths skipped: %s", cmp.Diff(skipFor, printed))
- }
if pos != len(want) {
t.Errorf("not enough items returned, want %d, got %d", len(want), pos)
}
}
- return vis, final
+ return rewriter, final
+ }
+}
+
+// checkIncreaseNodeSize modifies each node by changing its size.
+func checkIncreaseNodeSize(increase uint64) checkRewriteFunc {
+ return func(t testing.TB) (rewriter *TreeRewriter, final func(testing.TB)) {
+ rewriter = NewTreeRewriter(RewriteOpts{
+ RewriteNode: func(node *restic.Node, path string) *restic.Node {
+ if node.Type == "file" {
+ node.Size += increase
+ }
+ return node
+ },
+ })
+
+ final = func(t testing.TB) {}
+
+ return rewriter, final
}
}
@@ -150,6 +162,7 @@ func TestRewriter(t *testing.T) {
"/subdir",
"/subdir/subfile",
},
+ false,
),
},
{ // exclude dir
@@ -170,6 +183,91 @@ func TestRewriter(t *testing.T) {
"/foo",
"/subdir",
},
+ false,
+ ),
+ },
+ { // modify node
+ tree: TestTree{
+ "foo": TestFile{Size: 21},
+ "subdir": TestTree{
+ "subfile": TestFile{Size: 21},
+ },
+ },
+ newTree: TestTree{
+ "foo": TestFile{Size: 42},
+ "subdir": TestTree{
+ "subfile": TestFile{Size: 42},
+ },
+ },
+ check: checkIncreaseNodeSize(21),
+ },
+ { // test cache
+ tree: TestTree{
+ // both subdirs are identical
+ "subdir1": TestTree{
+ "subfile": TestFile{},
+ "subfile2": TestFile{},
+ },
+ "subdir2": TestTree{
+ "subfile": TestFile{},
+ "subfile2": TestFile{},
+ },
+ },
+ newTree: TestTree{
+ "subdir1": TestTree{
+ "subfile2": TestFile{},
+ },
+ "subdir2": TestTree{
+ "subfile2": TestFile{},
+ },
+ },
+ check: checkRewriteSkips(
+ map[string]struct{}{
+ "/subdir1/subfile": {},
+ },
+ []string{
+ "/subdir1",
+ "/subdir1/subfile",
+ "/subdir1/subfile2",
+ "/subdir2",
+ },
+ false,
+ ),
+ },
+ { // test disabled cache
+ tree: TestTree{
+ // both subdirs are identical
+ "subdir1": TestTree{
+ "subfile": TestFile{},
+ "subfile2": TestFile{},
+ },
+ "subdir2": TestTree{
+ "subfile": TestFile{},
+ "subfile2": TestFile{},
+ },
+ },
+ newTree: TestTree{
+ "subdir1": TestTree{
+ "subfile2": TestFile{},
+ },
+ "subdir2": TestTree{
+ "subfile": TestFile{},
+ "subfile2": TestFile{},
+ },
+ },
+ check: checkRewriteSkips(
+ map[string]struct{}{
+ "/subdir1/subfile": {},
+ },
+ []string{
+ "/subdir1",
+ "/subdir1/subfile",
+ "/subdir1/subfile2",
+ "/subdir2",
+ "/subdir2/subfile",
+ "/subdir2/subfile2",
+ },
+ true,
),
},
}
@@ -186,8 +284,8 @@ func TestRewriter(t *testing.T) {
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
- vis, last := test.check(t)
- newRoot, err := FilterTree(ctx, modrepo, "/", root, &vis)
+ rewriter, last := test.check(t)
+ newRoot, err := rewriter.RewriteTree(ctx, modrepo, "/", root)
if err != nil {
t.Error(err)
}
@@ -213,10 +311,56 @@ func TestRewriterFailOnUnknownFields(t *testing.T) {
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
- // use nil visitor to crash if the tree loading works unexpectedly
- _, err := FilterTree(ctx, tm, "/", id, nil)
+
+ rewriter := NewTreeRewriter(RewriteOpts{
+ RewriteNode: func(node *restic.Node, path string) *restic.Node {
+ // tree loading must not succeed
+ t.Fail()
+ return node
+ },
+ })
+ _, err := rewriter.RewriteTree(ctx, tm, "/", id)
if err == nil {
t.Error("missing error on unknown field")
}
+
+ // check that the serialization check can be disabled
+ rewriter = NewTreeRewriter(RewriteOpts{
+ AllowUnstableSerialization: true,
+ })
+ root, err := rewriter.RewriteTree(ctx, tm, "/", id)
+ test.OK(t, err)
+ _, expRoot := BuildTreeMap(TestTree{
+ "subfile": TestFile{},
+ })
+ test.Assert(t, root == expRoot, "mismatched trees")
+}
+
+func TestRewriterTreeLoadError(t *testing.T) {
+ tm := WritableTreeMap{TreeMap{}}
+ id := restic.NewRandomID()
+
+ ctx, cancel := context.WithCancel(context.TODO())
+ defer cancel()
+
+	// also check that a load error by default causes the operation to fail
+ rewriter := NewTreeRewriter(RewriteOpts{})
+ _, err := rewriter.RewriteTree(ctx, tm, "/", id)
+ if err == nil {
+ t.Fatal("missing error on unloadable tree")
+ }
+
+ replacementID := restic.NewRandomID()
+ rewriter = NewTreeRewriter(RewriteOpts{
+ RewriteFailedTree: func(nodeID restic.ID, path string, err error) (restic.ID, error) {
+ if nodeID != id || path != "/" {
+ t.Fail()
+ }
+ return replacementID, nil
+ },
+ })
+ newRoot, err := rewriter.RewriteTree(ctx, tm, "/", id)
+ test.OK(t, err)
+ test.Equals(t, replacementID, newRoot)
}
diff --git a/internal/walker/walker_test.go b/internal/walker/walker_test.go
index 6c4fd3436..54cc69792 100644
--- a/internal/walker/walker_test.go
+++ b/internal/walker/walker_test.go
@@ -14,7 +14,9 @@ import (
type TestTree map[string]interface{}
// TestNode is used to test the walker.
-type TestFile struct{}
+type TestFile struct {
+ Size uint64
+}
func BuildTreeMap(tree TestTree) (m TreeMap, root restic.ID) {
m = TreeMap{}
@@ -37,6 +39,7 @@ func buildTreeMap(tree TestTree, m TreeMap) restic.ID {
err := tb.AddNode(&restic.Node{
Name: name,
Type: "file",
+ Size: elem.Size,
})
if err != nil {
panic(err)
@@ -73,7 +76,7 @@ func buildTreeMap(tree TestTree, m TreeMap) restic.ID {
// TreeMap returns the trees from the map on LoadTree.
type TreeMap map[restic.ID][]byte
-func (t TreeMap) LoadBlob(ctx context.Context, tpe restic.BlobType, id restic.ID, buf []byte) ([]byte, error) {
+func (t TreeMap) LoadBlob(_ context.Context, tpe restic.BlobType, id restic.ID, _ []byte) ([]byte, error) {
if tpe != restic.TreeBlob {
return nil, errors.New("can only load trees")
}