From 3738e19a3433f6d8d2b843ac6fa0091bcb732ba2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?F=C3=A9lix=20Sipma?= Date: Wed, 9 Jan 2019 09:32:39 -0500 Subject: Import restic_0.9.4+ds-1.debian.tar.xz [dgit import tarball restic 0.9.4+ds-1 restic_0.9.4+ds-1.debian.tar.xz] --- changelog | 208 ++++++++++++++++++++++++++++++++++++++ compat | 1 + control | 69 +++++++++++++ copyright | 124 +++++++++++++++++++++++ doc-base | 9 ++ docs | 1 + gbp.conf | 5 + gitlab-ci.yml | 28 +++++ patches/0001-privacy-breach.patch | 47 +++++++++ patches/series | 1 + restic.links | 2 + restic.manpages | 1 + rules | 41 ++++++++ source/format | 1 + upstream/signing-key.asc | 158 +++++++++++++++++++++++++++++ watch | 7 ++ 16 files changed, 703 insertions(+) create mode 100644 changelog create mode 100644 compat create mode 100644 control create mode 100644 copyright create mode 100644 doc-base create mode 100644 docs create mode 100644 gbp.conf create mode 100644 gitlab-ci.yml create mode 100644 patches/0001-privacy-breach.patch create mode 100644 patches/series create mode 100644 restic.links create mode 100644 restic.manpages create mode 100755 rules create mode 100644 source/format create mode 100644 upstream/signing-key.asc create mode 100644 watch diff --git a/changelog b/changelog new file mode 100644 index 000000000..04a9cc32b --- /dev/null +++ b/changelog @@ -0,0 +1,208 @@ +restic (0.9.4+ds-1) unstable; urgency=medium + + * add golang-github-spf13-pflag-dev as a build dependency + * setting the version manually is not needed anymore + (https://github.com/restic/restic/issues/1958#issuecomment-413972449) + * New upstream version 0.9.4+ds + * add golang-github-hashicorp-golang-lru-dev to build dependencies + * bump Standards-Version to 4.3.0 (no change required) + + -- Félix Sipma Wed, 09 Jan 2019 15:32:39 +0100 + +restic (0.9.3+ds-1) unstable; urgency=medium + + * New upstream version 0.9.3+ds + * install restic binary only (ignore binaries used to prepare release) + + -- Félix Sipma Mon, 22 Oct 2018 19:23:21 +0200 + +restic (0.9.2+ds-2) unstable; urgency=medium + + * add golang-github-mattn-go-isatty-dev build-dependency (Closes: #909414) + * bump Standards-Version to 4.2.1 (no change required) + + -- Félix Sipma Mon, 24 Sep 2018 10:01:51 +0200 + +restic (0.9.2+ds-1) unstable; urgency=medium + + * watch file: use /releases instead of /tags (fails otherwise) + * New upstream version 0.9.2+ds + * rebase privacy-breach patch + * bump Standards-Version to 4.2.0 (no change required) + + -- Félix Sipma Wed, 15 Aug 2018 17:22:46 +0200 + +restic (0.9.1+ds-1) unstable; urgency=medium + + * New upstream version 0.9.1+ds + * update privacy breach patch + * bump Standards-Version to 4.1.4 (no change required) + * add golang-gopkg-tomb.v2-dev build dependency + * add golang-github-google-go-cmp-dev build dependency + * bump golang-github-cenkalti-backoff-dev required version + + -- Félix Sipma Sun, 10 Jun 2018 20:05:21 +0200 + +restic (0.8.3+ds-1) unstable; urgency=medium + + [ Michael Stapelberg ] + * update debian/gitlab-ci.yml (using salsa.debian.org/go-team/ci/cmd/ci) + + [ Félix Sipma ] + * New upstream version 0.8.3+ds + + -- Félix Sipma Tue, 27 Feb 2018 13:06:57 +0100 + +restic (0.8.2+ds-1) unstable; urgency=medium + + * New upstream version 0.8.2+ds + * add +ds suffix to version name + * bump compat to 11 + + -- Félix Sipma Mon, 19 Feb 2018 13:13:12 +0100 + +restic (0.8.1-3) unstable; urgency=medium + + * Team upload. + * Vcs-* urls: pkg-go-team -> go-team. 
+ + -- Alexandre Viau Tue, 06 Feb 2018 00:16:02 -0500 + +restic (0.8.1-2) unstable; urgency=medium + + * Team upload. + * point Vcs-* urls to salsa.d.o subproject + + -- Alexandre Viau Thu, 25 Jan 2018 16:55:55 -0500 + +restic (0.8.1-1) unstable; urgency=medium + + * New upstream version 0.8.1 + * generate zsh completion (Closes: #883446) + * bump Standards-Version to 4.1.3 (no change required) + * move to salsa.debian.org + + -- Félix Sipma Fri, 05 Jan 2018 11:47:08 +0100 + +restic (0.8.0-1) unstable; urgency=medium + + * New upstream version 0.8.0 + * add golang-github-cenkalti-backoff-dev and + golang-github-juju-ratelimit-dev dependencies + * update minimum requirements of dependencies + * bump Standards-Version to 4.1.1 (no change required) + * update privacy-breach patch + * use new "generate --bash-completion" command to generate bash-completion + * put html doc in html/ dir + + -- Félix Sipma Mon, 27 Nov 2017 18:05:14 +0100 + +restic (0.7.3+debian0-2) unstable; urgency=medium + + * avoid compression of fonts files (Closes: #872175) + + Note: firefox does not like fonts in doc to be symlinked. B-D on + fonts-font-awesome results in errors in firefox console when opening + restic doc: + + downloadable font: download failed (font-family: "FontAwesome" + style:normal weight:normal stretch:normal src index:2): bad URI or + cross-site access not allowed source: + file:///usr/share/doc/restic/_static/fonts/fontawesome-webfont.woff?v=4.6.3 + + -- Félix Sipma Tue, 26 Sep 2017 12:04:00 +0200 + +restic (0.7.3+debian0-1) unstable; urgency=medium + + * New upstream version 0.7.3 + * repack 0.7.3 to exclude vendor files + * update build dependencies + * mangle version in watch file + * remove fix-imports patch (not needed anymore) + * remove sphinxdoc from dh --with options (build fails otherwise) + * avoid embedding jquery.js & underscore.js from sphinx + * install manpages (Closes: #872173) + * upgrade standard-version to 4.1.0 + - convert deprecated "extra" priority to "optional" + * ignore all files in vendor/ dir + * Generate and install bash-completion file. (Closes: #872174). Thanks, + Michael Biebl. 
+ + -- Félix Sipma Tue, 26 Sep 2017 10:49:19 +0200 + +restic (0.7.1-1) unstable; urgency=medium + + * New upstream version 0.7.1 + * drop 0001-Debianize-doc-Manual.md.patch (manual changed upstream) + * Drop 0002-Update-code-for-pkg-xattr-v0.2.0.patch (merged upstream) + * add new build-deps + * Disable FUSE tests + * refresh fix-imports.patch + * build restructuredtext docs using sphinx + * Add privacy-breach.patch, removing badge images from docs + * Add doc-base file + * bump standards-version (no changes necessary) + + -- Michael Stapelberg Thu, 27 Jul 2017 08:55:09 +0200 + +restic (0.5.0-2) unstable; urgency=medium + + * Move package to pkg-go + * Experimental to unstable as the freeze is over + + -- Michael Stapelberg Wed, 19 Jul 2017 08:53:18 +0200 + +restic (0.5.0-1) experimental; urgency=medium + + * New upstream version 0.5.0 + * uscan: use signed archives + * build-depend on golang-github-pkg-xattr-dev + * rebase Manual.md patch + * Update code for pkg xattr v0.2.0 patch + (upstream patch) + * update watch regex + + -- Félix Sipma Tue, 25 Apr 2017 17:50:16 +0200 + +restic (0.4.0-1) experimental; urgency=medium + + * New upstream version 0.4.0 + + -- Félix Sipma Thu, 02 Feb 2017 12:48:34 +0100 + +restic (0.3.3-1) unstable; urgency=medium + + * New upstream version 0.3.3 + * binary should go to utils, not misc + * adapt copyright file to the removal of src/cmds/restic-server/ + + -- Félix Sipma Sun, 08 Jan 2017 22:06:16 +0100 + +restic (0.3.2-2) unstable; urgency=medium + + * update copyright + + -- Félix Sipma Wed, 21 Dec 2016 14:56:23 +0100 + +restic (0.3.2-1) unstable; urgency=medium + + * Initial release. (Closes: #846176) + * use "go build" instead of "go run build.go" to build + Thanks, Alexander Neumann + * remove useless patch: 0001-replace-vendor-src-by-usr-share-gocode-src.patch + * add debian watch file + * add initial debian rules file + * add debian compat file + * add debian source/format file + * add initial debian control file + * add debian copyright file + * replace vendor/src by /usr/share/gocode/src + * add build dependencies + * debianize doc/Manual.md + * add docs + * add build dependency constraint + * enable pristine-tar feature in gbp.conf + * use Files-Excluded feature of uscan + * New upstream version 0.3.2 + + -- Félix Sipma Mon, 19 Dec 2016 15:28:59 +0100 diff --git a/compat b/compat new file mode 100644 index 000000000..b4de39476 --- /dev/null +++ b/compat @@ -0,0 +1 @@ +11 diff --git a/control b/control new file mode 100644 index 000000000..84d2304d2 --- /dev/null +++ b/control @@ -0,0 +1,69 @@ +Source: restic +Maintainer: Debian Go Packaging Team +Uploaders: Félix Sipma , + Michael Stapelberg +Priority: optional +Section: utils +Build-Depends: debhelper (>= 11), + dh-golang, + golang-any, + golang-bazil-fuse-dev, + golang-github-azure-azure-sdk-for-go-dev, + golang-github-cenkalti-backoff-dev (>= 2.0.0), + golang-github-elithrar-simple-scrypt-dev, + golang-github-google-go-cmp-dev, + golang-github-hashicorp-golang-lru-dev, + golang-github-juju-ratelimit-dev, + golang-github-kurin-blazer-dev, + golang-github-mattn-go-isatty-dev, + golang-github-minio-minio-go-dev (>= 4.0.5), + golang-github-ncw-swift-dev, + golang-github-pkg-errors-dev, + golang-github-pkg-sftp-dev, + golang-github-pkg-xattr-dev, + golang-github-restic-chunker-dev (>= 0.2.0), + golang-github-spf13-cobra-dev (>= 0.0.1), + golang-github-spf13-pflag-dev, + golang-golang-x-crypto-dev, + golang-golang-x-net-dev, + golang-golang-x-oauth2-dev, + golang-golang-x-sys-dev, + 
golang-google-api-dev, + golang-gopkg-tomb.v2-dev, + sphinx-common, + python3-sphinx, + python3-sphinx-rtd-theme +Standards-Version: 4.3.0 +Homepage: https://github.com/restic/restic +Vcs-Git: https://salsa.debian.org/go-team/packages/restic.git +Vcs-Browser: https://salsa.debian.org/go-team/packages/restic +XS-Go-Import-Path: github.com/restic/restic + +Package: restic +Architecture: any +Depends: ${shlibs:Depends}, ${misc:Depends} +Suggests: libjs-jquery, libjs-underscore +Built-Using: ${misc:Built-Using} +Description: backup program with multiple revisions, encryption and more + restic is a program that does backups right and was designed with the following + principles in mind: + - Easy: Doing backups should be a frictionless process, otherwise you might + be tempted to skip it. Restic should be easy to configure and use, so + that, in the event of a data loss, you can just restore it. Likewise, + restoring data should not be complicated. + - Fast: Backing up your data with restic should only be limited by your + network or hard disk bandwidth so that you can backup your files every day. + Nobody does backups if it takes too much time. Restoring backups should + only transfer data that is needed for the files that are to be restored, so + that this process is also fast. + - Verifiable: Much more important than backup is restore, so restic enables + you to easily verify that all data can be restored. + - Secure: Restic uses cryptography to guarantee confidentiality and + integrity of your data. The location the backup data is stored is assumed + not to be a trusted environment (e.g. a shared space where others like + system administrators are able to access your backups). Restic is + built to secure your data against such attackers. + - Efficient: With the growth of data, additional snapshots should only take + the storage of the actual increment. Even more, duplicate data should be + de-duplicated before it is actually written to the storage back end to save + precious backup space. diff --git a/copyright b/copyright new file mode 100644 index 000000000..f1a7c915d --- /dev/null +++ b/copyright @@ -0,0 +1,124 @@ +Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ +Upstream-Name: restic +Upstream-Contact: Alexander Neumann +Source: https://github.com/restic/restic +Files-Excluded: vendor/** + +Files: * +Copyright: 2014 Alexander Neumann +License: BSD-2-clause + Copyright (c) 2014, Alexander Neumann + All rights reserved. + . + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + . + 1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + . + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + . + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +Files: doc/logo/font/* +Copyright: 2011 Eduardo Tunni (http://www.tipo.net.ar) +License: OFL-1.1 + Copyright (c) 2011, Eduardo Tunni (http://www.tipo.net.ar), + with Reserved Font Name "Lemon" + This Font Software is licensed under the SIL Open Font License, Version 1.1. + This license is copied below, and is also available with a FAQ at: + http://scripts.sil.org/OFL + . + . + ----------------------------------------------------------- + SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007 + ----------------------------------------------------------- + . + PREAMBLE + The goals of the Open Font License (OFL) are to stimulate worldwide + development of collaborative font projects, to support the font creation + efforts of academic and linguistic communities, and to provide a free and + open framework in which fonts may be shared and improved in partnership + with others. + . + The OFL allows the licensed fonts to be used, studied, modified and + redistributed freely as long as they are not sold by themselves. The + fonts, including any derivative works, can be bundled, embedded, + redistributed and/or sold with any software provided that any reserved + names are not used by derivative works. The fonts and derivatives, + however, cannot be released under any other type of license. The + requirement for fonts to remain under this license does not apply + to any document created using the fonts or their derivatives. + . + DEFINITIONS + "Font Software" refers to the set of files released by the Copyright + Holder(s) under this license and clearly marked as such. This may + include source files, build scripts and documentation. + . + "Reserved Font Name" refers to any names specified as such after the + copyright statement(s). + . + "Original Version" refers to the collection of Font Software components as + distributed by the Copyright Holder(s). + . + "Modified Version" refers to any derivative made by adding to, deleting, + or substituting -- in part or in whole -- any of the components of the + Original Version, by changing formats or by porting the Font Software to a + new environment. + . + "Author" refers to any designer, engineer, programmer, technical + writer or other person who contributed to the Font Software. + . + PERMISSION & CONDITIONS + Permission is hereby granted, free of charge, to any person obtaining + a copy of the Font Software, to use, study, copy, merge, embed, modify, + redistribute, and sell modified and unmodified copies of the Font + Software, subject to the following conditions: + . + 1) Neither the Font Software nor any of its individual components, + in Original or Modified Versions, may be sold by itself. + . + 2) Original or Modified Versions of the Font Software may be bundled, + redistributed and/or sold with any software, provided that each copy + contains the above copyright notice and this license. 
These can be + included either as stand-alone text files, human-readable headers or + in the appropriate machine-readable metadata fields within text or + binary files as long as those fields can be easily viewed by the user. + . + 3) No Modified Version of the Font Software may use the Reserved Font + Name(s) unless explicit written permission is granted by the corresponding + Copyright Holder. This restriction only applies to the primary font name as + presented to the users. + . + 4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font + Software shall not be used to promote, endorse or advertise any + Modified Version, except to acknowledge the contribution(s) of the + Copyright Holder(s) and the Author(s) or with their explicit written + permission. + +Files: debian/* +Copyright: 2016 Félix Sipma +License: GPL-3+ + This package is free software: you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by the + Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + . + This package is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + See the GNU General Public License for more details. + . + On Debian systems, the complete text of the GNU General Public + License version 3 can be found in `/usr/share/common-licenses/GPL-3'. diff --git a/doc-base b/doc-base new file mode 100644 index 000000000..5a35724a9 --- /dev/null +++ b/doc-base @@ -0,0 +1,9 @@ +Document: restic +Title: Restic Documentation +Author: restic authors +Abstract: restic is a backup program that is fast, efficient and secure. +Section: File Management + +Format: HTML +Index: /usr/share/doc/restic/html/index.html +Files: /usr/share/doc/restic/html/index.html diff --git a/docs b/docs new file mode 100644 index 000000000..344fcaaf0 --- /dev/null +++ b/docs @@ -0,0 +1 @@ +build/html/ diff --git a/gbp.conf b/gbp.conf new file mode 100644 index 000000000..683ec363f --- /dev/null +++ b/gbp.conf @@ -0,0 +1,5 @@ +[DEFAULT] +pristine-tar = True + +[import-orig] +merge-mode = replace diff --git a/gitlab-ci.yml b/gitlab-ci.yml new file mode 100644 index 000000000..5c8c31bf4 --- /dev/null +++ b/gitlab-ci.yml @@ -0,0 +1,28 @@ + +# auto-generated, DO NOT MODIFY. 
+# The authoritative copy of this file lives at: +# https://salsa.debian.org/go-team/ci/blob/master/cmd/ci/gitlabciyml.go + +# TODO: publish under debian-go-team/ci +image: stapelberg/ci2 + +test_the_archive: + artifacts: + paths: + - before-applying-commit.json + - after-applying-commit.json + script: + # Create an overlay to discard writes to /srv/gopath/src after the build: + - "rm -rf /cache/overlay/{upper,work}" + - "mkdir -p /cache/overlay/{upper,work}" + - "mount -t overlay overlay -o lowerdir=/srv/gopath/src,upperdir=/cache/overlay/upper,workdir=/cache/overlay/work /srv/gopath/src" + - "export GOPATH=/srv/gopath" + - "export GOCACHE=/cache/go" + # Build the world as-is: + - "ci-build -exemptions=/var/lib/ci-build/exemptions.json > before-applying-commit.json" + # Copy this package into the overlay: + - "GBP_CONF_FILES=:debian/gbp.conf gbp buildpackage --git-no-pristine-tar --git-ignore-branch --git-ignore-new --git-export-dir=/tmp/export --git-no-overlay --git-tarball-dir=/nonexistant --git-cleaner=/bin/true --git-builder='dpkg-buildpackage -S -d --no-sign'" + - "pgt-gopath -dsc /tmp/export/*.dsc" + # Rebuild the world: + - "ci-build -exemptions=/var/lib/ci-build/exemptions.json > after-applying-commit.json" + - "ci-diff before-applying-commit.json after-applying-commit.json" diff --git a/patches/0001-privacy-breach.patch b/patches/0001-privacy-breach.patch new file mode 100644 index 000000000..d6345e469 --- /dev/null +++ b/patches/0001-privacy-breach.patch @@ -0,0 +1,47 @@ +From: =?utf-8?q?F=C3=A9lix_Sipma?= +Date: Sat, 9 Jun 2018 10:31:31 +0200 +Subject: privacy breach + +--- + README.rst | 27 --------------------------- + 1 file changed, 27 deletions(-) + +diff --git a/README.rst b/README.rst +index 64f6ecb..1172e9b 100644 +--- a/README.rst ++++ b/README.rst +@@ -1,5 +1,3 @@ +-|Documentation| |Build Status| |Build status| |Report Card| |Say Thanks| |TestCoverage| |Reviewed by Hound| +- + Introduction + ------------ + +@@ -110,28 +108,3 @@ License + + Restic is licensed under `BSD 2-Clause License `__. You can find the + complete text in ``LICENSE``. +- +-Sponsorship +------------ +- +-Backend integration tests for Google Cloud Storage and Microsoft Azure Blob +-Storage are sponsored by `AppsCode `__! +- +-|AppsCode| +- +-.. |Documentation| image:: https://readthedocs.org/projects/restic/badge/?version=latest +- :target: https://restic.readthedocs.io/en/latest/?badge=latest +-.. |Build Status| image:: https://travis-ci.com/restic/restic.svg?branch=master +- :target: https://travis-ci.com/restic/restic +-.. |Build status| image:: https://ci.appveyor.com/api/projects/status/nuy4lfbgfbytw92q/branch/master?svg=true +- :target: https://ci.appveyor.com/project/fd0/restic/branch/master +-.. |Report Card| image:: https://goreportcard.com/badge/github.com/restic/restic +- :target: https://goreportcard.com/report/github.com/restic/restic +-.. |Say Thanks| image:: https://img.shields.io/badge/Say%20Thanks-!-1EAEDB.svg +- :target: https://saythanks.io/to/restic +-.. |TestCoverage| image:: https://codecov.io/gh/restic/restic/branch/master/graph/badge.svg +- :target: https://codecov.io/gh/restic/restic +-.. |AppsCode| image:: https://cdn.appscode.com/images/logo/appscode/ac-logo-color.png +- :target: https://appscode.com +-.. 
|Reviewed by Hound| image:: https://img.shields.io/badge/Reviewed_by-Hound-8E64B0.svg +- :target: https://houndci.com diff --git a/patches/series b/patches/series new file mode 100644 index 000000000..959a4e23c --- /dev/null +++ b/patches/series @@ -0,0 +1 @@ +0001-privacy-breach.patch diff --git a/restic.links b/restic.links new file mode 100644 index 000000000..a9fafad1c --- /dev/null +++ b/restic.links @@ -0,0 +1,2 @@ +usr/share/javascript/jquery/jquery.js usr/share/doc/restic/html/_static/jquery.js +usr/share/javascript/underscore/underscore.js usr/share/doc/restic/html/_static/underscore.js diff --git a/restic.manpages b/restic.manpages new file mode 100644 index 000000000..f24bd33fa --- /dev/null +++ b/restic.manpages @@ -0,0 +1 @@ +doc/man/* diff --git a/rules b/rules new file mode 100755 index 000000000..46160a41f --- /dev/null +++ b/rules @@ -0,0 +1,41 @@ +#!/usr/bin/make -f + +include /usr/share/dpkg/architecture.mk + +export DH_OPTIONS +# Install everything, for testdata/ directories: +export DH_GOLANG_INSTALL_ALL := 1 + +# Install only restic binary, other binaries are used to prepare release. +export DH_GOLANG_BUILDPKG := github.com/restic/restic/cmd/restic + +# FUSE doesn’t work within schroot +export RESTIC_TEST_FUSE := 0 + +%: + dh $@ --buildsystem=golang --with=golang + +# Prevent sphinx-build from accessing the internet during build. +override_dh_auto_build: export http_proxy=127.0.0.1:9 +override_dh_auto_build: export https_proxy=127.0.0.1:9 +override_dh_auto_build: + dh_auto_build + PYTHONPATH=. sphinx-build -N -bhtml doc/ -d debian/doctrees build/html + +# Do not install the restic source code, there are no downstream consumers and +# it is not intended to be used as a library right now. +override_dh_auto_install: + dh_auto_install -- --no-source +ifeq ($(DEB_BUILD_ARCH),$(DEB_HOST_ARCH)) + install -d debian/restic/usr/share/bash-completion/completions + debian/restic/usr/bin/restic generate --bash-completion debian/restic/usr/share/bash-completion/completions/restic + install -d debian/restic/usr/share/zsh/vendor-completions + debian/restic/usr/bin/restic generate --zsh-completion debian/restic/usr/share/zsh/vendor-completions/_restic +endif + +override_dh_compress: + dh_compress -X.ttf -X.svg -X.eot -X.woff + +override_dh_auto_clean: + dh_auto_clean + rm -rf debian/doctrees diff --git a/source/format b/source/format new file mode 100644 index 000000000..163aaf8d8 --- /dev/null +++ b/source/format @@ -0,0 +1 @@ +3.0 (quilt) diff --git a/upstream/signing-key.asc b/upstream/signing-key.asc new file mode 100644 index 000000000..e51baf464 --- /dev/null +++ b/upstream/signing-key.asc @@ -0,0 +1,158 @@ +-----BEGIN PGP PUBLIC KEY BLOCK----- + +mQINBFRVIb8BEADUex/4rH/aeR3CN044zqFD45SKUh/8pC44Bw85iRSSE9xEZsLB +LUF6ZtT3HNXfxh7TRpTeHnXABnr8EtNwsmMjItDaSClf5jM0qKVfRIHBZ2N539oF +lHiCEsg+Q6kJEXHSbqder21goihfcjJBVKFX6ULgCbymOu03fzbhe/m5R57gDU2H ++gcgoI6a5ib11oq2pRdbC9NkEg7YXHbMlZ5s6fIAgklyDQqAlH8QNiRYcyC/4NrG +WXLwUTDssFn3hoJlAxZwj+dRZAit6Hgj2US05Ra/gJqZWzKyE2ywglO9sc2wD3sE +0Ti1tS9VJr7WNcZzVMXj1qBIlBkl4/E5tIiNEZ5BrAhmdSYbZvP2cb6RFn5clKh9 +i+XpeBIGiuAUgXTcV/+OBHjLq+Aeastktk7zaZ9QQoRMHksG02hPI7Z7iIRrhhgD +xsM2XAkwZXp21lpZtkEGYc2qo5ddu+qdZ1tHf5HqJ4JHj2hoRdr4nL6cwA8TlCSc +9PIifkKWVhMSEnkF2PXi+FZqkPnt1sO27Xt5i3BuaWmWig6gB0qh+7sW4o371MpZ +8SPKZgoFA5kJlqkOoSwZyY4M7TRR+GbZuZARUS+BTLsAeJ5Gik9Lhe1saE5UGncf +wYmh+sOi4vRDyoSkPthnBvvlmHp7yo7MiNAUPWHiuv2FWU0rPwB05NOinQARAQAB +tChBbGV4YW5kZXIgTmV1bWFubiA8YWxleGFuZGVyQGJ1bXBlcm4uZGU+iEYEEBEI 
+AAYFAlRVKToACgkQFBE43aPkXWafmgCfcR+LfsAn6aZxjX46Km5tmWpDVrAAoJfH +pfBG5MEki2MOoKvEsWDZqhHSiQIcBBABAgAGBQJWBrfPAAoJEB5F+Mqd4jsWKCoQ +AIgC4I+rQ1UivkF1sk0A6IUvWAP4yfNBM6UPK5bxFLd/kjL9LiiWD74NVmQ8WAa1 +Y3cv/gVrmekcVhxXGf3bZcGKBnYmmpR0xWliXxQJ9CGPwtoaN7ZDKPbOKFzOHX3D +iCZ9ePrxwpgym5iu541X2aGH/cqh9bPS4vxvNez+5fwhSdDe6iJZ09/oiJuMkyir +3SKx3SJRf3Yd2G2k2bPJae2NjiolKIJrgNMeSSYahaMHF8i+UpUQMqXK4vAWPFML +9DzwjJVbnJuJ8s9T/5wy3Lw33Fpe33a8bTonbEk60+NwhlnRAho0J5At9LVpTUuA +0+5LAT2kwAzk2dPzYJl9SVYOQWP8Zj4tEDGFhQPfdnYMhQB2ECTrtLBLJbqnEbCg +RA7VTlGnsb+PU+Ut1GLhglPFPRjfAhWRKBLe9sDYn8icrhJqvEyc8YMjeBSMEuQU +m65b0fjUUl9eBSdRxy2RkQiPTg+o8kLOOnt6+ar3S+JXIcN4GpLfBt5cpBiU53Tk +uTJYHqIHqKyLgEfIIfNRrKTbK6sCfA5STKTfJSmewY2vGM7D4njQ2Iz8a4SU/XFO +WQP0zlehDe1jhLXqYBlYMyXoULkXLkMfmIZXAHoVn7z1POa94NcKePpW2BFm4Q0O +jrwY2/ufPF/RlB4qNiFsrVuWpL7eMzaMZ+JVoZXxEPMRiQIcBBABCAAGBQJUZRF9 +AAoJEGo6ELMcEJUXrtIP/iJh0i7VaQG2F+QqGW8Evqng33HeLzW9BvYxt3X1QNb4 +ylFIc51Hl33yh5ZkPmZk/I3mBaDd23IE2rhzETxDGrAMnE6zeaQ+iTu6iySBxqHj +tK0HwKObuBA4Sw803Hn31OwaZ8a3TEUkyiHPh8NBuxbvXNuOrxsglATE4KCuUGjG +dmNs1raG9mqSUXgZCh1q1kAINN6O9DFFS1RsAvNK0qmTZZMfHWZeu10O55MHsxTs +fPY/v1Jphg+vHc2NItw0s23Rj6SJN5fgNSLhcKBdCRpw33YFy+EWA8lE2FRd5DSt +n2sNWvAOoWLrIHZo0UgrgFV2gi4QpaN+b/T+QDiq7IcwLaMSWU3ODYIFN2C/TBKZ +RIC7LWQPG0cjFJd/kWDQWB+i/MdYMOOuDo6ohh3vfkC7xNEo3lJArC3Zgf4SmO6O +BMnIdYjdchDV8dn5lSbKq1A320FIUWIxdkfx4L88J3KOGMAmuZxmnWkKN6iEg+Pb +/axuX0cHSx/j7vV01YY2Z6j698tKhP2XObH990Eqfr8zJcj0tCuKEHww7Pn8aH9B +Hvig5KeEAIbggW34jR9TUKXd1ao5HX0pKSa/37OqlG4r+XUORCV7awNSuUHU8BR0 +emDCsgRdrQ4szRgJLOA8sP14b9fz+xO3KKNrDXGYZLXFSVwGzrSC5zbh6QkC0qeY +iQIcBBABCgAGBQJUamlAAAoJEFKZtJkEN6xByy4QAMQJ45eOJtVyGtCKtIr2mPZQ +0VI60hfMB6PVFlb7DOpWIHohNl6KWzZunENelXp+VNQcj6St5iZrdOiyiMOY/FdN +0rhPAYWERchABd9WDS9ycBr8n5kWmB36Wa2r/aTOlDYJ/botigS6To6bR6Gc9FEj +4QuVnmqzMlawSz/O0HNS0HejDgUwgR3hCDAAp/Hw9PR1CRcHw2bo/B5+GEcl+w6i +AkheGXuV2zSWXf6LRLRSEQ70f6n4hs6vsuQQ35yd4UXy/t/q3l7/xeJ5TBWWiXvi +QK1tIOsUJ+/cCpWzms+IFvt+UsTQBMMuKBFqjkl4oDgtv8vf1i2NZsNo/XbzPB4h +ua5HyBuhn0/ej9zMfmMvfqZG6ZzaAGpZYRCVRcw3nc0yNnoW+g7pAJs1M3sL1BXp +UGfROG/T3yFzL+sk62moG2tDG2G7QsNVzOxRDi8bax5f3U7QW1i33o3qRbr9BfeP +yWtfVuWHisTw1rBdwwEAfYQsYUSDMXcTB9LhUWAhJAtEeLz0GOaA+IlafVwqmIdL +xTsUoNYfpgRi2Wic33oRI1UGyENtzKUu1iaHvSCEd/obvrhpx353oI4Yq8Gkr8mW +RptX9IGXa+qASZ9gMxJ1PoPAdLG5/oECVC/ORaNAL3zY9SbmGWamcWgSAeIB3iJx +QlyMYikLDzb390y+5AFXiQI3BBMBCAAhBQJUVSG/AhsDBQsJCAcDBRUKCQgLBRYC +AwEAAh4BAheAAAoJEJGmhovT96kH8pAP/3PtPkxkYNaS+PXiHBDpDnUsBiwvZt3F +sebexepAepzL8dP00oNyI7dPF2XKm4e/PHJ0nnY+tD4KKRdBeiQOL5ZywHmxZX04 +P1/Z6uCbVpGCSovcWavBkP8Ak+/CjzJUA6Z/s17D6LIpDDntn6v0abRoTy3asexG +277udP+iO+1q/mnxSiSZzNasrh973gXSeqL3oV+oY6DCSPpOSJlbI85UMU5/WnCx +PIVHvaDG8Fv5RF74d3+FVJKd7TRnUsqZ4MLI++JNXwK0O8dCQ8NsB3NF2rDnND+z +hzDlisvdiyGsQUNMnn1Czi4D/MK2/2xkdoVzKNA0v3IHlnxMWhZVLaHqUiGYUGF6 +NsB+OgXEEJRGIpukYnd/+WkLQqfzyy6Y6uUDhkwz0G5aDGyNUg7+gfDMr5dy+Hxt +gEzkcoZJWuNBzO0Yp198QNNE+QBxu9OkYw9A8wT58cHVuzFU0V+bTBrZtpbME8OW +Ly6+eDXn6CbVEu2Fc92ckQozEpRdZMdiWVtbQDY8L9qAiC+BOVqBgv5PoB8IVHrV +1GmRZwxRdlplnzviWa5Qvke5dcUy+DXmrCN+dWUql8fFt2g6EIncFYotwxz7r3+K +djCFKzG6zmMLHBCUz0exAxrH4vXXB1LdEByddgjXcol+N73D4jUyYvs12ziecGit +AU8z9nYK347XiQI6BBMBCAAkAhsDBQsJCAcDBRUKCQgLBRYCAwEAAh4BAheABQJU +VSmNAhkBAAoJEJGmhovT96kHQUcQALfi9KohoE0JFkKfSXl5jbBJkTt38srMnZ6x +KP45F0e/ir1duFVCSyhZ+YS/n6aBMQl/qRWbzF+93RnGsTLvMi/8Oa72czlEPuYY +fFPuJAatxvA/TFZHuI++u6xAF4Oxlq0FAbEJfpw0uLSDuU9f9TlLYNP3hLudjFFd +9sJGLLs+SCeomPRKFxRRLL7/1EzdtmvvFhZZcuPsTamBb4oi+1usCO5RW1AQA5A4 +Qo4gHitBaSaBgolFZLN76UFBwBs/t0hDZPAAZa1T8EpjQrlmINFIeBYFdvjhMChG 
+Qc6NcfOOQofW5BDVn6GsBHYTvAgSK5G0eaB+bOAtv9LW9hDt05iEJaE5ojPT7Thi +cHoU65WL4yGAGCGcfNm+EpuNGt1IgAFGGxX6wMZy59WqtMBZANjWQdrDbCPQa3pI +ID96iN0A1HZJg7eAl0y3NM6nU7faAuW4QOoQRgxOTj0+fM6khaFmYCp5nuer3d5p +kaH6SQG4ZDVOOSaak7QlT/EFSfz2B5FZN1OIdqw5/Aw7HugOehHund5QfgRuDLSq +ZKnuGrIo9OwJIirT/TDDnsNWBTN3Pxf1h8Iut+R9Zt7LwsVjVN9+JPL8yEk4zzCx +HEL4/2c6jANQdtbQCZiHbU85JWe1NKFo/NNPpM2ysZMpKHe5RB3FLMare0IBs5BO +06nyGpTmtChBbGV4YW5kZXIgTmV1bWFubiA8YWxleGFuZGVyQGRlYmlhbi5vcmc+ +iEYEEBEIAAYFAlRVKXwACgkQFBE43aPkXWYUtQCfW61UqGPhe0atXSnkzEevKm6y +99QAn1CZ4rCVg4u/Zp6nvKncdd3cs0/NiQIcBBABAgAGBQJWBrfPAAoJEB5F+Mqd +4jsWMmYP/0izNHAqY8HvpyM7SlWixvXI2tjWSlhiC8dLv77rDLTjW/XbKh8P+6ab +xaPBg9dFxHGDBJli0U9J+Mp5AodB+b/xgA8ro/U5sGGvTVI02AE9ohPwR2W6xePO +apmkyWxOP4kfEP8bK2V/JnBdk8Rq6tce5onBWTrFQCcqs2OprlfPpbKZgQ6b/K7n +NP9uv2kutwhSxmw4NpKJmvGzf1HnWVQKaE4yqCoH9pJGyQmn+v8JBytkRIsAhlV/ +HKC8Toz5x933qAHVMW/Xf7hYWu9a15rS8NSBMOEFmvKubxYbqEPYM5uXENDCa+oG +Sbx52crfIoOfiWWSiDyV502kZ4WB0jOdwITCDp6D/Hq4HZMnoquZ9Kk7mRuOP3FG +XtAmp3rY9TGBcjH2hjA1xen0N59uaE+zNeU0wEhrSMbwUvsFt6Z/p8aS8TMZIayV +bmGi/0VfUonjIx3OPsd08CfXTk9cY+R3RKoFaJBBa2ZK2XnXj7VgbdcYx7IV4G41 +cxOHCgClBAMyzKI+UHuN4H39sQixFJW7tF0QJrYaXamuzIyh99qy5b0NvWxCiStZ +OKglmq2P61ryqzyPcbwEn+1OeXUXayZslU8M2SMRfA3qXaBUuH5fQgVzCm8Dndwd +GRbh0ceoE4BtZkZ3hmWvpPgt177eLcg7plL176bnjV0llspg+ji5iQIcBBABCAAG +BQJUZRF9AAoJEGo6ELMcEJUXk0gP/RJg5pLpPNibz09kjwsOJAlpORyd6OBxE4tO +n/bECT0mE6vBg0TdY+MO1IC0o5RkiAc9f5YizzYyLBVpfgwdrfCk+eD9mFKhn7sz +YI232AdXIO5ziC4pND+zdkSj37fxAcM4BIfeyHWKna/cmkM1tsmB1YYpxNpM56Y+ +ulAG6YJBkU0hPoUjI7eNMsV/+V/IOSuP4Z/Lhw1fw4bKow0zVc20C+dbgrBz9uKG +UrmTjLMLoEiaxn7yrYug8kQeeKaEoczyQhivQrKFZsfRMkiGaRz7qeOWrw10MCPN +a3LXwswXpxM9FGLnflOwhUiYSgD1OdmBaEEntDPX75Dp0n8pdm5GNFCuD9RpJrGI +iPm5dsU8kRMeFbUQFNOkJE5Npxv7DrmTIjFd90U3kwcwEL0Y++W+q/lbrmxgOuYm +c/c6K8WVGjsOTpEuqFvmLmhIwxzH4QCtSUTb/O2bg7PIbAk2LVMbXi4H9Fxl8YCW +b532Tu2RQ73Odvmluhj+QTFnxglEd4xiOlttwIOwqQAgLBk/GSioFfgLaGla5iab +GaWuMB6zFiDp30IDHfIchUp/jaBvWJf33UaemryRrppVv1mgs6qvKxbGmYSOn7I8 +KBasiyV2IXaUCHDdreFcCLJrl/Cso9qQcHroI127IQAB5upyN3TuS0RS/ZnAZc9y +d0JxkFNHiQIcBBABCgAGBQJUammYAAoJEFKZtJkEN6xBoC8P/2ipNFdW6rMuISzG +UcGsCQVNcil/9mZ+iOqe+7DS356vJmENvof31r2/tTHUcJcRoh7ANkR0YuvZylD8 +MFXkjrAj+X2ODSCsaugyjxWEg5XEYLnHipX7eFxzT39UJrgP/4wNu8tWDO6t/xhb +lHUichE1tvWZkUnWzhQrBKIiYGZnu0mxIEHR33PZauc4vFL2U0K8deKpo01jtbz9 +f8+ngrcTplCfCJ8H0SoR/8t4qyg2FNgcnJW7F/VVa3j6ctDBkB+NcPYjeRL8cybH +V0VUxbbG64D+WbqsspWDRj7799SELQ5emUnyok0j/e+3ffFkiKP0mpMX3RW2MfMx +d1BsAoY3IqvIdlAjrtAY2tQ3sXAyPgmcp6kKRKixLTbBjGfrptNLO2nzADvHk3/4 +OnKltlYj406A7ZgeKDWku+yQ8VSPCeFh86KwiQBgoZOJiQSRWYGT2hy8xLb4im4W +1corbfL+2+iVMu+EwQ6QFlyzaLgNPNWBXUWvK8vok55LNuHYKHxCJD3b53vBctmB +08/UpiRMDT5JTolyfwc4zbFrgb4d5lvTP0bM2qIHoEde5GyDKaUZkHvbBKokkR7n +MKhKmfWs091mG5AP03NXdmt/mllv3bRPsbJJTP0m8BMliQjvPIhKk7ngbNHefzGC +dv06psDi8Q8apCdR2bLDaLp2HC4TiQIfBDABCAAJBQJW9SIDAh0gAAoJEJGmhovT +96kHrP0P/24pnzm7zUyMFjUuZbscJxNk31K/gSWQ6S5AMPeKB/ar5OMRMkmpZZmO +X8c1Q1MxdGdRGPFzA++uWPiizc3QLQIrzI1Q2oarkjcb3FMOMpn4M5xZp/+dmuWS +rgEEF3iPom/DjpE+U/DC6/YaeJJOWLuiU799c8b9Qg+ZZcf5L1vUMT489kDL8Fgw +iThoAXQ4LgSylblguVNkSiyZAQ7g0snYD93jdBvY2KSIQ1Y9mIZPZYcZacj+CVMM +AQOAP6WmrOw6hREaYFo/0Z9tMC0QFba2hwAISS/hrBPFCFalq9E0tqClryitXdJp +0/k8QgU979pANJXmZCvmFhjcCIKg9ok7+lykFmbo+UCmRRoYoLlaw4wNfuN3TIlD +yWx7cfAVww+AwQD8E1k6jXJpqT5sY+NSbJ2bPRR+AQk3qkvU2dJqOIJxF02jp4a4 +QxypTAN+byCkJcnrl7XMcykAeCAfXIA5xRoZu44WJhHmTIAMf5SLzk889MggQrGV +KckOpvSaFDElqW54DY/erkwFiZKdt0rOmvqY4/63Btw6x7Y63THp4xf5IvFf0REc +/Eh5aC0gPilHPS9ZbuIh0tX4hrQYJ2SPQ5bU63XC+ucJrHde25dDEa9oQ/xny3Dd 
+233j8ofdLuBKejXXjhD/Dv3nlAEZD9VQgaF4kQcpqkz+dsgzEA3IiQI3BBMBCAAh +BQJUVSlqAhsDBQsJCAcDBRUKCQgLBRYCAwEAAh4BAheAAAoJEJGmhovT96kH/JMQ +AIQLk13LPm2K7YvLrO0iq8MbM/YKpUh97sms5ItVMZJm3tGmbc4bgOJ2zAfeRRou +mMqIyv2KLuXNKdysoGIowKvukOEKv3IFv1pIXYwQ7KrRa+Pn1kfpjgoOePN/gm5f +bGZTgRe65a9XhkBPKB4emv3hrX2bWFMxtkbzDyP03oshTO/tpBFMNV+XA/Zlz3fL +zvICUzD1SHTzzTFACyFkiB68Yx4yUXXAln7LCXzHsdiM/3EuloiDZtao5t1Ax0+G +Ebo7WL3bIoR8e1q4d/PgbKPQvASHjw/s0S1gCwOnFeoShrn2ijp2/5XLVjx4hQ/C +Cv+RJxv1lNzmmZtBBMGHbl1U5rcEYs/Fpe1If1RyC83mimmMfGS5TTXOMWbqjNlR +T9bLRM/+OLvS6FWuAuFogQkCc+paVKHSEQJiN2+2XbjfrrozubB6Icegp2RwKyi9 +BRre9V6NPfEm1C8d6hmloqsK5RHXz62a25sH4mEufTxgYn7TCxx+wckBWOlDe3p2 +i1lJDv1SKQXN0ZANfmObdfUMYyhZ/j/ariRx1uhSrgPzQoRBaDMa/klqGyWQ9Yh1 +nJdeKwPZo4zriZvK5LAgnWh2IRcGiPtSdtdk5KaEAouUR8XNGEpL79+Awi3kjd5u +Emnu+ZqDDM/hq3NgzbQ51PuwBeuHpe+6S4onnc5Sh5youQINBFRVIb8BEADo4td9 +MrPJd0wt2Q0OPgdAOyxpwGgu2vh8TTL5sUsMpJEKRQdc5AyEI1/mrTINDVgTSjTd +VPQE8fb4w3GHAUg4iBPucyGLUpQd+pxYya/aqVurKjynVZPHpZzCylsdVv8WR1Bb +bVIbmPiJxmRi3irjNzsmCeUV1V8JPpMxWBdV14NTcRkeJA2JpRXp8ZHhO9WryZV9 +uxxMiDS8NIlAI6Ljt1swrJQOv2sHk9Gbrgmpd1zTYjJzORXZHsQdQ6XAy/4yWwt8 +Gl+eg5ZRSyAE80TEIH0FFJcQ/9YZK/j9bxN+wGiuW4goNdBl84NJ8aq1G0NXDjyH +9WWypWfgURUoNBVmSek2ibRxSriqdFH8Tt+98w1a8EdLJKbPb0A5sV6PqqKUP59a +1AZ1kA0tLjh89Wz6+qjg9YhiCN7SO6eikdPWT/0r3SHtiztgDjgcqTFDNoFZdmZc +jb6eD0nuoRRfWXVZ57aX8WwD37xljKt7e06W7gsq4fXyRYZvQpNHga+83YCkVbxu +pPgPjgq4F/JquIUVfOx3CMmLsvE5p2U0zLGzG1WYgW5AShDfo2LXtjOz4wmRFnfY +pFO+CreWiG3OElwae77JiHXSc7+8pCOE3Buh9SRI8ioJPhb4uxV3paFH9uDTQjpC +nVMI5uOHg0tmWZgTShB/tzDV1KFVTZCw3fABxwARAQABiQIfBBgBCAAJBQJUVSG/ +AhsMAAoJEJGmhovT96kHb/0P/0LXAOXeyTDWOHEoPFKIXS4y9tl2PA6hq1HOgthP +1B2P6eIjpB7UGTSieWpKeqkkv7SZFTol2H0JlhZlhh1IkxS/aHHAl2Km6TLkk6QL +GGkKOFFAiU51iVkJQumbTKMlx11DXA0Jy6mVsUWoz3Ua9cFwrhuCRpKxW61xTEaX +dksgOUBKWH+mF8MtJtRedwHXjmNxaKTAKEsjmPFPn8i75D48JIbq9L+rHLxFTeSR +LShj7lZR1I24+UofA2Tllh4V14rSsUkfIYsKuwCGenJ+sPhpwqHohfJzTewXk+TK +wkilwVgTg7AYCeywP7XqkhA4om9aJRc1cqPcrknsXJLz4Vp7JX8bCtRqF2JT7wsM +wtHMNAtItLa+WYnkvt9/ng9Zt5i0fHZBwfVazWP+/4LAkb9fE4vO2IusV0jK00Sk +7Gt65A32qY75Lze6NRUk2gwizMLIdMvag9AuIUH52RScNVoVXIkmw1q57KshBL1M +VWRd7DUpFGpw8HKkqNlJKPAv+UsJAp7rSkfH9CAYwFzjbs7BST5Cuynac0CgZGQO +F0793mKAsbMePuEIzkR0ZdA/F0Mar9/tQLAtU3pXRrThkLUNmr8Qm9rPGTjrNv7k +ANWsgd4bu0PW5SVm+eFjoTRpNI9P/xrCF8fgLcZ2JPO/wKqyIDcKxEZq978lxWDm +CwGc +=b9yF +-----END PGP PUBLIC KEY BLOCK----- diff --git a/watch b/watch new file mode 100644 index 000000000..28eb05686 --- /dev/null +++ b/watch @@ -0,0 +1,7 @@ +version=3 + +opts="repacksuffix=+ds,dversionmangle=s/\+(repack|dfsg|ds|deb)\d*$//,filenamemangle=s/.+\/v?(\d\S*)\.tar\.gz/restic-$1\.tar\.gz/,pgpsigurlmangle=s#^(https://github.com/restic/restic)/archive/v(.*)\.tar\.gz#$1/releases/download/v$2/restic-$2.tar.gz.asc#" \ + https://github.com/restic/restic/releases .*/v?([\d\.]+)\.tar\.gz + +# Release tags are signed with +# CF8F 18F2 8445 7597 3F79 D4E1 91A6 868B D3F7 A907 -- cgit v1.2.3 From b755beef4cc37a2c29b39bfccd386b9b22299855 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?F=C3=A9lix=20Sipma?= Date: Wed, 9 Jan 2019 09:32:39 -0500 Subject: Import restic_0.9.4+ds.orig.tar.xz [dgit import orig restic_0.9.4+ds.orig.tar.xz] --- .github/ISSUE_TEMPLATE.md | 27 + .github/ISSUE_TEMPLATE/Bug.md | 93 + .github/ISSUE_TEMPLATE/Feature.md | 57 + .github/PULL_REQUEST_TEMPLATE.md | 36 + .gitignore | 2 + .hound.yml | 2 + .travis.yml | 61 + CHANGELOG.md | 1701 +++++++++++++++++ CONTRIBUTING.md | 214 +++ GOVERNANCE.md | 27 + LICENSE | 25 + Makefile | 13 + README.rst | 137 ++ VERSION | 1 + appveyor.yml | 32 + build.go | 632 +++++++ 
changelog/0.6.0_2017-05-29/issue-953 | 8 + changelog/0.6.0_2017-05-29/issue-965 | 11 + changelog/0.6.0_2017-05-29/pull-962 | 15 + changelog/0.6.1_2017-06-01/issue-985 | 8 + changelog/0.6.1_2017-06-01/pull-891 | 6 + changelog/0.6.1_2017-06-01/pull-974 | 7 + changelog/0.7.0_2017-07-01/issue-1013 | 9 + changelog/0.7.0_2017-07-01/issue-1021 | 7 + changelog/0.7.0_2017-07-01/issue-1029 | 8 + changelog/0.7.0_2017-07-01/issue-512 | 4 + changelog/0.7.0_2017-07-01/issue-636 | 9 + changelog/0.7.0_2017-07-01/issue-965 | 8 + changelog/0.7.0_2017-07-01/issue-989 | 8 + changelog/0.7.0_2017-07-01/pull-975 | 4 + changelog/0.7.0_2017-07-01/pull-998 | 5 + changelog/0.7.1_2017-07-22/issue-1055 | 10 + changelog/0.7.1_2017-07-22/issue-1067 | 8 + changelog/0.7.1_2017-07-22/issue-1073 | 8 + changelog/0.7.1_2017-07-22/issue-1081 | 4 + changelog/0.7.1_2017-07-22/pull-1080 | 5 + changelog/0.7.1_2017-07-22/pull-1082 | 3 + changelog/0.7.1_2017-07-22/pull-1115 | 9 + changelog/0.7.2_2017-09-13/issue-1132 | 8 + changelog/0.7.2_2017-09-13/issue-1167 | 8 + changelog/0.7.2_2017-09-13/issue-1179 | 4 + changelog/0.7.2_2017-09-13/issue-1208 | 7 + changelog/0.7.2_2017-09-13/issue-317 | 10 + changelog/0.7.2_2017-09-13/issues-697 | 4 + changelog/0.7.2_2017-09-13/pull-1044 | 3 + changelog/0.7.2_2017-09-13/pull-1061 | 3 + changelog/0.7.2_2017-09-13/pull-1126 | 7 + changelog/0.7.2_2017-09-13/pull-1134 | 5 + changelog/0.7.2_2017-09-13/pull-1144 | 3 + changelog/0.7.2_2017-09-13/pull-1149 | 8 + changelog/0.7.2_2017-09-13/pull-1164 | 3 + changelog/0.7.2_2017-09-13/pull-1191 | 8 + changelog/0.7.2_2017-09-13/pull-1196 | 3 + changelog/0.7.2_2017-09-13/pull-1203 | 5 + changelog/0.7.2_2017-09-13/pull-1205 | 3 + changelog/0.7.3_2017-09-20/issue-1246 | 9 + changelog/0.8.0_2017-11-26/issue-1102 | 9 + changelog/0.8.0_2017-11-26/issue-1114 | 10 + changelog/0.8.0_2017-11-26/issue-1216 | 9 + changelog/0.8.0_2017-11-26/issue-1256 | 7 + changelog/0.8.0_2017-11-26/issue-1271 | 6 + changelog/0.8.0_2017-11-26/issue-1274 | 9 + changelog/0.8.0_2017-11-26/issue-1291 | 8 + changelog/0.8.0_2017-11-26/issue-1367 | 7 + changelog/0.8.0_2017-11-26/issue-1445 | 18 + changelog/0.8.0_2017-11-26/issue-448 | 9 + changelog/0.8.0_2017-11-26/issue-510 | 7 + changelog/0.8.0_2017-11-26/pull-1040 | 23 + changelog/0.8.0_2017-11-26/pull-1249 | 6 + changelog/0.8.0_2017-11-26/pull-1269 | 6 + changelog/0.8.0_2017-11-26/pull-1281 | 7 + changelog/0.8.0_2017-11-26/pull-1317 | 7 + changelog/0.8.0_2017-11-26/pull-1319 | 8 + changelog/0.8.0_2017-11-26/pull-1353 | 3 + changelog/0.8.0_2017-11-26/pull-1437 | 10 + changelog/0.8.1_2017-12-27/issue-1457 | 4 + changelog/0.8.1_2017-12-27/pull-1436 | 9 + changelog/0.8.1_2017-12-27/pull-1439 | 6 + changelog/0.8.1_2017-12-27/pull-1452 | 9 + changelog/0.8.1_2017-12-27/pull-1454 | 6 + changelog/0.8.1_2017-12-27/pull-1459 | 9 + changelog/0.8.1_2017-12-27/pull-1462 | 8 + changelog/0.8.2_2018-02-17/issue-1506 | 4 + changelog/0.8.2_2018-02-17/issue-1512 | 9 + changelog/0.8.2_2018-02-17/issue-1522 | 8 + changelog/0.8.2_2018-02-17/issue-1528 | 4 + changelog/0.8.2_2018-02-17/issue-1541 | 7 + changelog/0.8.2_2018-02-17/issue-1567 | 9 + changelog/0.8.2_2018-02-17/issue-1590 | 7 + changelog/0.8.2_2018-02-17/pull-1507 | 3 + changelog/0.8.2_2018-02-17/pull-1538 | 7 + changelog/0.8.2_2018-02-17/pull-1549 | 7 + changelog/0.8.2_2018-02-17/pull-1554 | 7 + changelog/0.8.2_2018-02-17/pull-1564 | 10 + changelog/0.8.2_2018-02-17/pull-1579 | 3 + changelog/0.8.2_2018-02-17/pull-1584 | 12 + changelog/0.8.2_2018-02-17/pull-1589 | 17 + 
changelog/0.8.2_2018-02-17/pull-1594 | 7 + changelog/0.8.2_2018-02-17/pull-1595 | 11 + changelog/0.8.3_2018-02-26/issue-1497 | 8 + changelog/0.8.3_2018-02-26/issue-1633 | 7 + changelog/0.8.3_2018-02-26/issue-1641 | 10 + changelog/0.8.3_2018-02-26/pull-1560 | 5 + changelog/0.8.3_2018-02-26/pull-1623 | 12 + changelog/0.8.3_2018-02-26/pull-1634 | 7 + changelog/0.8.3_2018-02-26/pull-1638 | 16 + changelog/0.9.0_2018-05-21/issue-1433 | 12 + changelog/0.9.0_2018-05-21/issue-1561 | 10 + changelog/0.9.0_2018-05-21/issue-1608 | 7 + changelog/0.9.0_2018-05-21/issue-1652 | 9 + changelog/0.9.0_2018-05-21/issue-1665 | 27 + changelog/0.9.0_2018-05-21/issue-1721 | 8 + changelog/0.9.0_2018-05-21/issue-1730 | 11 + changelog/0.9.0_2018-05-21/issue-1758 | 8 + changelog/0.9.0_2018-05-21/issue-549 | 43 + changelog/0.9.0_2018-05-21/pull-1552 | 13 + changelog/0.9.0_2018-05-21/pull-1647 | 9 + changelog/0.9.0_2018-05-21/pull-1648 | 6 + changelog/0.9.0_2018-05-21/pull-1649 | 3 + changelog/0.9.0_2018-05-21/pull-1684 | 6 + changelog/0.9.0_2018-05-21/pull-1709 | 7 + changelog/0.9.0_2018-05-21/pull-1720 | 7 + changelog/0.9.0_2018-05-21/pull-1735 | 9 + changelog/0.9.0_2018-05-21/pull-1746 | 7 + changelog/0.9.0_2018-05-21/pull-1782 | 7 + changelog/0.9.1_2018-06-10/issue-1801 | 9 + changelog/0.9.1_2018-06-10/issue-1822 | 9 + changelog/0.9.1_2018-06-10/issue-1825 | 12 + changelog/0.9.1_2018-06-10/issue-1833 | 9 + changelog/0.9.1_2018-06-10/issue-1834 | 8 + changelog/0.9.2_2018-08-06/issue-1854 | 16 + changelog/0.9.2_2018-08-06/issue-1870 | 6 + changelog/0.9.2_2018-08-06/issue-1880 | 12 + changelog/0.9.2_2018-08-06/issue-1893 | 8 + changelog/0.9.2_2018-08-06/issue-1906 | 8 + changelog/0.9.2_2018-08-06/pull-1729 | 4 + changelog/0.9.2_2018-08-06/pull-1772 | 6 + changelog/0.9.2_2018-08-06/pull-1853 | 6 + changelog/0.9.2_2018-08-06/pull-1861 | 6 + changelog/0.9.2_2018-08-06/pull-1882 | 8 + changelog/0.9.2_2018-08-06/pull-1901 | 9 + changelog/0.9.3_2018-10-13/issue-1766 | 7 + changelog/0.9.3_2018-10-13/issue-1909 | 14 + changelog/0.9.3_2018-10-13/issue-1935 | 8 + changelog/0.9.3_2018-10-13/issue-1941 | 15 + changelog/0.9.3_2018-10-13/issue-1967 | 7 + changelog/0.9.3_2018-10-13/issue-1978 | 12 + changelog/0.9.3_2018-10-13/issue-2028 | 7 + changelog/0.9.3_2018-10-13/pull-1780 | 13 + changelog/0.9.3_2018-10-13/pull-1876 | 7 + changelog/0.9.3_2018-10-13/pull-1891 | 7 + changelog/0.9.3_2018-10-13/pull-1920 | 8 + changelog/0.9.3_2018-10-13/pull-1949 | 15 + changelog/0.9.3_2018-10-13/pull-1953 | 7 + changelog/0.9.3_2018-10-13/pull-1962 | 13 + changelog/0.9.4_2019-01-06/issue-1605 | 11 + changelog/0.9.4_2019-01-06/issue-1989 | 7 + changelog/0.9.4_2019-01-06/issue-2040 | 11 + changelog/0.9.4_2019-01-06/issue-2089 | 9 + changelog/0.9.4_2019-01-06/issue-2097 | 12 + changelog/0.9.4_2019-01-06/pull-2017 | 11 + changelog/0.9.4_2019-01-06/pull-2068 | 6 + changelog/0.9.4_2019-01-06/pull-2070 | 7 + changelog/0.9.4_2019-01-06/pull-2086 | 7 + changelog/0.9.4_2019-01-06/pull-2094 | 8 + changelog/0.9.4_2019-01-06/pull-2095 | 7 + changelog/CHANGELOG.tmpl | 32 + changelog/TEMPLATE | 12 + changelog/changelog-github.tmpl | 31 + cmd/restic/.gitignore | 1 + cmd/restic/cleanup.go | 83 + cmd/restic/cmd_backup.go | 552 ++++++ cmd/restic/cmd_cache.go | 166 ++ cmd/restic/cmd_cat.go | 190 ++ cmd/restic/cmd_check.go | 298 +++ cmd/restic/cmd_debug.go | 172 ++ cmd/restic/cmd_diff.go | 356 ++++ cmd/restic/cmd_dump.go | 182 ++ cmd/restic/cmd_find.go | 565 ++++++ cmd/restic/cmd_forget.go | 246 +++ cmd/restic/cmd_generate.go | 94 + cmd/restic/cmd_init.go | 57 + 
cmd/restic/cmd_key.go | 227 +++ cmd/restic/cmd_list.go | 80 + cmd/restic/cmd_ls.go | 231 +++ cmd/restic/cmd_migrate.go | 108 ++ cmd/restic/cmd_mount.go | 193 ++ cmd/restic/cmd_options.go | 29 + cmd/restic/cmd_prune.go | 307 +++ cmd/restic/cmd_rebuild_index.go | 95 + cmd/restic/cmd_recover.go | 148 ++ cmd/restic/cmd_restore.go | 169 ++ cmd/restic/cmd_self_update.go | 73 + cmd/restic/cmd_snapshots.go | 250 +++ cmd/restic/cmd_stats.go | 324 ++++ cmd/restic/cmd_tag.go | 143 ++ cmd/restic/cmd_unlock.go | 51 + cmd/restic/cmd_version.go | 26 + cmd/restic/doc.go | 2 + cmd/restic/exclude.go | 282 +++ cmd/restic/exclude_test.go | 164 ++ cmd/restic/find.go | 76 + cmd/restic/flags_test.go | 24 + cmd/restic/format.go | 95 + cmd/restic/global.go | 686 +++++++ cmd/restic/global_debug.go | 97 + cmd/restic/global_release.go | 6 + cmd/restic/integration_fuse_test.go | 220 +++ cmd/restic/integration_helpers_test.go | 234 +++ cmd/restic/integration_helpers_unix_test.go | 70 + cmd/restic/integration_helpers_windows_test.go | 49 + cmd/restic/integration_test.go | 1190 ++++++++++++ cmd/restic/local_layout_test.go | 41 + cmd/restic/lock.go | 133 ++ cmd/restic/main.go | 111 ++ cmd/restic/testdata/backup-data.tar.gz | Bin 0 -> 11704 bytes cmd/restic/testdata/old-index-repo.tar.gz | Bin 0 -> 6307407 bytes .../testdata/repo-restore-permissions-test.tar.gz | Bin 0 -> 4174 bytes cmd/restic/testdata/repo-same-timestamps.tar.gz | Bin 0 -> 3243 bytes cmd/restic/testdata/small-repo.tar.gz | Bin 0 -> 11747 bytes cmd/restic/testdata/test.hl.tar.gz | Bin 0 -> 198 bytes contrib/restic.spec | 72 + doc.go | 11 + doc/.gitignore | 2 + doc/010_introduction.rst | 19 + doc/020_installation.rst | 306 +++ doc/030_preparing_a_new_repo.rst | 531 ++++++ doc/040_backup.rst | 380 ++++ doc/045_working_with_repos.rst | 115 ++ doc/050_restore.rst | 122 ++ doc/060_forget.rst | 230 +++ doc/070_encryption.rst | 51 + doc/075_scripting.rst | 39 + doc/080_examples.rst | 342 ++++ doc/090_participating.rst | 142 ++ doc/100_references.rst | 23 + doc/110_talks.rst | 34 + doc/Makefile | 25 + doc/REST_backend.rst | 145 ++ doc/_static/css/restic.css | 10 + doc/_static/favicon.ico | Bin 0 -> 1406 bytes doc/bash-completion.sh | 1663 ++++++++++++++++ doc/cache.rst | 36 + doc/conf.py | 110 ++ doc/design.rst | 608 ++++++ doc/developer_information.rst | 123 ++ doc/faq.rst | 175 ++ doc/images/aws_s3/01_aws_start.png | Bin 0 -> 193277 bytes doc/images/aws_s3/02_aws_menu.png | Bin 0 -> 203613 bytes doc/images/aws_s3/03_buckets_list_before.png | Bin 0 -> 96539 bytes doc/images/aws_s3/04_bucket_create_start.png | Bin 0 -> 114747 bytes doc/images/aws_s3/05_bucket_create_review.png | Bin 0 -> 118553 bytes doc/images/aws_s3/06_buckets_list_after.png | Bin 0 -> 98236 bytes doc/images/aws_s3/07_iam_start.png | Bin 0 -> 185550 bytes doc/images/aws_s3/08_user_list.png | Bin 0 -> 99151 bytes doc/images/aws_s3/09_user_name.png | Bin 0 -> 124042 bytes doc/images/aws_s3/10_user_pre_policy.png | Bin 0 -> 179882 bytes doc/images/aws_s3/11_policy_start.png | Bin 0 -> 131397 bytes doc/images/aws_s3/13_policy_review.png | Bin 0 -> 109037 bytes doc/images/aws_s3/14_user_attach_policy.png | Bin 0 -> 127925 bytes doc/images/aws_s3/15_user_review.png | Bin 0 -> 106426 bytes doc/images/aws_s3/16_user_created.png | Bin 0 -> 118017 bytes doc/index.rst | 22 + doc/logo/font/Lemon-Regular.ttf | Bin 0 -> 34648 bytes doc/logo/font/OFL.txt | 93 + doc/logo/logo.png | Bin 0 -> 170435 bytes doc/man/restic-backup.1 | 152 ++ doc/man/restic-cache.1 | 107 ++ doc/man/restic-cat.1 | 95 + 
doc/man/restic-check.1 | 116 ++ doc/man/restic-diff.1 | 115 ++ doc/man/restic-dump.1 | 112 ++ doc/man/restic-find.1 | 161 ++ doc/man/restic-forget.1 | 158 ++ doc/man/restic-generate.1 | 108 ++ doc/man/restic-init.1 | 95 + doc/man/restic-key.1 | 99 + doc/man/restic-list.1 | 95 + doc/man/restic-ls.1 | 130 ++ doc/man/restic-migrate.1 | 100 + doc/man/restic-mount.1 | 160 ++ doc/man/restic-prune.1 | 96 + doc/man/restic-rebuild-index.1 | 96 + doc/man/restic-recover.1 | 97 + doc/man/restic-restore.1 | 128 ++ doc/man/restic-self-update.1 | 102 + doc/man/restic-snapshots.1 | 115 ++ doc/man/restic-stats.1 | 131 ++ doc/man/restic-tag.1 | 126 ++ doc/man/restic-unlock.1 | 99 + doc/man/restic-version.1 | 96 + doc/man/restic.1 | 94 + doc/manual_rest.rst | 391 ++++ doc/requirements.txt | 2 + doc/test_irreducibility.gap | 25 + doc/zsh-completion.zsh | 20 + docker/Dockerfile | 7 + docker/README.md | 24 + docker/build.sh | 11 + go.mod | 55 + go.sum | 108 ++ helpers/.gitignore | 2 + helpers/build-release-binaries/main.go | 269 +++ helpers/prepare-release/main.go | 461 +++++ internal/archiver/archiver.go | 824 ++++++++ internal/archiver/archiver_test.go | 1902 +++++++++++++++++++ internal/archiver/blob_saver.go | 176 ++ internal/archiver/blob_saver_test.go | 115 ++ internal/archiver/buffer.go | 89 + internal/archiver/doc.go | 12 + internal/archiver/file_saver.go | 251 +++ internal/archiver/file_saver_test.go | 97 + internal/archiver/index_uploader.go | 53 + internal/archiver/scanner.go | 107 ++ internal/archiver/scanner_test.go | 329 ++++ internal/archiver/testing.go | 343 ++++ internal/archiver/testing_test.go | 530 ++++++ internal/archiver/tree.go | 281 +++ internal/archiver/tree_saver.go | 171 ++ internal/archiver/tree_saver_test.go | 120 ++ internal/archiver/tree_test.go | 464 +++++ internal/backend/azure/azure.go | 403 ++++ internal/backend/azure/azure_test.go | 218 +++ internal/backend/azure/config.go | 57 + internal/backend/azure/config_test.go | 40 + internal/backend/b2/b2.go | 332 ++++ internal/backend/b2/b2_test.go | 99 + internal/backend/b2/config.go | 93 + internal/backend/b2/config_test.go | 92 + internal/backend/backend_error.go | 84 + internal/backend/backend_retry.go | 148 ++ internal/backend/backend_retry_test.go | 249 +++ internal/backend/doc.go | 4 + internal/backend/foreground_solaris.go | 28 + internal/backend/foreground_unix.go | 79 + internal/backend/foreground_windows.go | 21 + internal/backend/gs/config.go | 60 + internal/backend/gs/config_test.go | 40 + internal/backend/gs/gs.go | 459 +++++ internal/backend/gs/gs_test.go | 122 ++ internal/backend/http_transport.go | 113 ++ internal/backend/layout.go | 168 ++ internal/backend/layout_default.go | 79 + internal/backend/layout_rest.go | 54 + internal/backend/layout_s3legacy.go | 77 + internal/backend/layout_test.go | 450 +++++ internal/backend/local/config.go | 27 + internal/backend/local/doc.go | 2 + internal/backend/local/layout_test.go | 86 + internal/backend/local/local.go | 295 +++ internal/backend/local/local_test.go | 136 ++ internal/backend/local/local_unix.go | 14 + internal/backend/local/local_windows.go | 12 + internal/backend/location/location.go | 113 ++ internal/backend/location/location_test.go | 339 ++++ internal/backend/mem/mem_backend.go | 223 +++ internal/backend/mem/mem_backend_test.go | 66 + internal/backend/paths.go | 26 + internal/backend/rclone/backend.go | 317 ++++ internal/backend/rclone/backend_test.go | 66 + internal/backend/rclone/config.go | 39 + internal/backend/rclone/config_test.go | 34 + 
internal/backend/rclone/stdio_conn.go | 74 + internal/backend/rclone/stdio_conn_go110.go | 25 + internal/backend/rclone/stdio_conn_other.go | 22 + internal/backend/rest/config.go | 48 + internal/backend/rest/config_test.go | 52 + internal/backend/rest/rest.go | 463 +++++ internal/backend/rest/rest_int_test.go | 150 ++ internal/backend/rest/rest_test.go | 154 ++ internal/backend/s3/config.go | 87 + internal/backend/s3/config_test.go | 113 ++ internal/backend/s3/s3.go | 523 +++++ internal/backend/s3/s3_test.go | 328 ++++ internal/backend/semaphore.go | 68 + internal/backend/sftp/config.go | 78 + internal/backend/sftp/config_test.go | 90 + internal/backend/sftp/doc.go | 3 + internal/backend/sftp/layout_test.go | 89 + internal/backend/sftp/sftp.go | 537 ++++++ internal/backend/sftp/sftp_test.go | 95 + internal/backend/sftp/sshcmd_test.go | 52 + internal/backend/shell_split.go | 76 + internal/backend/shell_split_test.go | 105 ++ internal/backend/swift/config.go | 110 ++ internal/backend/swift/config_test.go | 72 + internal/backend/swift/swift.go | 322 ++++ internal/backend/swift/swift_test.go | 125 ++ internal/backend/test/benchmarks.go | 165 ++ internal/backend/test/doc.go | 42 + internal/backend/test/suite.go | 185 ++ internal/backend/test/tests.go | 827 ++++++++ internal/backend/test/tests_test.go | 68 + .../backend/testdata/repo-layout-default.tar.gz | Bin 0 -> 38257 bytes .../backend/testdata/repo-layout-s3legacy.tar.gz | Bin 0 -> 38096 bytes internal/backend/utils.go | 51 + internal/backend/utils_test.go | 147 ++ internal/cache/backend.go | 231 +++ internal/cache/backend_test.go | 174 ++ internal/cache/cache.go | 284 +++ internal/cache/dir.go | 98 + internal/cache/file.go | 222 +++ internal/cache/file_test.go | 259 +++ internal/cache/testing.go | 20 + internal/checker/checker.go | 785 ++++++++ internal/checker/checker_test.go | 389 ++++ internal/checker/testdata/checker-test-repo.tar.gz | Bin 0 -> 6307756 bytes .../duplicate-packs-in-index-test-repo.tar.gz | Bin 0 -> 1943688 bytes internal/checker/testing.go | 52 + internal/crypto/crypto.go | 371 ++++ internal/crypto/crypto_int_test.go | 192 ++ internal/crypto/crypto_test.go | 302 +++ internal/crypto/doc.go | 2 + internal/crypto/kdf.go | 102 + internal/crypto/kdf_test.go | 14 + internal/debug/debug.go | 214 +++ internal/debug/debug_release.go | 6 + internal/debug/doc.go | 2 + internal/debug/hooks.go | 28 + internal/debug/hooks_release.go | 9 + internal/debug/log_test.go | 34 + internal/debug/round_tripper_debug.go | 95 + internal/debug/round_tripper_release.go | 11 + internal/errors/doc.go | 2 + internal/errors/errors.go | 53 + internal/errors/fatal.go | 38 + internal/filter/doc.go | 5 + internal/filter/filter.go | 189 ++ internal/filter/filter_test.go | 389 ++++ internal/filter/testdata/libreoffice.txt.bz2 | Bin 0 -> 102857 bytes internal/fs/const.go | 17 + internal/fs/const_unix.go | 8 + internal/fs/const_windows.go | 6 + internal/fs/deviceid_unix.go | 30 + internal/fs/deviceid_windows.go | 15 + internal/fs/doc.go | 3 + internal/fs/file.go | 118 ++ internal/fs/file_unix.go | 58 + internal/fs/file_windows.go | 98 + internal/fs/fs_helpers.go | 45 + internal/fs/fs_local.go | 96 + internal/fs/fs_reader.go | 289 +++ internal/fs/fs_reader_test.go | 319 ++++ internal/fs/fs_track.go | 54 + internal/fs/helpers.go | 63 + internal/fs/interface.go | 38 + internal/fs/path_prefix.go | 42 + internal/fs/path_prefix_test.go | 59 + internal/fs/stat.go | 34 + internal/fs/stat_bsd.go | 36 + internal/fs/stat_test.go | 31 + internal/fs/stat_unix.go | 36 + 
internal/fs/stat_windows.go | 31 + internal/fuse/blob_size_cache.go | 37 + internal/fuse/dir.go | 220 +++ internal/fuse/file.go | 184 ++ internal/fuse/file_test.go | 154 ++ internal/fuse/link.go | 47 + internal/fuse/meta_dir.go | 90 + internal/fuse/other.go | 43 + internal/fuse/root.go | 75 + internal/fuse/snapshots_dir.go | 554 ++++++ internal/hashing/reader.go | 29 + internal/hashing/reader_test.go | 73 + internal/hashing/writer.go | 31 + internal/hashing/writer_test.go | 74 + internal/index/index.go | 407 ++++ internal/index/index_test.go | 475 +++++ internal/limiter/limiter.go | 25 + internal/limiter/limiter_backend.go | 68 + internal/limiter/static_limiter.go | 98 + internal/migrations/doc.go | 2 + internal/migrations/interface.go | 22 + internal/migrations/list.go | 8 + internal/migrations/s3_layout.go | 117 ++ internal/mock/backend.go | 136 ++ internal/mock/repository.go | 141 ++ internal/options/options.go | 218 +++ internal/options/options_test.go | 312 +++ internal/pack/doc.go | 2 + internal/pack/pack.go | 327 ++++ internal/pack/pack_internal_test.go | 110 ++ internal/pack/pack_test.go | 145 ++ internal/repository/doc.go | 28 + internal/repository/index.go | 569 ++++++ internal/repository/index_test.go | 492 +++++ internal/repository/key.go | 302 +++ internal/repository/master_index.go | 258 +++ internal/repository/master_index_test.go | 123 ++ internal/repository/packer_manager.go | 161 ++ internal/repository/packer_manager_test.go | 168 ++ internal/repository/parallel.go | 65 + internal/repository/parallel_test.go | 129 ++ internal/repository/pool.go | 21 + internal/repository/repack.go | 114 ++ internal/repository/repack_test.go | 253 +++ internal/repository/repository.go | 737 ++++++++ internal/repository/repository_test.go | 500 +++++ internal/repository/testdata/test-repo.tar.gz | Bin 0 -> 6307756 bytes internal/repository/testing.go | 108 ++ internal/restic/backend.go | 61 + internal/restic/backend_find.go | 86 + internal/restic/backend_find_test.go | 66 + internal/restic/blob.go | 120 ++ internal/restic/blob_set.go | 109 ++ internal/restic/blob_test.go | 41 + internal/restic/buffer.go | 21 + internal/restic/cache.go | 37 + internal/restic/config.go | 91 + internal/restic/config_test.go | 55 + internal/restic/doc.go | 5 + internal/restic/duration.go | 137 ++ internal/restic/duration_test.go | 96 + internal/restic/file.go | 62 + internal/restic/file_test.go | 28 + internal/restic/find.go | 39 + internal/restic/find_test.go | 139 ++ internal/restic/hardlinks_index.go | 57 + internal/restic/hardlinks_index_test.go | 35 + internal/restic/id.go | 126 ++ internal/restic/id_int_test.go | 16 + internal/restic/id_test.go | 60 + internal/restic/ids.go | 69 + internal/restic/ids_test.go | 55 + internal/restic/idset.go | 111 ++ internal/restic/idset_test.go | 32 + internal/restic/lock.go | 298 +++ internal/restic/lock_test.go | 258 +++ internal/restic/lock_unix.go | 50 + internal/restic/lock_windows.go | 25 + internal/restic/node.go | 700 +++++++ internal/restic/node_darwin.go | 11 + internal/restic/node_freebsd.go | 11 + internal/restic/node_linux.go | 37 + internal/restic/node_netbsd.go | 27 + internal/restic/node_openbsd.go | 27 + internal/restic/node_solaris.go | 27 + internal/restic/node_test.go | 246 +++ internal/restic/node_unix.go | 32 + internal/restic/node_unix_test.go | 135 ++ internal/restic/node_windows.go | 76 + internal/restic/node_xattr.go | 42 + internal/restic/progress.go | 233 +++ internal/restic/progress_unix.go | 22 + internal/restic/progress_unix_with_siginfo.go 
| 23 + internal/restic/rand_reader.go | 81 + internal/restic/readerat.go | 41 + internal/restic/repository.go | 67 + internal/restic/rewind_reader.go | 90 + internal/restic/rewind_reader_test.go | 154 ++ internal/restic/snapshot.go | 220 +++ internal/restic/snapshot_find.go | 108 ++ internal/restic/snapshot_policy.go | 239 +++ internal/restic/snapshot_policy_test.go | 271 +++ internal/restic/snapshot_test.go | 16 + internal/restic/tag_list.go | 62 + internal/restic/testdata/filter_snapshots_0 | 213 +++ internal/restic/testdata/filter_snapshots_1 | 41 + internal/restic/testdata/filter_snapshots_2 | 28 + internal/restic/testdata/filter_snapshots_3 | 213 +++ internal/restic/testdata/filter_snapshots_4 | 28 + internal/restic/testdata/filter_snapshots_5 | 28 + internal/restic/testdata/filter_snapshots_6 | 147 ++ internal/restic/testdata/filter_snapshots_7 | 15 + internal/restic/testdata/filter_snapshots_8 | 17 + internal/restic/testdata/filter_snapshots_9 | 1 + internal/restic/testdata/policy_keep_snapshots_0 | 1782 ++++++++++++++++++ internal/restic/testdata/policy_keep_snapshots_1 | 184 ++ internal/restic/testdata/policy_keep_snapshots_10 | 187 ++ internal/restic/testdata/policy_keep_snapshots_11 | 40 + internal/restic/testdata/policy_keep_snapshots_12 | 76 + internal/restic/testdata/policy_keep_snapshots_13 | 81 + internal/restic/testdata/policy_keep_snapshots_14 | 112 ++ internal/restic/testdata/policy_keep_snapshots_15 | 135 ++ internal/restic/testdata/policy_keep_snapshots_16 | 60 + internal/restic/testdata/policy_keep_snapshots_17 | 206 ++ internal/restic/testdata/policy_keep_snapshots_18 | 416 ++++ internal/restic/testdata/policy_keep_snapshots_19 | 108 ++ internal/restic/testdata/policy_keep_snapshots_2 | 274 +++ internal/restic/testdata/policy_keep_snapshots_20 | 442 +++++ internal/restic/testdata/policy_keep_snapshots_21 | 22 + internal/restic/testdata/policy_keep_snapshots_22 | 22 + internal/restic/testdata/policy_keep_snapshots_23 | 54 + internal/restic/testdata/policy_keep_snapshots_24 | 310 +++ internal/restic/testdata/policy_keep_snapshots_25 | 310 +++ internal/restic/testdata/policy_keep_snapshots_26 | 1044 ++++++++++ internal/restic/testdata/policy_keep_snapshots_27 | 150 ++ internal/restic/testdata/policy_keep_snapshots_28 | 374 ++++ internal/restic/testdata/policy_keep_snapshots_29 | 1132 +++++++++++ internal/restic/testdata/policy_keep_snapshots_3 | 1914 +++++++++++++++++++ internal/restic/testdata/policy_keep_snapshots_4 | 1988 ++++++++++++++++++++ internal/restic/testdata/policy_keep_snapshots_5 | 364 ++++ internal/restic/testdata/policy_keep_snapshots_6 | 58 + internal/restic/testdata/policy_keep_snapshots_7 | 184 ++ internal/restic/testdata/policy_keep_snapshots_8 | 544 ++++++ internal/restic/testdata/policy_keep_snapshots_9 | 120 ++ internal/restic/testdata/used_blobs_snapshot0 | 23 + internal/restic/testdata/used_blobs_snapshot1 | 15 + internal/restic/testdata/used_blobs_snapshot2 | 24 + internal/restic/testing.go | 215 +++ internal/restic/testing_test.go | 62 + internal/restic/tree.go | 100 + internal/restic/tree_test.go | 115 ++ internal/restorer/doc.go | 33 + internal/restorer/filepacktraverser.go | 52 + internal/restorer/filerestorer.go | 324 ++++ internal/restorer/filerestorer_test.go | 212 +++ internal/restorer/fileswriter.go | 70 + internal/restorer/fileswriter_test.go | 44 + internal/restorer/packcache.go | 243 +++ internal/restorer/packcache_test.go | 305 +++ internal/restorer/packheap.go | 51 + internal/restorer/packqueue.go | 224 +++ 
internal/restorer/packqueue_test.go | 236 +++ internal/restorer/restorer.go | 342 ++++ internal/restorer/restorer_test.go | 696 +++++++ internal/restorer/restorer_unix_test.go | 61 + internal/selfupdate/download.go | 178 ++ internal/selfupdate/github.go | 170 ++ internal/selfupdate/verify.go | 187 ++ internal/test/doc.go | 2 + internal/test/helper.go | 15 + internal/test/helper_go18.go | 19 + internal/test/helpers.go | 204 ++ internal/test/vars.go | 57 + internal/textfile/read.go | 43 + internal/textfile/read_test.go | 76 + internal/ui/backup.go | 367 ++++ internal/ui/message.go | 45 + internal/ui/stdio_wrapper.go | 86 + internal/ui/stdio_wrapper_test.go | 95 + internal/ui/table/table.go | 206 ++ internal/ui/table/table_test.go | 162 ++ internal/ui/termstatus/background.go | 9 + internal/ui/termstatus/background_linux.go | 21 + internal/ui/termstatus/status.go | 334 ++++ internal/ui/termstatus/status_test.go | 32 + internal/ui/termstatus/terminal_posix.go | 36 + internal/ui/termstatus/terminal_unix.go | 47 + internal/ui/termstatus/terminal_windows.go | 154 ++ internal/walker/testing.go | 1 + internal/walker/walker.go | 142 ++ internal/walker/walker_test.go | 544 ++++++ run_integration_tests.go | 673 +++++++ 644 files changed, 80164 insertions(+) create mode 100644 .github/ISSUE_TEMPLATE.md create mode 100644 .github/ISSUE_TEMPLATE/Bug.md create mode 100644 .github/ISSUE_TEMPLATE/Feature.md create mode 100644 .github/PULL_REQUEST_TEMPLATE.md create mode 100644 .gitignore create mode 100644 .hound.yml create mode 100644 .travis.yml create mode 100644 CHANGELOG.md create mode 100644 CONTRIBUTING.md create mode 100644 GOVERNANCE.md create mode 100644 LICENSE create mode 100644 Makefile create mode 100644 README.rst create mode 100644 VERSION create mode 100644 appveyor.yml create mode 100644 build.go create mode 100644 changelog/0.6.0_2017-05-29/issue-953 create mode 100644 changelog/0.6.0_2017-05-29/issue-965 create mode 100644 changelog/0.6.0_2017-05-29/pull-962 create mode 100644 changelog/0.6.1_2017-06-01/issue-985 create mode 100644 changelog/0.6.1_2017-06-01/pull-891 create mode 100644 changelog/0.6.1_2017-06-01/pull-974 create mode 100644 changelog/0.7.0_2017-07-01/issue-1013 create mode 100644 changelog/0.7.0_2017-07-01/issue-1021 create mode 100644 changelog/0.7.0_2017-07-01/issue-1029 create mode 100644 changelog/0.7.0_2017-07-01/issue-512 create mode 100644 changelog/0.7.0_2017-07-01/issue-636 create mode 100644 changelog/0.7.0_2017-07-01/issue-965 create mode 100644 changelog/0.7.0_2017-07-01/issue-989 create mode 100644 changelog/0.7.0_2017-07-01/pull-975 create mode 100644 changelog/0.7.0_2017-07-01/pull-998 create mode 100644 changelog/0.7.1_2017-07-22/issue-1055 create mode 100644 changelog/0.7.1_2017-07-22/issue-1067 create mode 100644 changelog/0.7.1_2017-07-22/issue-1073 create mode 100644 changelog/0.7.1_2017-07-22/issue-1081 create mode 100644 changelog/0.7.1_2017-07-22/pull-1080 create mode 100644 changelog/0.7.1_2017-07-22/pull-1082 create mode 100644 changelog/0.7.1_2017-07-22/pull-1115 create mode 100644 changelog/0.7.2_2017-09-13/issue-1132 create mode 100644 changelog/0.7.2_2017-09-13/issue-1167 create mode 100644 changelog/0.7.2_2017-09-13/issue-1179 create mode 100644 changelog/0.7.2_2017-09-13/issue-1208 create mode 100644 changelog/0.7.2_2017-09-13/issue-317 create mode 100644 changelog/0.7.2_2017-09-13/issues-697 create mode 100644 changelog/0.7.2_2017-09-13/pull-1044 create mode 100644 changelog/0.7.2_2017-09-13/pull-1061 create mode 100644 
changelog/0.7.2_2017-09-13/pull-1126 create mode 100644 changelog/0.7.2_2017-09-13/pull-1134 create mode 100644 changelog/0.7.2_2017-09-13/pull-1144 create mode 100644 changelog/0.7.2_2017-09-13/pull-1149 create mode 100644 changelog/0.7.2_2017-09-13/pull-1164 create mode 100644 changelog/0.7.2_2017-09-13/pull-1191 create mode 100644 changelog/0.7.2_2017-09-13/pull-1196 create mode 100644 changelog/0.7.2_2017-09-13/pull-1203 create mode 100644 changelog/0.7.2_2017-09-13/pull-1205 create mode 100644 changelog/0.7.3_2017-09-20/issue-1246 create mode 100644 changelog/0.8.0_2017-11-26/issue-1102 create mode 100644 changelog/0.8.0_2017-11-26/issue-1114 create mode 100644 changelog/0.8.0_2017-11-26/issue-1216 create mode 100644 changelog/0.8.0_2017-11-26/issue-1256 create mode 100644 changelog/0.8.0_2017-11-26/issue-1271 create mode 100644 changelog/0.8.0_2017-11-26/issue-1274 create mode 100644 changelog/0.8.0_2017-11-26/issue-1291 create mode 100644 changelog/0.8.0_2017-11-26/issue-1367 create mode 100644 changelog/0.8.0_2017-11-26/issue-1445 create mode 100644 changelog/0.8.0_2017-11-26/issue-448 create mode 100644 changelog/0.8.0_2017-11-26/issue-510 create mode 100644 changelog/0.8.0_2017-11-26/pull-1040 create mode 100644 changelog/0.8.0_2017-11-26/pull-1249 create mode 100644 changelog/0.8.0_2017-11-26/pull-1269 create mode 100644 changelog/0.8.0_2017-11-26/pull-1281 create mode 100644 changelog/0.8.0_2017-11-26/pull-1317 create mode 100644 changelog/0.8.0_2017-11-26/pull-1319 create mode 100644 changelog/0.8.0_2017-11-26/pull-1353 create mode 100644 changelog/0.8.0_2017-11-26/pull-1437 create mode 100644 changelog/0.8.1_2017-12-27/issue-1457 create mode 100644 changelog/0.8.1_2017-12-27/pull-1436 create mode 100644 changelog/0.8.1_2017-12-27/pull-1439 create mode 100644 changelog/0.8.1_2017-12-27/pull-1452 create mode 100644 changelog/0.8.1_2017-12-27/pull-1454 create mode 100644 changelog/0.8.1_2017-12-27/pull-1459 create mode 100644 changelog/0.8.1_2017-12-27/pull-1462 create mode 100644 changelog/0.8.2_2018-02-17/issue-1506 create mode 100644 changelog/0.8.2_2018-02-17/issue-1512 create mode 100644 changelog/0.8.2_2018-02-17/issue-1522 create mode 100644 changelog/0.8.2_2018-02-17/issue-1528 create mode 100644 changelog/0.8.2_2018-02-17/issue-1541 create mode 100644 changelog/0.8.2_2018-02-17/issue-1567 create mode 100644 changelog/0.8.2_2018-02-17/issue-1590 create mode 100644 changelog/0.8.2_2018-02-17/pull-1507 create mode 100644 changelog/0.8.2_2018-02-17/pull-1538 create mode 100644 changelog/0.8.2_2018-02-17/pull-1549 create mode 100644 changelog/0.8.2_2018-02-17/pull-1554 create mode 100644 changelog/0.8.2_2018-02-17/pull-1564 create mode 100644 changelog/0.8.2_2018-02-17/pull-1579 create mode 100644 changelog/0.8.2_2018-02-17/pull-1584 create mode 100644 changelog/0.8.2_2018-02-17/pull-1589 create mode 100644 changelog/0.8.2_2018-02-17/pull-1594 create mode 100644 changelog/0.8.2_2018-02-17/pull-1595 create mode 100644 changelog/0.8.3_2018-02-26/issue-1497 create mode 100644 changelog/0.8.3_2018-02-26/issue-1633 create mode 100644 changelog/0.8.3_2018-02-26/issue-1641 create mode 100644 changelog/0.8.3_2018-02-26/pull-1560 create mode 100644 changelog/0.8.3_2018-02-26/pull-1623 create mode 100644 changelog/0.8.3_2018-02-26/pull-1634 create mode 100644 changelog/0.8.3_2018-02-26/pull-1638 create mode 100644 changelog/0.9.0_2018-05-21/issue-1433 create mode 100644 changelog/0.9.0_2018-05-21/issue-1561 create mode 100644 changelog/0.9.0_2018-05-21/issue-1608 create mode 100644 
changelog/0.9.0_2018-05-21/issue-1652 create mode 100644 changelog/0.9.0_2018-05-21/issue-1665 create mode 100644 changelog/0.9.0_2018-05-21/issue-1721 create mode 100644 changelog/0.9.0_2018-05-21/issue-1730 create mode 100644 changelog/0.9.0_2018-05-21/issue-1758 create mode 100644 changelog/0.9.0_2018-05-21/issue-549 create mode 100644 changelog/0.9.0_2018-05-21/pull-1552 create mode 100644 changelog/0.9.0_2018-05-21/pull-1647 create mode 100644 changelog/0.9.0_2018-05-21/pull-1648 create mode 100644 changelog/0.9.0_2018-05-21/pull-1649 create mode 100644 changelog/0.9.0_2018-05-21/pull-1684 create mode 100644 changelog/0.9.0_2018-05-21/pull-1709 create mode 100644 changelog/0.9.0_2018-05-21/pull-1720 create mode 100644 changelog/0.9.0_2018-05-21/pull-1735 create mode 100644 changelog/0.9.0_2018-05-21/pull-1746 create mode 100644 changelog/0.9.0_2018-05-21/pull-1782 create mode 100644 changelog/0.9.1_2018-06-10/issue-1801 create mode 100644 changelog/0.9.1_2018-06-10/issue-1822 create mode 100644 changelog/0.9.1_2018-06-10/issue-1825 create mode 100644 changelog/0.9.1_2018-06-10/issue-1833 create mode 100644 changelog/0.9.1_2018-06-10/issue-1834 create mode 100644 changelog/0.9.2_2018-08-06/issue-1854 create mode 100644 changelog/0.9.2_2018-08-06/issue-1870 create mode 100644 changelog/0.9.2_2018-08-06/issue-1880 create mode 100644 changelog/0.9.2_2018-08-06/issue-1893 create mode 100644 changelog/0.9.2_2018-08-06/issue-1906 create mode 100644 changelog/0.9.2_2018-08-06/pull-1729 create mode 100644 changelog/0.9.2_2018-08-06/pull-1772 create mode 100644 changelog/0.9.2_2018-08-06/pull-1853 create mode 100644 changelog/0.9.2_2018-08-06/pull-1861 create mode 100644 changelog/0.9.2_2018-08-06/pull-1882 create mode 100644 changelog/0.9.2_2018-08-06/pull-1901 create mode 100644 changelog/0.9.3_2018-10-13/issue-1766 create mode 100644 changelog/0.9.3_2018-10-13/issue-1909 create mode 100644 changelog/0.9.3_2018-10-13/issue-1935 create mode 100644 changelog/0.9.3_2018-10-13/issue-1941 create mode 100644 changelog/0.9.3_2018-10-13/issue-1967 create mode 100644 changelog/0.9.3_2018-10-13/issue-1978 create mode 100644 changelog/0.9.3_2018-10-13/issue-2028 create mode 100644 changelog/0.9.3_2018-10-13/pull-1780 create mode 100644 changelog/0.9.3_2018-10-13/pull-1876 create mode 100644 changelog/0.9.3_2018-10-13/pull-1891 create mode 100644 changelog/0.9.3_2018-10-13/pull-1920 create mode 100644 changelog/0.9.3_2018-10-13/pull-1949 create mode 100644 changelog/0.9.3_2018-10-13/pull-1953 create mode 100644 changelog/0.9.3_2018-10-13/pull-1962 create mode 100644 changelog/0.9.4_2019-01-06/issue-1605 create mode 100644 changelog/0.9.4_2019-01-06/issue-1989 create mode 100644 changelog/0.9.4_2019-01-06/issue-2040 create mode 100644 changelog/0.9.4_2019-01-06/issue-2089 create mode 100644 changelog/0.9.4_2019-01-06/issue-2097 create mode 100644 changelog/0.9.4_2019-01-06/pull-2017 create mode 100644 changelog/0.9.4_2019-01-06/pull-2068 create mode 100644 changelog/0.9.4_2019-01-06/pull-2070 create mode 100644 changelog/0.9.4_2019-01-06/pull-2086 create mode 100644 changelog/0.9.4_2019-01-06/pull-2094 create mode 100644 changelog/0.9.4_2019-01-06/pull-2095 create mode 100644 changelog/CHANGELOG.tmpl create mode 100644 changelog/TEMPLATE create mode 100644 changelog/changelog-github.tmpl create mode 100644 cmd/restic/.gitignore create mode 100644 cmd/restic/cleanup.go create mode 100644 cmd/restic/cmd_backup.go create mode 100644 cmd/restic/cmd_cache.go create mode 100644 cmd/restic/cmd_cat.go create mode 
100644 cmd/restic/cmd_check.go create mode 100644 cmd/restic/cmd_debug.go create mode 100644 cmd/restic/cmd_diff.go create mode 100644 cmd/restic/cmd_dump.go create mode 100644 cmd/restic/cmd_find.go create mode 100644 cmd/restic/cmd_forget.go create mode 100644 cmd/restic/cmd_generate.go create mode 100644 cmd/restic/cmd_init.go create mode 100644 cmd/restic/cmd_key.go create mode 100644 cmd/restic/cmd_list.go create mode 100644 cmd/restic/cmd_ls.go create mode 100644 cmd/restic/cmd_migrate.go create mode 100644 cmd/restic/cmd_mount.go create mode 100644 cmd/restic/cmd_options.go create mode 100644 cmd/restic/cmd_prune.go create mode 100644 cmd/restic/cmd_rebuild_index.go create mode 100644 cmd/restic/cmd_recover.go create mode 100644 cmd/restic/cmd_restore.go create mode 100644 cmd/restic/cmd_self_update.go create mode 100644 cmd/restic/cmd_snapshots.go create mode 100644 cmd/restic/cmd_stats.go create mode 100644 cmd/restic/cmd_tag.go create mode 100644 cmd/restic/cmd_unlock.go create mode 100644 cmd/restic/cmd_version.go create mode 100644 cmd/restic/doc.go create mode 100644 cmd/restic/exclude.go create mode 100644 cmd/restic/exclude_test.go create mode 100644 cmd/restic/find.go create mode 100644 cmd/restic/flags_test.go create mode 100644 cmd/restic/format.go create mode 100644 cmd/restic/global.go create mode 100644 cmd/restic/global_debug.go create mode 100644 cmd/restic/global_release.go create mode 100644 cmd/restic/integration_fuse_test.go create mode 100644 cmd/restic/integration_helpers_test.go create mode 100644 cmd/restic/integration_helpers_unix_test.go create mode 100644 cmd/restic/integration_helpers_windows_test.go create mode 100644 cmd/restic/integration_test.go create mode 100644 cmd/restic/local_layout_test.go create mode 100644 cmd/restic/lock.go create mode 100644 cmd/restic/main.go create mode 100644 cmd/restic/testdata/backup-data.tar.gz create mode 100644 cmd/restic/testdata/old-index-repo.tar.gz create mode 100644 cmd/restic/testdata/repo-restore-permissions-test.tar.gz create mode 100644 cmd/restic/testdata/repo-same-timestamps.tar.gz create mode 100644 cmd/restic/testdata/small-repo.tar.gz create mode 100644 cmd/restic/testdata/test.hl.tar.gz create mode 100644 contrib/restic.spec create mode 100644 doc.go create mode 100644 doc/.gitignore create mode 100644 doc/010_introduction.rst create mode 100644 doc/020_installation.rst create mode 100644 doc/030_preparing_a_new_repo.rst create mode 100644 doc/040_backup.rst create mode 100644 doc/045_working_with_repos.rst create mode 100644 doc/050_restore.rst create mode 100644 doc/060_forget.rst create mode 100644 doc/070_encryption.rst create mode 100644 doc/075_scripting.rst create mode 100644 doc/080_examples.rst create mode 100644 doc/090_participating.rst create mode 100644 doc/100_references.rst create mode 100644 doc/110_talks.rst create mode 100644 doc/Makefile create mode 100644 doc/REST_backend.rst create mode 100644 doc/_static/css/restic.css create mode 100644 doc/_static/favicon.ico create mode 100644 doc/bash-completion.sh create mode 100644 doc/cache.rst create mode 100644 doc/conf.py create mode 100644 doc/design.rst create mode 100644 doc/developer_information.rst create mode 100644 doc/faq.rst create mode 100644 doc/images/aws_s3/01_aws_start.png create mode 100644 doc/images/aws_s3/02_aws_menu.png create mode 100644 doc/images/aws_s3/03_buckets_list_before.png create mode 100644 doc/images/aws_s3/04_bucket_create_start.png create mode 100644 doc/images/aws_s3/05_bucket_create_review.png create 
mode 100644 doc/images/aws_s3/06_buckets_list_after.png create mode 100644 doc/images/aws_s3/07_iam_start.png create mode 100644 doc/images/aws_s3/08_user_list.png create mode 100644 doc/images/aws_s3/09_user_name.png create mode 100644 doc/images/aws_s3/10_user_pre_policy.png create mode 100644 doc/images/aws_s3/11_policy_start.png create mode 100644 doc/images/aws_s3/13_policy_review.png create mode 100644 doc/images/aws_s3/14_user_attach_policy.png create mode 100644 doc/images/aws_s3/15_user_review.png create mode 100644 doc/images/aws_s3/16_user_created.png create mode 100644 doc/index.rst create mode 100644 doc/logo/font/Lemon-Regular.ttf create mode 100644 doc/logo/font/OFL.txt create mode 100644 doc/logo/logo.png create mode 100644 doc/man/restic-backup.1 create mode 100644 doc/man/restic-cache.1 create mode 100644 doc/man/restic-cat.1 create mode 100644 doc/man/restic-check.1 create mode 100644 doc/man/restic-diff.1 create mode 100644 doc/man/restic-dump.1 create mode 100644 doc/man/restic-find.1 create mode 100644 doc/man/restic-forget.1 create mode 100644 doc/man/restic-generate.1 create mode 100644 doc/man/restic-init.1 create mode 100644 doc/man/restic-key.1 create mode 100644 doc/man/restic-list.1 create mode 100644 doc/man/restic-ls.1 create mode 100644 doc/man/restic-migrate.1 create mode 100644 doc/man/restic-mount.1 create mode 100644 doc/man/restic-prune.1 create mode 100644 doc/man/restic-rebuild-index.1 create mode 100644 doc/man/restic-recover.1 create mode 100644 doc/man/restic-restore.1 create mode 100644 doc/man/restic-self-update.1 create mode 100644 doc/man/restic-snapshots.1 create mode 100644 doc/man/restic-stats.1 create mode 100644 doc/man/restic-tag.1 create mode 100644 doc/man/restic-unlock.1 create mode 100644 doc/man/restic-version.1 create mode 100644 doc/man/restic.1 create mode 100644 doc/manual_rest.rst create mode 100644 doc/requirements.txt create mode 100644 doc/test_irreducibility.gap create mode 100644 doc/zsh-completion.zsh create mode 100644 docker/Dockerfile create mode 100644 docker/README.md create mode 100755 docker/build.sh create mode 100644 go.mod create mode 100644 go.sum create mode 100644 helpers/.gitignore create mode 100644 helpers/build-release-binaries/main.go create mode 100644 helpers/prepare-release/main.go create mode 100644 internal/archiver/archiver.go create mode 100644 internal/archiver/archiver_test.go create mode 100644 internal/archiver/blob_saver.go create mode 100644 internal/archiver/blob_saver_test.go create mode 100644 internal/archiver/buffer.go create mode 100644 internal/archiver/doc.go create mode 100644 internal/archiver/file_saver.go create mode 100644 internal/archiver/file_saver_test.go create mode 100644 internal/archiver/index_uploader.go create mode 100644 internal/archiver/scanner.go create mode 100644 internal/archiver/scanner_test.go create mode 100644 internal/archiver/testing.go create mode 100644 internal/archiver/testing_test.go create mode 100644 internal/archiver/tree.go create mode 100644 internal/archiver/tree_saver.go create mode 100644 internal/archiver/tree_saver_test.go create mode 100644 internal/archiver/tree_test.go create mode 100644 internal/backend/azure/azure.go create mode 100644 internal/backend/azure/azure_test.go create mode 100644 internal/backend/azure/config.go create mode 100644 internal/backend/azure/config_test.go create mode 100644 internal/backend/b2/b2.go create mode 100644 internal/backend/b2/b2_test.go create mode 100644 internal/backend/b2/config.go create mode 
100644 internal/backend/b2/config_test.go create mode 100644 internal/backend/backend_error.go create mode 100644 internal/backend/backend_retry.go create mode 100644 internal/backend/backend_retry_test.go create mode 100644 internal/backend/doc.go create mode 100644 internal/backend/foreground_solaris.go create mode 100644 internal/backend/foreground_unix.go create mode 100644 internal/backend/foreground_windows.go create mode 100644 internal/backend/gs/config.go create mode 100644 internal/backend/gs/config_test.go create mode 100644 internal/backend/gs/gs.go create mode 100644 internal/backend/gs/gs_test.go create mode 100644 internal/backend/http_transport.go create mode 100644 internal/backend/layout.go create mode 100644 internal/backend/layout_default.go create mode 100644 internal/backend/layout_rest.go create mode 100644 internal/backend/layout_s3legacy.go create mode 100644 internal/backend/layout_test.go create mode 100644 internal/backend/local/config.go create mode 100644 internal/backend/local/doc.go create mode 100644 internal/backend/local/layout_test.go create mode 100644 internal/backend/local/local.go create mode 100644 internal/backend/local/local_test.go create mode 100644 internal/backend/local/local_unix.go create mode 100644 internal/backend/local/local_windows.go create mode 100644 internal/backend/location/location.go create mode 100644 internal/backend/location/location_test.go create mode 100644 internal/backend/mem/mem_backend.go create mode 100644 internal/backend/mem/mem_backend_test.go create mode 100644 internal/backend/paths.go create mode 100644 internal/backend/rclone/backend.go create mode 100644 internal/backend/rclone/backend_test.go create mode 100644 internal/backend/rclone/config.go create mode 100644 internal/backend/rclone/config_test.go create mode 100644 internal/backend/rclone/stdio_conn.go create mode 100644 internal/backend/rclone/stdio_conn_go110.go create mode 100644 internal/backend/rclone/stdio_conn_other.go create mode 100644 internal/backend/rest/config.go create mode 100644 internal/backend/rest/config_test.go create mode 100644 internal/backend/rest/rest.go create mode 100644 internal/backend/rest/rest_int_test.go create mode 100644 internal/backend/rest/rest_test.go create mode 100644 internal/backend/s3/config.go create mode 100644 internal/backend/s3/config_test.go create mode 100644 internal/backend/s3/s3.go create mode 100644 internal/backend/s3/s3_test.go create mode 100644 internal/backend/semaphore.go create mode 100644 internal/backend/sftp/config.go create mode 100644 internal/backend/sftp/config_test.go create mode 100644 internal/backend/sftp/doc.go create mode 100644 internal/backend/sftp/layout_test.go create mode 100644 internal/backend/sftp/sftp.go create mode 100644 internal/backend/sftp/sftp_test.go create mode 100644 internal/backend/sftp/sshcmd_test.go create mode 100644 internal/backend/shell_split.go create mode 100644 internal/backend/shell_split_test.go create mode 100644 internal/backend/swift/config.go create mode 100644 internal/backend/swift/config_test.go create mode 100644 internal/backend/swift/swift.go create mode 100644 internal/backend/swift/swift_test.go create mode 100644 internal/backend/test/benchmarks.go create mode 100644 internal/backend/test/doc.go create mode 100644 internal/backend/test/suite.go create mode 100644 internal/backend/test/tests.go create mode 100644 internal/backend/test/tests_test.go create mode 100644 internal/backend/testdata/repo-layout-default.tar.gz create mode 100644 
internal/backend/testdata/repo-layout-s3legacy.tar.gz create mode 100644 internal/backend/utils.go create mode 100644 internal/backend/utils_test.go create mode 100644 internal/cache/backend.go create mode 100644 internal/cache/backend_test.go create mode 100644 internal/cache/cache.go create mode 100644 internal/cache/dir.go create mode 100644 internal/cache/file.go create mode 100644 internal/cache/file_test.go create mode 100644 internal/cache/testing.go create mode 100644 internal/checker/checker.go create mode 100644 internal/checker/checker_test.go create mode 100644 internal/checker/testdata/checker-test-repo.tar.gz create mode 100644 internal/checker/testdata/duplicate-packs-in-index-test-repo.tar.gz create mode 100644 internal/checker/testing.go create mode 100644 internal/crypto/crypto.go create mode 100644 internal/crypto/crypto_int_test.go create mode 100644 internal/crypto/crypto_test.go create mode 100644 internal/crypto/doc.go create mode 100644 internal/crypto/kdf.go create mode 100644 internal/crypto/kdf_test.go create mode 100644 internal/debug/debug.go create mode 100644 internal/debug/debug_release.go create mode 100644 internal/debug/doc.go create mode 100644 internal/debug/hooks.go create mode 100644 internal/debug/hooks_release.go create mode 100644 internal/debug/log_test.go create mode 100644 internal/debug/round_tripper_debug.go create mode 100644 internal/debug/round_tripper_release.go create mode 100644 internal/errors/doc.go create mode 100644 internal/errors/errors.go create mode 100644 internal/errors/fatal.go create mode 100644 internal/filter/doc.go create mode 100644 internal/filter/filter.go create mode 100644 internal/filter/filter_test.go create mode 100644 internal/filter/testdata/libreoffice.txt.bz2 create mode 100644 internal/fs/const.go create mode 100644 internal/fs/const_unix.go create mode 100644 internal/fs/const_windows.go create mode 100644 internal/fs/deviceid_unix.go create mode 100644 internal/fs/deviceid_windows.go create mode 100644 internal/fs/doc.go create mode 100644 internal/fs/file.go create mode 100644 internal/fs/file_unix.go create mode 100644 internal/fs/file_windows.go create mode 100644 internal/fs/fs_helpers.go create mode 100644 internal/fs/fs_local.go create mode 100644 internal/fs/fs_reader.go create mode 100644 internal/fs/fs_reader_test.go create mode 100644 internal/fs/fs_track.go create mode 100644 internal/fs/helpers.go create mode 100644 internal/fs/interface.go create mode 100644 internal/fs/path_prefix.go create mode 100644 internal/fs/path_prefix_test.go create mode 100644 internal/fs/stat.go create mode 100644 internal/fs/stat_bsd.go create mode 100644 internal/fs/stat_test.go create mode 100644 internal/fs/stat_unix.go create mode 100644 internal/fs/stat_windows.go create mode 100644 internal/fuse/blob_size_cache.go create mode 100644 internal/fuse/dir.go create mode 100644 internal/fuse/file.go create mode 100644 internal/fuse/file_test.go create mode 100644 internal/fuse/link.go create mode 100644 internal/fuse/meta_dir.go create mode 100644 internal/fuse/other.go create mode 100644 internal/fuse/root.go create mode 100644 internal/fuse/snapshots_dir.go create mode 100644 internal/hashing/reader.go create mode 100644 internal/hashing/reader_test.go create mode 100644 internal/hashing/writer.go create mode 100644 internal/hashing/writer_test.go create mode 100644 internal/index/index.go create mode 100644 internal/index/index_test.go create mode 100644 internal/limiter/limiter.go create mode 100644 
internal/limiter/limiter_backend.go create mode 100644 internal/limiter/static_limiter.go create mode 100644 internal/migrations/doc.go create mode 100644 internal/migrations/interface.go create mode 100644 internal/migrations/list.go create mode 100644 internal/migrations/s3_layout.go create mode 100644 internal/mock/backend.go create mode 100644 internal/mock/repository.go create mode 100644 internal/options/options.go create mode 100644 internal/options/options_test.go create mode 100644 internal/pack/doc.go create mode 100644 internal/pack/pack.go create mode 100644 internal/pack/pack_internal_test.go create mode 100644 internal/pack/pack_test.go create mode 100644 internal/repository/doc.go create mode 100644 internal/repository/index.go create mode 100644 internal/repository/index_test.go create mode 100644 internal/repository/key.go create mode 100644 internal/repository/master_index.go create mode 100644 internal/repository/master_index_test.go create mode 100644 internal/repository/packer_manager.go create mode 100644 internal/repository/packer_manager_test.go create mode 100644 internal/repository/parallel.go create mode 100644 internal/repository/parallel_test.go create mode 100644 internal/repository/pool.go create mode 100644 internal/repository/repack.go create mode 100644 internal/repository/repack_test.go create mode 100644 internal/repository/repository.go create mode 100644 internal/repository/repository_test.go create mode 100644 internal/repository/testdata/test-repo.tar.gz create mode 100644 internal/repository/testing.go create mode 100644 internal/restic/backend.go create mode 100644 internal/restic/backend_find.go create mode 100644 internal/restic/backend_find_test.go create mode 100644 internal/restic/blob.go create mode 100644 internal/restic/blob_set.go create mode 100644 internal/restic/blob_test.go create mode 100644 internal/restic/buffer.go create mode 100644 internal/restic/cache.go create mode 100644 internal/restic/config.go create mode 100644 internal/restic/config_test.go create mode 100644 internal/restic/doc.go create mode 100644 internal/restic/duration.go create mode 100644 internal/restic/duration_test.go create mode 100644 internal/restic/file.go create mode 100644 internal/restic/file_test.go create mode 100644 internal/restic/find.go create mode 100644 internal/restic/find_test.go create mode 100644 internal/restic/hardlinks_index.go create mode 100644 internal/restic/hardlinks_index_test.go create mode 100644 internal/restic/id.go create mode 100644 internal/restic/id_int_test.go create mode 100644 internal/restic/id_test.go create mode 100644 internal/restic/ids.go create mode 100644 internal/restic/ids_test.go create mode 100644 internal/restic/idset.go create mode 100644 internal/restic/idset_test.go create mode 100644 internal/restic/lock.go create mode 100644 internal/restic/lock_test.go create mode 100644 internal/restic/lock_unix.go create mode 100644 internal/restic/lock_windows.go create mode 100644 internal/restic/node.go create mode 100644 internal/restic/node_darwin.go create mode 100644 internal/restic/node_freebsd.go create mode 100644 internal/restic/node_linux.go create mode 100644 internal/restic/node_netbsd.go create mode 100644 internal/restic/node_openbsd.go create mode 100644 internal/restic/node_solaris.go create mode 100644 internal/restic/node_test.go create mode 100644 internal/restic/node_unix.go create mode 100644 internal/restic/node_unix_test.go create mode 100644 internal/restic/node_windows.go create mode 100644 
internal/restic/node_xattr.go create mode 100644 internal/restic/progress.go create mode 100644 internal/restic/progress_unix.go create mode 100644 internal/restic/progress_unix_with_siginfo.go create mode 100644 internal/restic/rand_reader.go create mode 100644 internal/restic/readerat.go create mode 100644 internal/restic/repository.go create mode 100644 internal/restic/rewind_reader.go create mode 100644 internal/restic/rewind_reader_test.go create mode 100644 internal/restic/snapshot.go create mode 100644 internal/restic/snapshot_find.go create mode 100644 internal/restic/snapshot_policy.go create mode 100644 internal/restic/snapshot_policy_test.go create mode 100644 internal/restic/snapshot_test.go create mode 100644 internal/restic/tag_list.go create mode 100644 internal/restic/testdata/filter_snapshots_0 create mode 100644 internal/restic/testdata/filter_snapshots_1 create mode 100644 internal/restic/testdata/filter_snapshots_2 create mode 100644 internal/restic/testdata/filter_snapshots_3 create mode 100644 internal/restic/testdata/filter_snapshots_4 create mode 100644 internal/restic/testdata/filter_snapshots_5 create mode 100644 internal/restic/testdata/filter_snapshots_6 create mode 100644 internal/restic/testdata/filter_snapshots_7 create mode 100644 internal/restic/testdata/filter_snapshots_8 create mode 100644 internal/restic/testdata/filter_snapshots_9 create mode 100644 internal/restic/testdata/policy_keep_snapshots_0 create mode 100644 internal/restic/testdata/policy_keep_snapshots_1 create mode 100644 internal/restic/testdata/policy_keep_snapshots_10 create mode 100644 internal/restic/testdata/policy_keep_snapshots_11 create mode 100644 internal/restic/testdata/policy_keep_snapshots_12 create mode 100644 internal/restic/testdata/policy_keep_snapshots_13 create mode 100644 internal/restic/testdata/policy_keep_snapshots_14 create mode 100644 internal/restic/testdata/policy_keep_snapshots_15 create mode 100644 internal/restic/testdata/policy_keep_snapshots_16 create mode 100644 internal/restic/testdata/policy_keep_snapshots_17 create mode 100644 internal/restic/testdata/policy_keep_snapshots_18 create mode 100644 internal/restic/testdata/policy_keep_snapshots_19 create mode 100644 internal/restic/testdata/policy_keep_snapshots_2 create mode 100644 internal/restic/testdata/policy_keep_snapshots_20 create mode 100644 internal/restic/testdata/policy_keep_snapshots_21 create mode 100644 internal/restic/testdata/policy_keep_snapshots_22 create mode 100644 internal/restic/testdata/policy_keep_snapshots_23 create mode 100644 internal/restic/testdata/policy_keep_snapshots_24 create mode 100644 internal/restic/testdata/policy_keep_snapshots_25 create mode 100644 internal/restic/testdata/policy_keep_snapshots_26 create mode 100644 internal/restic/testdata/policy_keep_snapshots_27 create mode 100644 internal/restic/testdata/policy_keep_snapshots_28 create mode 100644 internal/restic/testdata/policy_keep_snapshots_29 create mode 100644 internal/restic/testdata/policy_keep_snapshots_3 create mode 100644 internal/restic/testdata/policy_keep_snapshots_4 create mode 100644 internal/restic/testdata/policy_keep_snapshots_5 create mode 100644 internal/restic/testdata/policy_keep_snapshots_6 create mode 100644 internal/restic/testdata/policy_keep_snapshots_7 create mode 100644 internal/restic/testdata/policy_keep_snapshots_8 create mode 100644 internal/restic/testdata/policy_keep_snapshots_9 create mode 100644 internal/restic/testdata/used_blobs_snapshot0 create mode 100644 
internal/restic/testdata/used_blobs_snapshot1 create mode 100644 internal/restic/testdata/used_blobs_snapshot2 create mode 100644 internal/restic/testing.go create mode 100644 internal/restic/testing_test.go create mode 100644 internal/restic/tree.go create mode 100644 internal/restic/tree_test.go create mode 100644 internal/restorer/doc.go create mode 100644 internal/restorer/filepacktraverser.go create mode 100644 internal/restorer/filerestorer.go create mode 100644 internal/restorer/filerestorer_test.go create mode 100644 internal/restorer/fileswriter.go create mode 100644 internal/restorer/fileswriter_test.go create mode 100644 internal/restorer/packcache.go create mode 100644 internal/restorer/packcache_test.go create mode 100644 internal/restorer/packheap.go create mode 100644 internal/restorer/packqueue.go create mode 100644 internal/restorer/packqueue_test.go create mode 100644 internal/restorer/restorer.go create mode 100644 internal/restorer/restorer_test.go create mode 100644 internal/restorer/restorer_unix_test.go create mode 100644 internal/selfupdate/download.go create mode 100644 internal/selfupdate/github.go create mode 100644 internal/selfupdate/verify.go create mode 100644 internal/test/doc.go create mode 100644 internal/test/helper.go create mode 100644 internal/test/helper_go18.go create mode 100644 internal/test/helpers.go create mode 100644 internal/test/vars.go create mode 100644 internal/textfile/read.go create mode 100644 internal/textfile/read_test.go create mode 100644 internal/ui/backup.go create mode 100644 internal/ui/message.go create mode 100644 internal/ui/stdio_wrapper.go create mode 100644 internal/ui/stdio_wrapper_test.go create mode 100644 internal/ui/table/table.go create mode 100644 internal/ui/table/table_test.go create mode 100644 internal/ui/termstatus/background.go create mode 100644 internal/ui/termstatus/background_linux.go create mode 100644 internal/ui/termstatus/status.go create mode 100644 internal/ui/termstatus/status_test.go create mode 100644 internal/ui/termstatus/terminal_posix.go create mode 100644 internal/ui/termstatus/terminal_unix.go create mode 100644 internal/ui/termstatus/terminal_windows.go create mode 100644 internal/walker/testing.go create mode 100644 internal/walker/walker.go create mode 100644 internal/walker/walker_test.go create mode 100644 run_integration_tests.go diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md new file mode 100644 index 000000000..70fae4d33 --- /dev/null +++ b/.github/ISSUE_TEMPLATE.md @@ -0,0 +1,27 @@ + + + +Output of `restic version` +-------------------------- + + + +Describe the issue +------------------ diff --git a/.github/ISSUE_TEMPLATE/Bug.md b/.github/ISSUE_TEMPLATE/Bug.md new file mode 100644 index 000000000..31b5215e8 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/Bug.md @@ -0,0 +1,93 @@ +--- +name: Bug report +about: Report a problem with restic to help us resolve it and improve +--- + + + + +Output of `restic version` +-------------------------- + + +How did you run restic exactly? +------------------------------- + + + +What backend/server/service did you use to store the repository? +---------------------------------------------------------------- + + + +Expected behavior +----------------- + + + +Actual behavior +--------------- + + + +Steps to reproduce the behavior +------------------------------- + + + +Do you have any idea what may have caused this? +----------------------------------------------- + + + +Do you have an idea how to solve the issue? 
+------------------------------------------- + + + +Did restic help you or made you happy in any way? +------------------------------------------------- + + diff --git a/.github/ISSUE_TEMPLATE/Feature.md b/.github/ISSUE_TEMPLATE/Feature.md new file mode 100644 index 000000000..7e2edd6b1 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/Feature.md @@ -0,0 +1,57 @@ +--- +name: Feature request +about: Suggest a new feature or enhancement for restic +--- + + + + +Output of `restic version` +-------------------------- + + + +What should restic do differently? Which functionality do you think we should add? +---------------------------------------------------------------------------------- + + + + +What are you trying to do? +-------------------------- + + + +Did restic help you or made you happy in any way? +------------------------------------------------- + + diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 000000000..a133efb66 --- /dev/null +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,36 @@ + + + + +What is the purpose of this change? What does it change? +-------------------------------------------------------- + + + +Was the change discussed in an issue or in the forum before? +------------------------------------------------------------ + + + +Checklist +--------- + +- [ ] I have read the [Contribution Guidelines](https://github.com/restic/restic/blob/master/CONTRIBUTING.md#providing-patches) +- [ ] I have added tests for all changes in this PR +- [ ] I have added documentation for the changes (in the manual) +- [ ] There's a new file in `changelog/unreleased/` that describes the changes for our users (template [here](https://github.com/restic/restic/blob/master/changelog/TEMPLATE)) +- [ ] I have run `gofmt` on the code in all commits +- [ ] All commit messages are formatted in the same style as [the other commits in the repo](https://github.com/restic/restic/blob/master/CONTRIBUTING.md#git-commits) +- [ ] I'm done, this Pull Request is ready for review diff --git a/.gitignore b/.gitignore new file mode 100644 index 000000000..33a01ba61 --- /dev/null +++ b/.gitignore @@ -0,0 +1,2 @@ +/restic +/.vagrant diff --git a/.hound.yml b/.hound.yml new file mode 100644 index 000000000..e5c719dd2 --- /dev/null +++ b/.hound.yml @@ -0,0 +1,2 @@ +go: + enabled: true diff --git a/.travis.yml b/.travis.yml new file mode 100644 index 000000000..b54df0c3a --- /dev/null +++ b/.travis.yml @@ -0,0 +1,61 @@ +language: go +sudo: false + +matrix: + include: + - os: linux + go: "1.9.x" + env: RESTIC_TEST_FUSE=0 RESTIC_TEST_CLOUD_BACKENDS=0 RESTIC_BUILD_SOLARIS=0 + cache: + directories: + - $HOME/.cache/go-build + - $HOME/gopath/pkg/mod + + - os: linux + go: "1.10.x" + env: RESTIC_TEST_FUSE=0 RESTIC_TEST_CLOUD_BACKENDS=0 + cache: + directories: + - $HOME/.cache/go-build + - $HOME/gopath/pkg/mod + + # only run fuse and cloud backends tests on Travis for the latest Go on Linux + - os: linux + go: "1.11.x" + sudo: true + cache: + directories: + - $HOME/.cache/go-build + - $HOME/gopath/pkg/mod + + - os: osx + go: "1.11.x" + env: RESTIC_TEST_FUSE=0 RESTIC_TEST_CLOUD_BACKENDS=0 + cache: + directories: + - $HOME/Library/Caches/go-build + - $HOME/gopath/pkg/mod + +branches: + only: + - master + +notifications: + irc: + channels: + - "chat.freenode.net#restic" + on_success: change + on_failure: change + skip_join: true + +install: + - go version + - export GOBIN="$GOPATH/bin" + - export PATH="$PATH:$GOBIN" + - go env + +script: + - go run run_integration_tests.go + 
+after_success: + - test -r all.cov && bash <(curl -s https://codecov.io/bash) -f all.cov diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 000000000..e1d819ebf --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,1701 @@ +Changelog for restic 0.9.4 (2019-01-06) +======================================= + +The following sections list the changes in restic 0.9.4 relevant to +restic users. The changes are ordered by importance. + +Summary +------- + + * Fix #1989: Google Cloud Storage: Respect bandwidth limit + * Fix #2040: Add host name filter shorthand flag for `stats` command + * Fix #2068: Correctly return error loading data + * Fix #2095: Consistently use local time for snapshots times + * Enh #1605: Concurrent restore + * Enh #2089: Increase granularity of the "keep within" retention policy + * Enh #2097: Add key hinting + * Enh #2017: Mount: Enforce FUSE Unix permissions with allow-other + * Enh #2070: Make all commands display timestamps in local time + * Enh #2085: Allow --files-from to be specified multiple times + * Enh #2094: Run command to get password + +Details +------- + + * Bugfix #1989: Google Cloud Storage: Respect bandwidth limit + + The GCS backend did not respect the bandwidth limit configured, a previous commit + accidentally removed support for it. + + https://github.com/restic/restic/issues/1989 + https://github.com/restic/restic/pull/2100 + + * Bugfix #2040: Add host name filter shorthand flag for `stats` command + + The default value for `--host` flag was set to 'H' (the shorthand version of the flag), this + caused the lookup for the latest snapshot to fail. + + Add shorthand flag `-H` for `--host` (with empty default so if these flags are not specified the + latest snapshot will not filter by host name). + + Also add shorthand `-H` for `backup` command. + + https://github.com/restic/restic/issues/2040 + + * Bugfix #2068: Correctly return error loading data + + In one case during `prune` and `check`, an error loading data from the backend is not returned + properly. This is now corrected. + + https://github.com/restic/restic/issues/1999#issuecomment-433737921 + https://github.com/restic/restic/pull/2068 + + * Bugfix #2095: Consistently use local time for snapshots times + + By default snapshots created with restic backup were set to local time, but when the --time flag + was used the provided timestamp was parsed as UTC. With this change all snapshots times are set + to local time. + + https://github.com/restic/restic/pull/2095 + + * Enhancement #1605: Concurrent restore + + This change significantly improves restore performance, especially when using + high-latency remote repositories like B2. + + The implementation now uses several concurrent threads to download and process multiple + remote files concurrently. To further reduce restore time, each remote file is downloaded + using a single repository request. + + https://github.com/restic/restic/issues/1605 + https://github.com/restic/restic/pull/1719 + + * Enhancement #2089: Increase granularity of the "keep within" retention policy + + The `keep-within` option of the `forget` command now accepts time ranges with an hourly + granularity. For example, running `restic forget --keep-within 3d12h` will keep all the + snapshots made within three days and twelve hours from the time of the latest snapshot. 
+ + https://github.com/restic/restic/issues/2089 + https://github.com/restic/restic/pull/2090 + + * Enhancement #2097: Add key hinting + + Added a new option `--key-hint` and corresponding environment variable `RESTIC_KEY_HINT`. + The key hint is a key ID to try decrypting first, before other keys in the repository. + + This change will benefit repositories with many keys; if the correct key hint is supplied then + restic only needs to check one key. If the key hint is incorrect (the key does not exist, or the + password is incorrect) then restic will check all keys, as usual. + + https://github.com/restic/restic/issues/2097 + + * Enhancement #2017: Mount: Enforce FUSE Unix permissions with allow-other + + The fuse mount (`restic mount`) now lets the kernel check the permissions of the files within + snapshots (this is done through the `DefaultPermissions` FUSE option) when the option + `--allow-other` is specified. + + To restore the old behavior, we've added the `--no-default-permissions` option. This allows + all users that have access to the mount point to access all files within the snapshots. + + https://github.com/restic/restic/pull/2017 + + * Enhancement #2070: Make all commands display timestamps in local time + + Restic used to drop the timezone information from displayed timestamps, it now converts + timestamps to local time before printing them so the times can be easily compared to. + + https://github.com/restic/restic/pull/2070 + + * Enhancement #2085: Allow --files-from to be specified multiple times + + Before, restic took only the last file specified with `--files-from` into account, this is now + corrected. + + https://github.com/restic/restic/issues/2085 + https://github.com/restic/restic/pull/2086 + + * Enhancement #2094: Run command to get password + + We've added the `--password-command` option which allows specifying a command that restic + runs every time the password for the repository is needed, so it can be integrated with a + password manager or keyring. The option can also be set via the environment variable + `$RESTIC_PASSWORD_COMMAND`. + + https://github.com/restic/restic/pull/2094 + + +Changelog for restic 0.9.3 (2018-10-13) +======================================= + +The following sections list the changes in restic 0.9.3 relevant to +restic users. The changes are ordered by importance. + +Summary +------- + + * Fix #1935: Remove truncated files from cache + * Fix #1978: Do not return an error when the scanner is slower than backup + * Enh #1766: Restore: suppress lchown errors when not running as root + * Enh #1909: Reject files/dirs by name first + * Enh #1940: Add directory filter to ls command + * Enh #1967: Use `--host` everywhere + * Enh #2028: Display size of cache directories + * Enh #1777: Improve the `find` command + * Enh #1876: Display reason why forget keeps snapshots + * Enh #1891: Accept glob in paths loaded via --files-from + * Enh #1920: Vendor dependencies with Go 1.11 Modules + * Enh #1949: Add new command `self-update` + * Enh #1953: Ls: Add JSON output support for restic ls cmd + * Enh #1962: Stream JSON output for ls command + +Details +------- + + * Bugfix #1935: Remove truncated files from cache + + When a file in the local cache is truncated, and restic tries to access data beyond the end of the + (cached) file, it used to return an error "EOF". This is now fixed, such truncated files are + removed and the data is fetched directly from the backend. 
+ + https://github.com/restic/restic/issues/1935 + + * Bugfix #1978: Do not return an error when the scanner is slower than backup + + When restic makes a backup, there's a background task called "scanner" which collects + information on how many files and directories are to be saved, in order to display progress + information to the user. When the backup finishes faster than the scanner, it is aborted + because the result is not needed any more. This logic contained a bug, where quitting the + scanner process was treated as an error, and caused restic to print an unhelpful error message + ("context canceled"). + + https://github.com/restic/restic/issues/1978 + https://github.com/restic/restic/pull/1991 + + * Enhancement #1766: Restore: suppress lchown errors when not running as root + + Like "cp" and "rsync" do, restic now only reports errors for changing the ownership of files + during restore if it is run as root, on non-Windows operating systems. On Windows, the error + is reported as usual. + + https://github.com/restic/restic/issues/1766 + + * Enhancement #1909: Reject files/dirs by name first + + The current scanner/archiver code had an architectural limitation: it always ran the + `lstat()` system call on all files and directories before a decision to include/exclude the + file/dir was made. This lead to a lot of unnecessary system calls for items that could have been + rejected by their name or path only. + + We've changed the archiver/scanner implementation so that it now first rejects by name/path, + and only runs the system call on the remaining items. This reduces the number of `lstat()` + system calls a lot (depending on the exclude settings). + + https://github.com/restic/restic/issues/1909 + https://github.com/restic/restic/pull/1912 + + * Enhancement #1940: Add directory filter to ls command + + The ls command can now be filtered by directories, so that only files in the given directories + will be shown. If the --recursive flag is specified, then ls will traverse subfolders and list + their files as well. + + It used to be possible to specify multiple snapshots, but that has been replaced by only one + snapshot and the possibility of specifying multiple directories. + + Specifying directories constrains the walk, which can significantly speed up the listing. + + https://github.com/restic/restic/issues/1940 + https://github.com/restic/restic/pull/1941 + + * Enhancement #1967: Use `--host` everywhere + + We now use the flag `--host` for all commands which need a host name, using `--hostname` (e.g. + for `restic backup`) still works, but will print a deprecation warning. Also, add the short + option `-H` where possible. + + https://github.com/restic/restic/issues/1967 + + * Enhancement #2028: Display size of cache directories + + The `cache` command now by default shows the size of the individual cache directories. It can be + disabled with `--no-size`. + + https://github.com/restic/restic/issues/2028 + https://github.com/restic/restic/pull/2033 + + * Enhancement #1777: Improve the `find` command + + We've updated the `find` command to support multiple patterns. + + `restic find` is now able to list the snapshots containing a specific tree or blob, or even the + snapshots that contain blobs belonging to a given pack. A list of IDs can be given, as long as they + all have the same type. + + The command `find` can also display the pack IDs the blobs belong to, if the `--show-pack-id` + flag is provided. 
+ + https://github.com/restic/restic/issues/1777 + https://github.com/restic/restic/pull/1780 + + * Enhancement #1876: Display reason why forget keeps snapshots + + We've added a column to the list of snapshots `forget` keeps which details the reasons to keep a + particuliar snapshot. This makes debugging policies for forget much easier. Please remember + to always try things out with `--dry-run`! + + https://github.com/restic/restic/pull/1876 + + * Enhancement #1891: Accept glob in paths loaded via --files-from + + Before that, behaviour was different if paths were appended to command line or from a file, + because wild card characters were expanded by shell if appended to command line, but not + expanded if loaded from file. + + https://github.com/restic/restic/issues/1891 + + * Enhancement #1920: Vendor dependencies with Go 1.11 Modules + + Until now, we've used `dep` for managing dependencies, we've now switch to using Go modules. + For users this does not change much, only if you want to compile restic without downloading + anything with Go 1.11, then you need to run: `go build -mod=vendor build.go` + + https://github.com/restic/restic/pull/1920 + + * Enhancement #1949: Add new command `self-update` + + We have added a new command called `self-update` which downloads the latest released version + of restic from GitHub and replaces the current binary with it. It does not rely on any external + program (so it'll work everywhere), but still verifies the GPG signature using the embedded + GPG public key. + + By default, the `self-update` command is hidden behind the `selfupdate` built tag, which is + only set when restic is built using `build.go` (including official releases). The reason for + this is that downstream distributions will then not include the command by default, so users + are encouraged to use the platform-specific distribution mechanism. + + https://github.com/restic/restic/pull/1949 + + * Enhancement #1953: Ls: Add JSON output support for restic ls cmd + + We've implemented listing files in the repository with JSON as output, just pass `--json` as an + option to `restic ls`. This makes the output of the command machine readable. + + https://github.com/restic/restic/pull/1953 + + * Enhancement #1962: Stream JSON output for ls command + + The `ls` command now supports JSON output with the global `--json` flag, and this change + streams out JSON messages one object at a time rather than en entire array buffered in memory + before encoding. The advantage is it allows large listings to be handled efficiently. + + Two message types are printed: snapshots and nodes. A snapshot object will precede node + objects which belong to that snapshot. The `struct_type` field can be used to determine which + kind of message an object is. + + https://github.com/restic/restic/pull/1962 + + +Changelog for restic 0.9.2 (2018-08-06) +======================================= + +The following sections list the changes in restic 0.9.2 relevant to +restic users. The changes are ordered by importance. 
+ +Summary +------- + + * Fix #1854: Allow saving files/dirs on different fs with `--one-file-system` + * Fix #1870: Fix restore with --include + * Fix #1880: Use `--cache-dir` argument for `check` command + * Fix #1893: Return error when exclude file cannot be read + * Fix #1861: Fix case-insensitive search with restic find + * Enh #1906: Add support for B2 application keys + * Enh #874: Add stats command to get information about a repository + * Enh #1772: Add restore --verify to verify restored file content + * Enh #1853: Add JSON output support to `restic key list` + * Enh #1477: S3 backend: accept AWS_SESSION_TOKEN + * Enh #1901: Update the Backblaze B2 library + +Details +------- + + * Bugfix #1854: Allow saving files/dirs on different fs with `--one-file-system` + + Restic now allows saving files/dirs on a different file system in a subdir correctly even when + `--one-file-system` is specified. + + The first thing the restic archiver code does is to build a tree of the target + files/directories. If it detects that a parent directory is already included (e.g. `restic + backup /foo /foo/bar/baz`), it'll ignore the latter argument. + + Without `--one-file-system`, that's perfectly valid: If `/foo` is to be archived, it will + include `/foo/bar/baz`. But with `--one-file-system`, `/foo/bar/baz` may reside on a + different file system, so it won't be included with `/foo`. + + https://github.com/restic/restic/issues/1854 + https://github.com/restic/restic/pull/1855 + + * Bugfix #1870: Fix restore with --include + + We fixed a bug which prevented restic to restore files with an include filter. + + https://github.com/restic/restic/issues/1870 + https://github.com/restic/restic/pull/1900 + + * Bugfix #1880: Use `--cache-dir` argument for `check` command + + `check` command now uses a temporary sub-directory of the specified directory if set using the + `--cache-dir` argument. If not set, the cache directory is created in the default temporary + directory as before. In either case a temporary cache is used to ensure the actual repository is + checked (rather than a local copy). + + The `--cache-dir` argument was not used by the `check` command, instead a cache directory was + created in the temporary directory. + + https://github.com/restic/restic/issues/1880 + + * Bugfix #1893: Return error when exclude file cannot be read + + A bug was found: when multiple exclude files were passed to restic and one of them could not be + read, an error was printed and restic continued, ignoring even the existing exclude files. + Now, an error message is printed and restic aborts when an exclude file cannot be read. + + https://github.com/restic/restic/issues/1893 + + * Bugfix #1861: Fix case-insensitive search with restic find + + We've fixed the behavior for `restic find -i PATTERN`, which was broken in v0.9.1. + + https://github.com/restic/restic/pull/1861 + + * Enhancement #1906: Add support for B2 application keys + + Restic can now use so-called "application keys" which can be created in the B2 dashboard and + were only introduced recently. In contrast to the "master key", such keys can be restricted to a + specific bucket and/or path. 
+ + https://github.com/restic/restic/issues/1906 + https://github.com/restic/restic/pull/1914 + + * Enhancement #874: Add stats command to get information about a repository + + https://github.com/restic/restic/issues/874 + https://github.com/restic/restic/pull/1729 + + * Enhancement #1772: Add restore --verify to verify restored file content + + Restore will print error message if restored file content does not match expected SHA256 + checksum + + https://github.com/restic/restic/pull/1772 + + * Enhancement #1853: Add JSON output support to `restic key list` + + This PR enables users to get the output of `restic key list` in JSON in addition to the existing + table format. + + https://github.com/restic/restic/pull/1853 + + * Enhancement #1477: S3 backend: accept AWS_SESSION_TOKEN + + Before, it was not possible to use s3 backend with AWS temporary security credentials(with + AWS_SESSION_TOKEN). This change gives higher priority to credentials.EnvAWS credentials + provider. + + https://github.com/restic/restic/issues/1477 + https://github.com/restic/restic/pull/1479 + https://github.com/restic/restic/pull/1647 + + * Enhancement #1901: Update the Backblaze B2 library + + We've updated the library we're using for accessing the Backblaze B2 service to 0.5.0 to + include support for upcoming so-called "application keys". With this feature, you can create + access credentials for B2 which are restricted to e.g. a single bucket or even a sub-directory + of a bucket. + + https://github.com/restic/restic/pull/1901 + https://github.com/kurin/blazer + + +Changelog for restic 0.9.1 (2018-06-10) +======================================= + +The following sections list the changes in restic 0.9.1 relevant to +restic users. The changes are ordered by importance. + +Summary +------- + + * Fix #1801: Add limiting bandwidth to the rclone backend + * Fix #1822: Allow uploading large files to MS Azure + * Fix #1825: Correct `find` to not skip snapshots + * Fix #1833: Fix caching files on error + * Fix #1834: Resolve deadlock + +Details +------- + + * Bugfix #1801: Add limiting bandwidth to the rclone backend + + The rclone backend did not respect `--limit-upload` or `--limit-download`. Oftentimes it's + not necessary to use this, as the limiting in rclone itself should be used because it gives much + better results, but in case a remote instance of rclone is used (e.g. called via ssh), it is still + relevant to limit the bandwidth from restic to rclone. + + https://github.com/restic/restic/issues/1801 + + * Bugfix #1822: Allow uploading large files to MS Azure + + Sometimes, restic creates files to be uploaded to the repository which are quite large, e.g. + when saving directories with many entries or very large files. The MS Azure API does not allow + uploading files larger that 256MiB directly, rather restic needs to upload them in blocks of + 100MiB. This is now implemented. + + https://github.com/restic/restic/issues/1822 + + * Bugfix #1825: Correct `find` to not skip snapshots + + Under certain circumstances, the `find` command was found to skip snapshots containing + directories with files to look for when the directories haven't been modified at all, and were + already printed as part of a different snapshot. This is now corrected. + + In addition, we've switched to our own matching/pattern implementation, so now things like + `restic find "/home/user/foo/**/main.go"` are possible. 
+ + https://github.com/restic/restic/issues/1825 + https://github.com/restic/restic/issues/1823 + + * Bugfix #1833: Fix caching files on error + + During `check` it may happen that different threads access the same file in the backend, which + is then downloaded into the cache only once. When that fails, only the thread which is + responsible for downloading the file signals the correct error. The other threads just assume + that the file has been downloaded successfully and then get an error when they try to access the + cached file. + + https://github.com/restic/restic/issues/1833 + + * Bugfix #1834: Resolve deadlock + + When the "scanning" process restic runs to find out how much data there is does not finish before + the backup itself is done, restic stops doing anything. This is resolved now. + + https://github.com/restic/restic/issues/1834 + https://github.com/restic/restic/pull/1835 + + +Changelog for restic 0.9.0 (2018-05-21) +======================================= + +The following sections list the changes in restic 0.9.0 relevant to +restic users. The changes are ordered by importance. + +Summary +------- + + * Fix #1608: Respect time stamp for new backup when reading from stdin + * Fix #1652: Ignore/remove invalid lock files + * Fix #1730: Ignore sockets for restore + * Fix #1684: Fix backend tests for rest-server + * Fix #1745: Correctly parse the argument to --tls-client-cert + * Enh #1433: Support UTF-16 encoding and process Byte Order Mark + * Enh #1561: Allow using rclone to access other services + * Enh #1665: Improve cache handling for `restic check` + * Enh #1721: Add `cache` command to list cache dirs + * Enh #1758: Allow saving OneDrive folders in Windows + * Enh #549: Rework archiver code + * Enh #1552: Use Google Application Default credentials + * Enh #1477: Accept AWS_SESSION_TOKEN for the s3 backend + * Enh #1648: Ignore AWS permission denied error when creating a repository + * Enh #1649: Add illumos/Solaris support + * Enh #1709: Improve messages `restic check` prints + * Enh #827: Add --new-password-file flag for non-interactive password changes + * Enh #1735: Allow keeping a time range of snaphots + * Enh #1782: Use default AWS credentials chain for S3 backend + +Details +------- + + * Bugfix #1608: Respect time stamp for new backup when reading from stdin + + When reading backups from stdin (via `restic backup --stdin`), restic now uses the time stamp + for the new backup passed in `--time`. + + https://github.com/restic/restic/issues/1608 + https://github.com/restic/restic/pull/1703 + + * Bugfix #1652: Ignore/remove invalid lock files + + This corrects a bug introduced recently: When an invalid lock file in the repo is encountered + (e.g. if the file is empty), the code used to ignore that, but now returns the error. Now, invalid + files are ignored for the normal lock check, and removed when `restic unlock --remove-all` is + run. + + https://github.com/restic/restic/issues/1652 + https://github.com/restic/restic/pull/1653 + + * Bugfix #1730: Ignore sockets for restore + + We've received a report and correct the behavior in which the restore code aborted restoring a + directory when a socket was encountered. Unix domain socket files cannot be restored (they are + created on the fly once a process starts listening). The error handling was corrected, and in + addition we're now ignoring sockets during restore. 
+ + https://github.com/restic/restic/issues/1730 + https://github.com/restic/restic/pull/1731 + + * Bugfix #1684: Fix backend tests for rest-server + + The REST server for restic now requires an explicit parameter (`--no-auth`) if no + authentication should be allowed. This is fixed in the tests. + + https://github.com/restic/restic/pull/1684 + + * Bugfix #1745: Correctly parse the argument to --tls-client-cert + + Previously, the --tls-client-cert method attempt to read ARGV[1] (hardcoded) instead of the + argument that was passed to it. This has been corrected. + + https://github.com/restic/restic/issues/1745 + https://github.com/restic/restic/pull/1746 + + * Enhancement #1433: Support UTF-16 encoding and process Byte Order Mark + + On Windows, text editors commonly leave a Byte Order Mark at the beginning of the file to define + which encoding is used (oftentimes UTF-16). We've added code to support processing the BOMs in + text files, like the exclude files, the password file and the file passed via `--files-from`. + This does not apply to any file being saved in a backup, those are not touched and archived as they + are. + + https://github.com/restic/restic/issues/1433 + https://github.com/restic/restic/issues/1738 + https://github.com/restic/restic/pull/1748 + + * Enhancement #1561: Allow using rclone to access other services + + We've added the ability to use rclone to store backup data on all backends that it supports. This + was done in collaboration with Nick, the author of rclone. You can now use it to first configure a + service, then restic manages the rest (starting and stopping rclone). For details, please see + the manual. + + https://github.com/restic/restic/issues/1561 + https://github.com/restic/restic/pull/1657 + https://rclone.org + + * Enhancement #1665: Improve cache handling for `restic check` + + For safety reasons, restic does not use a local metadata cache for the `restic check` command, + so that data is loaded from the repository and restic can check it's in good condition. When the + cache is disabled, restic will fetch each tiny blob needed for checking the integrity using a + separate backend request. For non-local backends, that will take a long time, and depending on + the backend (e.g. B2) may also be much more expensive. + + This PR adds a few commits which will change the behavior as follows: + + * When `restic check` is called without any additional parameters, it will build a new cache in a + temporary directory, which is removed at the end of the check. This way, we'll get readahead for + metadata files (so restic will fetch the whole file when the first blob from the file is + requested), but all data is freshly fetched from the storage backend. This is the default + behavior and will work for almost all users. + + * When `restic check` is called with `--with-cache`, the default on-disc cache is used. This + behavior hasn't changed since the cache was introduced. + + * When `--no-cache` is specified, restic falls back to the old behavior, and read all tiny blobs + in separate requests. + + https://github.com/restic/restic/issues/1665 + https://github.com/restic/restic/issues/1694 + https://github.com/restic/restic/pull/1696 + + * Enhancement #1721: Add `cache` command to list cache dirs + + The command `cache` was added, it allows listing restic's cache directoriers together with + the last usage. 
It also allows removing old cache dirs without having to access a repo, via + `restic cache --cleanup` + + https://github.com/restic/restic/issues/1721 + https://github.com/restic/restic/pull/1749 + + * Enhancement #1758: Allow saving OneDrive folders in Windows + + Restic now contains a bugfix to two libraries, which allows saving OneDrive folders in + Windows. In order to use the newer versions of the libraries, the minimal version required to + compile restic is now Go 1.9. + + https://github.com/restic/restic/issues/1758 + https://github.com/restic/restic/pull/1765 + + * Enhancement #549: Rework archiver code + + The core archiver code and the complementary code for the `backup` command was rewritten + completely. This resolves very annoying issues such as 549. The first backup with this release + of restic will likely result in all files being re-read locally, so it will take a lot longer. The + next backup after that will be fast again. + + Basically, with the old code, restic took the last path component of each to-be-saved file or + directory as the top-level file/directory within the snapshot. This meant that when called as + `restic backup /home/user/foo`, the snapshot would contain the files in the directory + `/home/user/foo` as `/foo`. + + This is not the case any more with the new archiver code. Now, restic works very similar to what + `tar` does: When restic is called with an absolute path to save, then it'll preserve the + directory structure within the snapshot. For the example above, the snapshot would contain + the files in the directory within `/home/user/foo` in the snapshot. For relative + directories, it only preserves the relative path components. So `restic backup user/foo` + will save the files as `/user/foo` in the snapshot. + + While we were at it, the status display and notification system was completely rewritten. By + default, restic now shows which files are currently read (unless `--quiet` is specified) in a + multi-line status display. + + The `backup` command also gained a new option: `--verbose`. It can be specified once (which + prints a bit more detail what restic is doing) or twice (which prints a line for each + file/directory restic encountered, together with some statistics). + + Another issue that was resolved is the new code only reads two files at most. The old code would + read way too many files in parallel, thereby slowing down the backup process on spinning discs a + lot. + + https://github.com/restic/restic/issues/549 + https://github.com/restic/restic/issues/1286 + https://github.com/restic/restic/issues/446 + https://github.com/restic/restic/issues/1344 + https://github.com/restic/restic/issues/1416 + https://github.com/restic/restic/issues/1456 + https://github.com/restic/restic/issues/1145 + https://github.com/restic/restic/issues/1160 + https://github.com/restic/restic/pull/1494 + + * Enhancement #1552: Use Google Application Default credentials + + Google provide libraries to generate appropriate credentials with various fallback + sources. This change uses the library to generate our GCS client, which allows us to make use of + these extra methods. + + This should be backward compatible with previous restic behaviour while adding the + additional capabilities to auth from Google's internal metadata endpoints. For users + running restic in GCP this can make authentication far easier than it was before. 
+ + https://github.com/restic/restic/pull/1552 + https://developers.google.com/identity/protocols/application-default-credentials + + * Enhancement #1477: Accept AWS_SESSION_TOKEN for the s3 backend + + Before, it was not possible to use s3 backend with AWS temporary security credentials(with + AWS_SESSION_TOKEN). This change gives higher priority to credentials.EnvAWS credentials + provider. + + https://github.com/restic/restic/issues/1477 + https://github.com/restic/restic/pull/1479 + https://github.com/restic/restic/pull/1647 + + * Enhancement #1648: Ignore AWS permission denied error when creating a repository + + It's not possible to use s3 backend scoped to a subdirectory(with specific permissions). + Restic doesn't try to create repository in a subdirectory, when 'bucket exists' of parent + directory check fails due to permission issues. + + https://github.com/restic/restic/pull/1648 + + * Enhancement #1649: Add illumos/Solaris support + + https://github.com/restic/restic/pull/1649 + + * Enhancement #1709: Improve messages `restic check` prints + + Some messages `restic check` prints are not really errors, so from now on restic does not treat + them as errors any more and exits cleanly. + + https://github.com/restic/restic/pull/1709 + https://forum.restic.net/t/what-is-the-standard-procedure-to-follow-if-a-backup-or-restore-is-interrupted/571/2 + + * Enhancement #827: Add --new-password-file flag for non-interactive password changes + + This makes it possible to change a repository password without being prompted. + + https://github.com/restic/restic/issues/827 + https://github.com/restic/restic/pull/1720 + https://forum.restic.net/t/changing-repo-password-without-prompt/591 + + * Enhancement #1735: Allow keeping a time range of snaphots + + We've added the `--keep-within` option to the `forget` command. It instructs restic to keep + all snapshots within the given duration since the newest snapshot. For example, running + `restic forget --keep-within 5m7d` will keep all snapshots which have been made in the five + months and seven days since the latest snapshot. + + https://github.com/restic/restic/pull/1735 + + * Enhancement #1782: Use default AWS credentials chain for S3 backend + + Adds support for file credentials to the S3 backend (e.g. ~/.aws/credentials), and reorders + the credentials chain for the S3 backend to match AWS's standard, which is static credentials, + env vars, credentials file, and finally remote. + + https://github.com/restic/restic/pull/1782 + + +Changelog for restic 0.8.3 (2018-02-26) +======================================= + +The following sections list the changes in restic 0.8.3 relevant to +restic users. The changes are ordered by importance. + +Summary +------- + + * Fix #1633: Fixed unexpected 'pack file cannot be listed' error + * Fix #1641: Ignore files with invalid names in the repo + * Fix #1638: Handle errors listing files in the backend + * Enh #1497: Add --read-data-subset flag to check command + * Enh #1560: Retry all repository file download errors + * Enh #1623: Don't check for presence of files in the backend before writing + * Enh #1634: Upgrade B2 client library, reduce HTTP requests + +Details +------- + + * Bugfix #1633: Fixed unexpected 'pack file cannot be listed' error + + Due to a regression introduced in 0.8.2, the `rebuild-index` and `prune` commands failed to + read pack files with size of 587, 588, 589 or 590 bytes. 
+ + https://github.com/restic/restic/issues/1633 + https://github.com/restic/restic/pull/1635 + + * Bugfix #1641: Ignore files with invalid names in the repo + + The release 0.8.2 introduced a bug: when restic encounters files in the repo which do not have a + valid name, it tries to load a file with a name of lots of zeroes instead of ignoring it. This is now + resolved, invalid file names are just ignored. + + https://github.com/restic/restic/issues/1641 + https://github.com/restic/restic/pull/1643 + https://forum.restic.net/t/help-fixing-repo-no-such-file/485/3 + + * Bugfix #1638: Handle errors listing files in the backend + + A user reported in the forum that restic completes a backup although a concurrent `prune` + operation was running. A few error messages were printed, but the backup was attempted and + completed successfully. No error code was returned. + + This should not happen: The repository is exclusively locked during `prune`, so when `restic + backup` is run in parallel, it should abort and return an error code instead. + + It was found that the bug was in the code introduced only recently, which retries a List() + operation on the backend should that fail. It is now corrected. + + https://github.com/restic/restic/pull/1638 + https://forum.restic.net/t/restic-backup-returns-0-exit-code-when-already-locked/484 + + * Enhancement #1497: Add --read-data-subset flag to check command + + This change introduces ability to check integrity of a subset of repository data packs. This + can be used to spread integrity check of larger repositories over a period of time. + + https://github.com/restic/restic/issues/1497 + https://github.com/restic/restic/pull/1556 + + * Enhancement #1560: Retry all repository file download errors + + Restic will now retry failed downloads, similar to other operations. + + https://github.com/restic/restic/pull/1560 + + * Enhancement #1623: Don't check for presence of files in the backend before writing + + Before, all backend implementations were required to return an error if the file that is to be + written already exists in the backend. For most backends, that means making a request (e.g. via + HTTP) and returning an error when the file already exists. + + This is not accurate, the file could have been created between the HTTP request testing for it, + and when writing starts, so we've relaxed this requeriment, which saves one additional HTTP + request per newly added file. + + https://github.com/restic/restic/pull/1623 + + * Enhancement #1634: Upgrade B2 client library, reduce HTTP requests + + We've upgraded the B2 client library restic uses to access BackBlaze B2. This reduces the + number of HTTP requests needed to upload a new file from two to one, which should improve + throughput to B2. + + https://github.com/restic/restic/pull/1634 + + +Changelog for restic 0.8.2 (2018-02-17) +======================================= + +The following sections list the changes in restic 0.8.2 relevant to +restic users. The changes are ordered by importance. 
+ +Summary +------- + + * Fix #1506: Limit bandwith at the http.RoundTripper for HTTP based backends + * Fix #1512: Restore directory permissions as the last step + * Fix #1528: Correctly create missing subdirs in data/ + * Fix #1590: Strip spaces for lines read via --files-from + * Fix #1589: Complete intermediate index upload + * Fix #1594: Google Cloud Storage: Use generic HTTP transport + * Fix #1595: Backup: Remove bandwidth display + * Enh #1522: Add support for TLS client certificate authentication + * Enh #1541: Reduce number of remote requests during repository check + * Enh #1567: Reduce number of backend requests for rebuild-index and prune + * Enh #1507: Only reload snapshots once per minute for fuse mount + * Enh #1538: Reduce memory allocations for querying the index + * Enh #1549: Speed up querying across indices and scanning existing files + * Enh #1554: Fuse/mount: Correctly handle EOF, add template option + * Enh #1564: Don't terminate ssh on SIGINT + * Enh #1579: Retry Backend.List() in case of errors + * Enh #1584: Limit index file size + +Details +------- + + * Bugfix #1506: Limit bandwith at the http.RoundTripper for HTTP based backends + + https://github.com/restic/restic/issues/1506 + https://github.com/restic/restic/pull/1511 + + * Bugfix #1512: Restore directory permissions as the last step + + This change allows restoring into directories that were not writable during backup. Before, + restic created the directory, set the read-only mode and then failed to create files in the + directory. This change now restores the directory (with its permissions) as the very last + step. + + https://github.com/restic/restic/issues/1512 + https://github.com/restic/restic/pull/1536 + + * Bugfix #1528: Correctly create missing subdirs in data/ + + https://github.com/restic/restic/issues/1528 + https://github.com/restic/restic/pull/1529 + + * Bugfix #1590: Strip spaces for lines read via --files-from + + Leading and trailing spaces in lines read via `--files-from` are now stripped, so it behaves + the same as with lines read via `--exclude-file`. + + https://github.com/restic/restic/issues/1590 + https://github.com/restic/restic/pull/1613 + + * Bugfix #1589: Complete intermediate index upload + + After a user posted a comprehensive report of what he observed, we were able to find a bug and + correct it: During backup, restic uploads so-called "intermediate" index files. When the + backup finishes during a transfer of such an intermediate index, the upload is cancelled, but + the backup is finished without an error. This leads to an inconsistent state, where the + snapshot references data that is contained in the repo, but is not referenced in any index. + + The situation can be resolved by building a new index with `rebuild-index`, but looks very + confusing at first. Since all the data got uploaded to the repo successfully, there was no risk + of data loss, just minor inconvenience for our users. + + https://github.com/restic/restic/pull/1589 + https://forum.restic.net/t/error-loading-tree-check-prune-and-forget-gives-error-b2-backend/406 + + * Bugfix #1594: Google Cloud Storage: Use generic HTTP transport + + It was discovered that the Google Cloud Storage backend did not use the generic HTTP transport, + so things such as bandwidth limiting with `--limit-upload` did not work. This is resolved now. + + https://github.com/restic/restic/pull/1594 + + * Bugfix #1595: Backup: Remove bandwidth display + + This commit removes the bandwidth displayed during backup process. 
It is misleading and + seldomly correct, because it's neither the "read bandwidth" (only for the very first backup) + nor the "upload bandwidth". Many users are confused about (and rightly so), c.f. #1581, #1033, + #1591 + + We'll eventually replace this display with something more relevant when the new archiver code + is ready. + + https://github.com/restic/restic/pull/1595 + + * Enhancement #1522: Add support for TLS client certificate authentication + + Support has been added for using a TLS client certificate for authentication to HTTP based + backend. A file containing the PEM encoded private key and certificate can be set using the + `--tls-client-cert` option. + + https://github.com/restic/restic/issues/1522 + https://github.com/restic/restic/pull/1524 + + * Enhancement #1541: Reduce number of remote requests during repository check + + This change eliminates redundant remote repository calls and significantly improves + repository check time. + + https://github.com/restic/restic/issues/1541 + https://github.com/restic/restic/pull/1548 + + * Enhancement #1567: Reduce number of backend requests for rebuild-index and prune + + We've found a way to reduce then number of backend requests for the `rebuild-index` and `prune` + operations. This significantly speeds up the operations for high-latency backends. + + https://github.com/restic/restic/issues/1567 + https://github.com/restic/restic/pull/1574 + https://github.com/restic/restic/pull/1575 + + * Enhancement #1507: Only reload snapshots once per minute for fuse mount + + https://github.com/restic/restic/pull/1507 + + * Enhancement #1538: Reduce memory allocations for querying the index + + This change reduces the internal memory allocations when the index data structures in memory + are queried if a blob (part of a file) already exists in the repo. It should speed up backup a bit, + and maybe even reduce RAM usage. + + https://github.com/restic/restic/pull/1538 + + * Enhancement #1549: Speed up querying across indices and scanning existing files + + This change increases the whenever a blob (part of a file) is searched for in a restic + repository. This will reduce cpu usage some when backing up files already backed up by restic. + Cpu usage is further decreased when scanning files. + + https://github.com/restic/restic/pull/1549 + + * Enhancement #1554: Fuse/mount: Correctly handle EOF, add template option + + We've added the `--snapshot-template` string, which can be used to specify a template for a + snapshot directory. In addition, accessing data after the end of a file via the fuse mount is now + handled correctly. + + https://github.com/restic/restic/pull/1554 + + * Enhancement #1564: Don't terminate ssh on SIGINT + + We've reworked the code which runs the `ssh` login for the sftp backend so that it can prompt for a + password (if needed) but does not exit when the user presses CTRL+C (SIGINT) e.g. during + backup. This allows restic to properly shut down when it receives SIGINT and remove the lock + file from the repo, afterwards exiting the `ssh` process. + + https://github.com/restic/restic/pull/1564 + https://github.com/restic/restic/pull/1588 + + * Enhancement #1579: Retry Backend.List() in case of errors + + https://github.com/restic/restic/pull/1579 + + * Enhancement #1584: Limit index file size + + Before, restic would create a single new index file on `prune` or `rebuild-index`, this may + lead to memory problems when this huge index is created and loaded again. 
We're now limiting the + size of the index file, and split newly created index files into several smaller ones. This + allows restic to be more memory-efficient. + + https://github.com/restic/restic/issues/1412 + https://github.com/restic/restic/issues/979 + https://github.com/restic/restic/issues/526 + https://github.com/restic/restic/pull/1584 + + +Changelog for restic 0.8.1 (2017-12-27) +======================================= + +The following sections list the changes in restic 0.8.1 relevant to +restic users. The changes are ordered by importance. + +Summary +------- + + * Fix #1457: Improve s3 backend with DigitalOcean Spaces + * Fix #1454: Correct cache dir location for Windows and Darwin + * Fix #1459: Disable handling SIGPIPE + * Chg #1452: Do not save atime by default + * Enh #1436: Add code to detect old cache directories + * Enh #1439: Improve cancellation logic + * Enh #11: Add the `diff` command + +Details +------- + + * Bugfix #1457: Improve s3 backend with DigitalOcean Spaces + + https://github.com/restic/restic/issues/1457 + https://github.com/restic/restic/pull/1459 + + * Bugfix #1454: Correct cache dir location for Windows and Darwin + + The cache directory on Windows and Darwin was not correct, instead the directory `.cache` was + used. + + https://github.com/restic/restic/pull/1454 + + * Bugfix #1459: Disable handling SIGPIPE + + We've disabled handling SIGPIPE again. Turns out, writing to broken TCP connections also + raised SIGPIPE, so restic exits on the first write to a broken connection. Instead, restic + should retry the request. + + https://github.com/restic/restic/issues/1457 + https://github.com/restic/restic/issues/1466 + https://github.com/restic/restic/pull/1459 + + * Change #1452: Do not save atime by default + + By default, the access time for files and dirs is not saved any more. It is not possible to + reliably disable updating the access time during a backup, so for the next backup the access + time is different again. This means a lot of metadata is saved. If you want to save the access time + anyway, pass `--with-atime` to the `backup` command. + + https://github.com/restic/restic/pull/1452 + + * Enhancement #1436: Add code to detect old cache directories + + We've added code to detect old cache directories of repositories that haven't been used in a + long time, restic now prints a note when it detects that such dirs exist. Also, the option + `--cleanup-cache` was added to automatically remove such directories. That's not a problem + because the cache will be rebuild once a repo is accessed again. + + https://github.com/restic/restic/pull/1436 + + * Enhancement #1439: Improve cancellation logic + + The cancellation logic was improved, restic can now shut down cleanly when requested to do so + (e.g. via ctrl+c). + + https://github.com/restic/restic/pull/1439 + + * Enhancement #11: Add the `diff` command + + The command `diff` was added, it allows comparing two snapshots and listing all differences. + + https://github.com/restic/restic/issues/11 + https://github.com/restic/restic/issues/1460 + https://github.com/restic/restic/pull/1462 + + +Changelog for restic 0.8.0 (2017-11-26) +======================================= + +The following sections list the changes in restic 0.8.0 relevant to +restic users. The changes are ordered by importance. 
+ +Summary +------- + + * Sec #1445: Prevent writing outside the target directory during restore + * Fix #1256: Re-enable workaround for S3 backend + * Fix #1291: Reuse backend TCP connections to BackBlaze B2 + * Fix #1317: Run prune when `forget --prune` is called with just snapshot IDs + * Fix #1437: Remove implicit path `/restic` for the s3 backend + * Enh #1102: Add subdirectory `ids` to fuse mount + * Enh #1114: Add `--cacert` to specify TLS certificates to check against + * Enh #1216: Add upload/download limiting + * Enh #1271: Cache results for excludes for `backup` + * Enh #1274: Add `generate` command, replaces `manpage` and `autocomplete` + * Enh #1367: Allow comments in files read from via `--file-from` + * Enh #448: Sftp backend prompts for password + * Enh #510: Add `dump` command + * Enh #1040: Add local metadata cache + * Enh #1249: Add `latest` symlink in fuse mount + * Enh #1269: Add `--compact` to `forget` command + * Enh #1281: Google Cloud Storage backend needs less permissions + * Enh #1319: Make `check` print `no errors found` explicitly + * Enh #1353: Retry failed backend requests + +Details +------- + + * Security #1445: Prevent writing outside the target directory during restore + + A vulnerability was found in the restic restorer, which allowed attackers in special + circumstances to restore files to a location outside of the target directory. Due to the + circumstances we estimate this to be a low-risk vulnerability, but urge all users to upgrade to + the latest version of restic. + + Exploiting the vulnerability requires a Linux/Unix system which saves backups via restic and + a Windows systems which restores files from the repo. In addition, the attackers need to be able + to create create files with arbitrary names which are then saved to the restic repo. For + example, by creating a file named "..\test.txt" (which is a perfectly legal filename on Linux) + and restoring a snapshot containing this file on Windows, it would be written to the parent of + the target directory. + + We'd like to thank Tyler Spivey for reporting this responsibly! + + https://github.com/restic/restic/pull/1445 + + * Bugfix #1256: Re-enable workaround for S3 backend + + We've re-enabled a workaround for `minio-go` (the library we're using to access s3 backends), + this reduces memory usage. + + https://github.com/restic/restic/issues/1256 + https://github.com/restic/restic/pull/1267 + + * Bugfix #1291: Reuse backend TCP connections to BackBlaze B2 + + A bug was discovered in the library we're using to access Backblaze, it now reuses already + established TCP connections which should be a lot faster and not cause network failures any + more. + + https://github.com/restic/restic/issues/1291 + https://github.com/restic/restic/pull/1301 + + * Bugfix #1317: Run prune when `forget --prune` is called with just snapshot IDs + + A bug in the `forget` command caused `prune` not to be run when `--prune` was specified without a + policy, e.g. when only snapshot IDs that should be forgotten are listed manually. + + https://github.com/restic/restic/pull/1317 + + * Bugfix #1437: Remove implicit path `/restic` for the s3 backend + + The s3 backend used the subdir `restic` within a bucket if no explicit path after the bucket name + was specified. Since this version, restic does not use this default path any more. 
If you + created a repo on s3 in a bucket without specifying a path within the bucket, you need to add + `/restic` at the end of the repository specification to access your repo: + `s3:s3.amazonaws.com/bucket/restic` + + https://github.com/restic/restic/issues/1292 + https://github.com/restic/restic/pull/1437 + + * Enhancement #1102: Add subdirectory `ids` to fuse mount + + The fuse mount now has an `ids` subdirectory which contains the snapshots below their (short) + IDs. + + https://github.com/restic/restic/issues/1102 + https://github.com/restic/restic/pull/1299 + https://github.com/restic/restic/pull/1320 + + * Enhancement #1114: Add `--cacert` to specify TLS certificates to check against + + We've added the `--cacert` option which can be used to pass one (or more) CA certificates to + restic. These are used in addition to the system CA certificates to verify HTTPS certificates + (e.g. for the REST backend). + + https://github.com/restic/restic/issues/1114 + https://github.com/restic/restic/pull/1276 + + * Enhancement #1216: Add upload/download limiting + + We've added support for rate limiting through `--limit-upload` and `--limit-download` + flags. + + https://github.com/restic/restic/issues/1216 + https://github.com/restic/restic/pull/1336 + https://github.com/restic/restic/pull/1358 + + * Enhancement #1271: Cache results for excludes for `backup` + + The `backup` command now caches the result of excludes for a directory. + + https://github.com/restic/restic/issues/1271 + https://github.com/restic/restic/pull/1326 + + * Enhancement #1274: Add `generate` command, replaces `manpage` and `autocomplete` + + The `generate` command has been added, which replaces the now removed commands `manpage` and + `autocomplete`. This release of restic contains the most recent manpages in `doc/man` and the + auto-completion files for bash and zsh in `doc/bash-completion.sh` and + `doc/zsh-completion.zsh` + + https://github.com/restic/restic/issues/1274 + https://github.com/restic/restic/pull/1282 + + * Enhancement #1367: Allow comments in files read from via `--file-from` + + When the list of files/dirs to be saved is read from a file with `--files-from`, comment lines + (starting with `#`) are now ignored. + + https://github.com/restic/restic/issues/1367 + https://github.com/restic/restic/pull/1368 + + * Enhancement #448: Sftp backend prompts for password + + The sftp backend now prompts for the password if a password is necessary for login. + + https://github.com/restic/restic/issues/448 + https://github.com/restic/restic/pull/1270 + + * Enhancement #510: Add `dump` command + + We've added the `dump` command which prints a file from a snapshot to stdout. This can e.g. be + used to restore files read with `backup --stdin`. + + https://github.com/restic/restic/issues/510 + https://github.com/restic/restic/pull/1346 + + * Enhancement #1040: Add local metadata cache + + We've added a local cache for metadata so that restic doesn't need to load all metadata + (snapshots, indexes, ...) from the repo each time it starts. By default the cache is active, but + there's a new global option `--no-cache` that can be used to disable the cache. By deafult, the + cache a standard cache folder for the OS, which can be overridden with `--cache-dir`. The cache + will automatically populate, indexes and snapshots are saved as they are loaded. Cache + directories for repos that haven't been used recently can automatically be removed by restic + with the `--cleanup-cache` option. 
+ + A related change was to by default create pack files in the repo that contain either data or + metadata, not both mixed together. This allows easy caching of only the metadata files. The + next run of `restic prune` will untangle mixed files automatically. + + https://github.com/restic/restic/issues/29 + https://github.com/restic/restic/issues/738 + https://github.com/restic/restic/issues/282 + https://github.com/restic/restic/pull/1040 + https://github.com/restic/restic/pull/1287 + https://github.com/restic/restic/pull/1436 + https://github.com/restic/restic/pull/1265 + + * Enhancement #1249: Add `latest` symlink in fuse mount + + The directory structure in the fuse mount now exposes a symlink `latest` which points to the + latest snapshot in that particular directory. + + https://github.com/restic/restic/pull/1249 + + * Enhancement #1269: Add `--compact` to `forget` command + + The option `--compact` was added to the `forget` command to provide the same compact view as the + `snapshots` command. + + https://github.com/restic/restic/pull/1269 + + * Enhancement #1281: Google Cloud Storage backend needs less permissions + + The Google Cloud Storage backend no longer requires the service account to have the + `storage.buckets.get` permission ("Storage Admin" role) in `restic init` if the bucket + already exists. + + https://github.com/restic/restic/pull/1281 + + * Enhancement #1319: Make `check` print `no errors found` explicitly + + The `check` command now explicetly prints `No errors were found` when no errors could be found. + + https://github.com/restic/restic/issues/1303 + https://github.com/restic/restic/pull/1319 + + * Enhancement #1353: Retry failed backend requests + + https://github.com/restic/restic/pull/1353 + + +Changelog for restic 0.7.3 (2017-09-20) +======================================= + +The following sections list the changes in restic 0.7.3 relevant to +restic users. The changes are ordered by importance. + +Summary +------- + + * Fix #1246: List all files stored in Google Cloud Storage + +Details +------- + + * Bugfix #1246: List all files stored in Google Cloud Storage + + For large backups stored in Google Cloud Storage, the `prune` command fails because listing + only returns the first 1000 files. This has been corrected, no data is lost in the process. In + addition, a plausibility check was added to `prune`. + + https://github.com/restic/restic/issues/1246 + https://github.com/restic/restic/pull/1247 + + +Changelog for restic 0.7.2 (2017-09-13) +======================================= + +The following sections list the changes in restic 0.7.2 relevant to +restic users. The changes are ordered by importance. 
+ +Summary +------- + + * Fix #1167: Do not create a local repo unless `init` is used + * Fix #1164: Make the `key remove` command behave as documented + * Fix #1191: Make sure to write profiling files on interrupt + * Enh #1132: Make `key` command always prompt for a password + * Enh #1179: Resolve name conflicts, append a counter + * Enh #1218: Add `--compact` to `snapshots` command + * Enh #317: Add `--exclude-caches` and `--exclude-if-present` + * Enh #697: Automatically generate man pages for all restic commands + * Enh #1044: Improve `restore`, do not traverse/load excluded directories + * Enh #1061: Add Dockerfile and official Docker image + * Enh #1126: Use the standard Go git repository layout, use `dep` for vendoring + * Enh #1134: Add support for storing backups on Google Cloud Storage + * Enh #1144: Properly report errors when reading files with exclude patterns + * Enh #1149: Add support for storing backups on Microsoft Azure Blob Storage + * Enh #1196: Add `--group-by` to `forget` command for flexible grouping + * Enh #1203: Print stats on all BSD systems when SIGINFO (ctrl+t) is received + * Enh #1205: Allow specifying time/date for a backup with `--time` + +Details +------- + + * Bugfix #1167: Do not create a local repo unless `init` is used + + When a restic command other than `init` is used with a local repository and the repository + directory does not exist, restic creates the directory structure. That's an error, only the + `init` command should create the dir. + + https://github.com/restic/restic/issues/1167 + https://github.com/restic/restic/pull/1182 + + * Bugfix #1164: Make the `key remove` command behave as documented + + https://github.com/restic/restic/pull/1164 + + * Bugfix #1191: Make sure to write profiling files on interrupt + + Since a few releases restic had the ability to write profiling files for memory and CPU usage + when `debug` is enabled. It was discovered that when restic is interrupted (ctrl+c is + pressed), the proper shutdown hook is not run. This is now corrected. + + https://github.com/restic/restic/pull/1191 + + * Enhancement #1132: Make `key` command always prompt for a password + + The `key` command now prompts for a password even if the original password to access a repo has + been specified via the `RESTIC_PASSWORD` environment variable or a password file. + + https://github.com/restic/restic/issues/1132 + https://github.com/restic/restic/pull/1133 + + * Enhancement #1179: Resolve name conflicts, append a counter + + https://github.com/restic/restic/issues/1179 + https://github.com/restic/restic/pull/1209 + + * Enhancement #1218: Add `--compact` to `snapshots` command + + The option `--compact` was added to the `snapshots` command to get a better overview of the + snapshots in a repo. It limits each snapshot to a single line. + + https://github.com/restic/restic/issues/1218 + https://github.com/restic/restic/pull/1223 + + * Enhancement #317: Add `--exclude-caches` and `--exclude-if-present` + + A new option `--exclude-caches` was added that allows excluding cache directories (that are + tagged as such). This is a special case of a more generic option `--exclude-if-present` which + excludes a directory if a file with a specific name (and contents) is present. 
+ + https://github.com/restic/restic/issues/317 + https://github.com/restic/restic/pull/1170 + https://github.com/restic/restic/pull/1224 + + * Enhancement #697: Automatically generate man pages for all restic commands + + https://github.com/restic/restic/issues/697 + https://github.com/restic/restic/pull/1147 + + * Enhancement #1044: Improve `restore`, do not traverse/load excluded directories + + https://github.com/restic/restic/pull/1044 + + * Enhancement #1061: Add Dockerfile and official Docker image + + https://github.com/restic/restic/pull/1061 + + * Enhancement #1126: Use the standard Go git repository layout, use `dep` for vendoring + + The git repository layout was changed to resemble the layout typically used in Go projects, + we're not using `gb` for building restic any more and vendoring the dependencies is now taken + care of by `dep`. + + https://github.com/restic/restic/pull/1126 + + * Enhancement #1134: Add support for storing backups on Google Cloud Storage + + https://github.com/restic/restic/issues/211 + https://github.com/restic/restic/pull/1134 + https://github.com/restic/restic/pull/1052 + + * Enhancement #1144: Properly report errors when reading files with exclude patterns + + https://github.com/restic/restic/pull/1144 + + * Enhancement #1149: Add support for storing backups on Microsoft Azure Blob Storage + + The library we're using to access the service requires Go 1.8, so restic now needs at least Go + 1.8. + + https://github.com/restic/restic/issues/609 + https://github.com/restic/restic/pull/1149 + https://github.com/restic/restic/pull/1059 + + * Enhancement #1196: Add `--group-by` to `forget` command for flexible grouping + + https://github.com/restic/restic/pull/1196 + + * Enhancement #1203: Print stats on all BSD systems when SIGINFO (ctrl+t) is received + + https://github.com/restic/restic/pull/1203 + https://github.com/restic/restic/pull/1082#issuecomment-326279920 + + * Enhancement #1205: Allow specifying time/date for a backup with `--time` + + https://github.com/restic/restic/pull/1205 + + +Changelog for restic 0.7.1 (2017-07-22) +======================================= + +The following sections list the changes in restic 0.7.1 relevant to +restic users. The changes are ordered by importance. + +Summary +------- + + * Fix #1115: Fix `prune`, only include existing files in indexes + * Enh #1055: Create subdirs below `data/` for local/sftp backends + * Enh #1067: Allow loading credentials for s3 from IAM + * Enh #1073: Add `migrate` cmd to migrate from `s3legacy` to `default` layout + * Enh #1081: Clarify semantic for `--tasg` for the `forget` command + * Enh #1080: Ignore chmod() errors on filesystems which do not support it + * Enh #1082: Print stats on SIGINFO on Darwin and FreeBSD (ctrl+t) + +Details +------- + + * Bugfix #1115: Fix `prune`, only include existing files in indexes + + A bug was found (and corrected) in the index rebuilding after prune, which led to indexes which + include blobs that were not present in the repo any more. There were already checks in place + which detected this situation and aborted with an error message. A new run of either `prune` or + `rebuild-index` corrected the index files. This is now fixed and a test has been added to detect + this. + + https://github.com/restic/restic/pull/1115 + + * Enhancement #1055: Create subdirs below `data/` for local/sftp backends + + The local and sftp backends now create the subdirs below `data/` on open/init. This way, restic + makes sure that they always exist. 
This is connected to an issue for the sftp server. + + https://github.com/restic/restic/issues/1055 + https://github.com/restic/rest-server/pull/11#issuecomment-309879710 + https://github.com/restic/restic/pull/1077 + https://github.com/restic/restic/pull/1105 + + * Enhancement #1067: Allow loading credentials for s3 from IAM + + When no S3 credentials are specified in the environment variables, restic now tries to load + credentials from an IAM instance profile when the s3 backend is used. + + https://github.com/restic/restic/issues/1067 + https://github.com/restic/restic/pull/1086 + + * Enhancement #1073: Add `migrate` cmd to migrate from `s3legacy` to `default` layout + + The `migrate` command for changing the `s3legacy` layout to the `default` layout for s3 + backends has been improved: It can now be restarted with `restic migrate --force s3_layout` + and automatically retries operations on error. + + https://github.com/restic/restic/issues/1073 + https://github.com/restic/restic/pull/1075 + + * Enhancement #1081: Clarify semantic for `--tasg` for the `forget` command + + https://github.com/restic/restic/issues/1081 + https://github.com/restic/restic/pull/1090 + + * Enhancement #1080: Ignore chmod() errors on filesystems which do not support it + + https://github.com/restic/restic/pull/1080 + https://github.com/restic/restic/pull/1112 + + * Enhancement #1082: Print stats on SIGINFO on Darwin and FreeBSD (ctrl+t) + + https://github.com/restic/restic/pull/1082 + + +Changelog for restic 0.7.0 (2017-07-01) +======================================= + +The following sections list the changes in restic 0.7.0 relevant to +restic users. The changes are ordered by importance. + +Summary +------- + + * Fix #1013: Switch back to using the high-level minio-go API for s3 + * Fix #965: Switch to `default` repo layout for the s3 backend + * Enh #1021: Detect invalid backend name and print error + * Enh #1029: Remove invalid pack files when `prune` is run + * Enh #512: Add Backblaze B2 backend + * Enh #636: Add dirs `tags` and `hosts` to fuse mount + * Enh #989: Improve performance of the `find` command + * Enh #975: Add new backend for OpenStack Swift + * Enh #998: Improve performance of the fuse mount + +Details +------- + + * Bugfix #1013: Switch back to using the high-level minio-go API for s3 + + For the s3 backend we're back to using the high-level API the s3 client library for uploading + data, a few users reported dropped connections (which the library will automatically retry + now). + + https://github.com/restic/restic/issues/1013 + https://github.com/restic/restic/issues/1023 + https://github.com/restic/restic/pull/1025 + + * Bugfix #965: Switch to `default` repo layout for the s3 backend + + The default layout for the s3 backend is now `default` (instead of `s3legacy`). Also, there's a + new `migrate` command to convert an existing repo, it can be run like this: `restic migrate + s3_layout` + + https://github.com/restic/restic/issues/965 + https://github.com/restic/restic/pull/1004 + + * Enhancement #1021: Detect invalid backend name and print error + + Restic now tries to detect when an invalid/unknown backend is used and returns an error + message. + + https://github.com/restic/restic/issues/1021 + https://github.com/restic/restic/pull/1070 + + * Enhancement #1029: Remove invalid pack files when `prune` is run + + The `prune` command has been improved and will now remove invalid pack files, for example files + that have not been uploaded completely because a backup was interrupted. 
+ + https://github.com/restic/restic/issues/1029 + https://github.com/restic/restic/pull/1036 + + * Enhancement #512: Add Backblaze B2 backend + + https://github.com/restic/restic/issues/512 + https://github.com/restic/restic/pull/978 + + * Enhancement #636: Add dirs `tags` and `hosts` to fuse mount + + The fuse mount now has two more directories: `tags` contains a subdir for each tag, which in turn + contains only the snapshots that have this tag. The subdir `hosts` contains a subdir for each + host that has a snapshot, and the subdir contains the snapshots for that host. + + https://github.com/restic/restic/issues/636 + https://github.com/restic/restic/pull/1050 + + * Enhancement #989: Improve performance of the `find` command + + Improved performance for the `find` command: Restic recognizes paths it has already checked + for the files in question, so the number of backend requests is reduced a lot. + + https://github.com/restic/restic/issues/989 + https://github.com/restic/restic/pull/993 + + * Enhancement #975: Add new backend for OpenStack Swift + + https://github.com/restic/restic/pull/975 + https://github.com/restic/restic/pull/648 + + * Enhancement #998: Improve performance of the fuse mount + + Listing directories which contain large files now is significantly faster. + + https://github.com/restic/restic/pull/998 + + +Changelog for restic 0.6.1 (2017-06-01) +======================================= + +The following sections list the changes in restic 0.6.1 relevant to +restic users. The changes are ordered by importance. + +Summary +------- + + * Enh #985: Allow multiple parallel idle HTTP connections + * Enh #981: Remove temporary path from binary in `build.go` + * Enh #974: Remove regular status reports + +Details +------- + + * Enhancement #985: Allow multiple parallel idle HTTP connections + + Backends based on HTTP now allow several idle connections in parallel. This is especially + important for the REST backend, which (when used with a local server) may create a lot + connections and exhaust available ports quickly. + + https://github.com/restic/restic/issues/985 + https://github.com/restic/restic/pull/986 + + * Enhancement #981: Remove temporary path from binary in `build.go` + + The `build.go` now strips the temporary directory used for compilation from the binary. This + is the first step in enabling reproducible builds. + + https://github.com/restic/restic/pull/981 + + * Enhancement #974: Remove regular status reports + + Regular status report: We've removed the status report that was printed every 10 seconds when + restic is run non-interactively. You can still force reporting the current status by sending a + `USR1` signal to the process. + + https://github.com/restic/restic/pull/974 + + +Changelog for restic 0.6.0 (2017-05-29) +======================================= + +The following sections list the changes in restic 0.6.0 relevant to +restic users. The changes are ordered by importance. + +Summary +------- + + * Enh #957: Make `forget` consistent + * Enh #966: Unify repository layout for all backends + * Enh #962: Improve memory and runtime for the s3 backend + +Details +------- + + * Enhancement #957: Make `forget` consistent + + The `forget` command was corrected to be more consistent in which snapshots are to be + forgotten. It is possible that the new code removes more snapshots than before, so please + review what would be deleted by using the `--dry-run` option. 
+ + https://github.com/restic/restic/issues/953 + https://github.com/restic/restic/pull/957 + + * Enhancement #966: Unify repository layout for all backends + + Up to now the s3 backend used a special repository layout. We've decided to unify the repository + layout and implemented the default layout also for the s3 backend. For creating a new + repository on s3 with the default layout, use `restic -o s3.layout=default init`. For further + commands the option is not necessary any more, restic will automatically detect the correct + layout to use. A future version will switch to the default layout for new repositories. + + https://github.com/restic/restic/issues/965 + https://github.com/restic/restic/pull/966 + + * Enhancement #962: Improve memory and runtime for the s3 backend + + We've updated the library used for accessing s3, switched to using a lower level API and added + caching for some requests. This lead to a decrease in memory usage and a great speedup. In + addition, we added benchmark functions for all backends, so we can track improvements over + time. The Continuous Integration test service we're using (Travis) now runs the s3 backend + tests not only against a Minio server, but also against the Amazon s3 live service, so we should + be notified of any regressions much sooner. + + https://github.com/restic/restic/pull/962 + https://github.com/restic/restic/pull/960 + https://github.com/restic/restic/pull/946 + https://github.com/restic/restic/pull/938 + https://github.com/restic/restic/pull/883 + + diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 000000000..54d35590d --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,214 @@ +This document describes the way you can contribute to the restic project. + +Ways to Help Out +================ + +Thank you for your contribution! Please **open an issue first** (or add a +comment to an existing issue) if you plan to work on any code or add a new +feature. This way, duplicate work is prevented and we can discuss your ideas +and design first. + +There are several ways you can help us out. First of all code contributions and +bug fixes are most welcome. However even "minor" details as fixing spelling +errors, improving documentation or pointing out usability issues are a great +help also. + + +The restic project uses the GitHub infrastructure (see the +[project page](https://github.com/restic/restic)) for all related discussions +as well as the `#restic` channel on `irc.freenode.net`. + +If you want to find an area that currently needs improving have a look at the +open issues listed at the +[issues page](https://github.com/restic/restic/issues). This is also the place +for discussing enhancement to the restic tools. + +If you are unsure what to do, please have a look at the issues, especially +those tagged +[minor complexity](https://github.com/restic/restic/labels/minor%20complexity). + + +Reporting Bugs +============== + +You've found a bug? Thanks for letting us know so we can fix it! It is a good +idea to describe in detail how to reproduce the bug (when you know how), what +environment was used and so on. Please tell us at least the following things: + + * What's the version of restic you used? Please include the output of + `restic version` in your bug report. + * What commands did you execute to get to where the bug occurred? + * What did you expect? + * What happened instead? + * Are you aware of a way to reproduce the bug? 
+ +Remember, the easier it is for us to reproduce the bug, the earlier it will be +corrected! + +In addition, you can compile restic with debug support by running +`go run -mod=vendor build.go -tags debug` and instructing it to create a debug +log by setting the environment variable `DEBUG_LOG` to a file, e.g. like this: + + $ export DEBUG_LOG=/tmp/restic-debug.log + $ restic backup ~/work + +For Go < 1.11, you need to remove the `-mod=vendor` option from the build +command. + +Please be aware that the debug log file will contain potentially sensitive +things like file and directory names, so please either redact it before +uploading it somewhere or post only the parts that are really relevant. + + +Development Environment +======================= + +The repository contains several sets of directories with code: `cmd/` and +`internal/` contain the code written for restic, whereas `vendor/` contains +copies of libraries restic depends on. The libraries are managed with the +command `go mod vendor`. + +Go >= 1.11 +---------- + +For Go version 1.11 or later, you should clone the repo (without having +`$GOPATH` set) and `cd` into the directory: + + $ unset GOPATH + $ git clone https://github.com/restic/restic + $ cd restic + +Then use the `go` tool to build restic: + + $ go build ./cmd/restic + $ ./restic version + restic 0.9.2-dev (compiled manually) compiled with go1.11 on linux/amd64 + +You can run all tests with the following command: + + $ go test ./... + +Go < 1.11 +--------- + +In order to compile restic with Go before 1.11, it needs to be checked out at +the right path within a `GOPATH`. The concept of a `GOPATH` is explained in +["How to write Go code"](https://golang.org/doc/code.html). + +If you do not have a directory with Go code yet, executing the following +instructions in your shell will create one for you and check out the restic +repo: + + $ export GOPATH="$HOME/go" + $ mkdir -p "$GOPATH/src/github.com/restic" + $ cd "$GOPATH/src/github.com/restic" + $ git clone https://github.com/restic/restic + $ cd restic + +You can then build restic as follows: + + $ go build ./cmd/restic + $ ./restic version + restic compiled manually + compiled with go1.8.3 on linux/amd64 + +The following commands can be used to run all the tests: + + $ go test ./... + +Providing Patches +================= + +You have fixed an annoying bug or have added a new feature? Very cool! Let's +get it into the project! The workflow we're using is also described on the +[GitHub Flow](https://guides.github.com/introduction/flow/) website, it boils +down to the following steps: + + 0. If you want to work on something, please add a comment to the issue on + GitHub. For a new feature, please add an issue before starting to work on + it, so that duplicate work is prevented. + + 1. First we would kindly ask you to fork our project on GitHub if you haven't + done so already. + + 2. Clone the repository locally and create a new branch. If you are working on + the code itself, please set up the development environment as described in + the previous section. Especially take care to place your forked repository + at the correct path (`src/github.com/restic/restic`) within your `GOPATH`. + + 3. Then commit your changes as fine grained as possible, as smaller patches, + that handle one and only one issue are easier to discuss and merge. + + 4. Push the new branch with your changes to your fork of the repository. + + 5. Create a pull request by visiting the GitHub website, it will guide you + through the process. + + 6. 
You will receive comments on your code and the feature or bug that they + address. Maybe you need to rework some minor things, in this case push new + commits to the branch you created for the pull request (or amend the + existing commit, use common sense to decide which is better), they will be + automatically added to the pull request. + + 7. If your pull request changes anything that users should be aware of (a + bugfix, a new feature, ...) please add an entry to the file + ['CHANGELOG.md'](CHANGELOG.md). It will be used in the announcement of the + next stable release. While writing, ask yourself: If I were the user, what + would I need to be aware of with this change. + + 8. Once your code looks good and passes all the tests, we'll merge it. Thanks + a lot for your contribution! + +Please provide the patches for each bug or feature in a separate branch and +open up a pull request for each. + +The restic project uses the `gofmt` tool for Go source indentation, so please +run + + gofmt -w **/*.go + +in the project root directory before committing. For each Pull Request, the +formatting is tested with `gofmt` for the latest stable version of Go. +Installing the script `fmt-check` from https://github.com/edsrzf/gofmt-git-hook +locally as a pre-commit hook checks formatting before committing automatically, +just copy this script to `.git/hooks/pre-commit`. + +For each pull request, several different systems run the integration tests on +Linux, macOS and Windows. We won't merge any code that does not pass all tests +for all systems, so when a tests fails, try to find out what's wrong and fix +it. If you need help on this, please leave a comment in the pull request, and +we'll be glad to assist. Having a PR with failing integration tests is nothing +to be ashamed of. In contrast, that happens regularly for all of us. That's +what the tests are there for. + +Git Commits +----------- + +It would be good if you could follow the same general style regarding Git +commits as the rest of the project, this makes reviewing code, browsing the +history and triaging bugs much easier. + +Git commit messages have a very terse summary in the first line of the commit +message, followed by an empty line, followed by a more verbose description or a +List of changed things. For examples, please refer to the excellent [How to +Write a Git Commit Message](https://chris.beams.io/posts/git-commit/). + +If you change/add multiple different things that aren't related at all, try to +make several smaller commits. This is much easier to review. Using `git add -p` +allows staging and committing only some changes. + +Code Review +=========== + +The restic project encourages actively reviewing the code, as it will store +your precious data, so it's common practice to receive comments on provided +patches. + +If you are reviewing other contributor's code please consider the following +when reviewing: + +* Be nice. Please make the review comment as constructive as possible so all + participants will learn something from your review. + +As a contributor you might be asked to rewrite portions of your code to make it +fit better into the upstream sources. diff --git a/GOVERNANCE.md b/GOVERNANCE.md new file mode 100644 index 000000000..54ed85e07 --- /dev/null +++ b/GOVERNANCE.md @@ -0,0 +1,27 @@ +# restic project governance + +## Overview + +The restic project uses a governance model commonly described as Benevolent +Dictator For Life (BDFL). This document outlines our understanding of what this +means. 
It is derived from the [i3 window manager project +governance](https://raw.githubusercontent.com/i3/i3/next/.github/GOVERNANCE.md). + +## Roles + +* user: anyone who interacts with the restic project +* core contributor: a handful of people who have contributed significantly to + the project by any means (issue triage, support, documentation, code, etc.). + Core contributors are recognizable via GitHub’s "Member" badge. +* Benevolent Dictator For Life (BDFL): a single individual who makes decisions + when consensus cannot be reached. restic's current BDFL is [@fd0](https://github.com/fd0). + +## Decision making process + +In general, we try to reach consensus in discussions. In case consensus cannot +be reached, the BDFL makes a decision. + +## Contribution process + +The contribution process is described in a separate document called +[CONTRIBUTING](CONTRIBUTING.md). diff --git a/LICENSE b/LICENSE new file mode 100644 index 000000000..29b2ee55a --- /dev/null +++ b/LICENSE @@ -0,0 +1,25 @@ +BSD 2-Clause License + +Copyright (c) 2014, Alexander Neumann +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Makefile b/Makefile new file mode 100644 index 000000000..8e9b051db --- /dev/null +++ b/Makefile @@ -0,0 +1,13 @@ +.PHONY: all clean test restic + +all: restic + +restic: + go run -mod=vendor build.go || go run build.go + +clean: + rm -f restic + +test: + go test ./cmd/... ./internal/... + diff --git a/README.rst b/README.rst new file mode 100644 index 000000000..76110ea36 --- /dev/null +++ b/README.rst @@ -0,0 +1,137 @@ +|Documentation| |Build Status| |Build status| |Report Card| |Say Thanks| |TestCoverage| |Reviewed by Hound| + +Introduction +------------ + +restic is a backup program that is fast, efficient and secure. It supports the three major operating systems (Linux, macOS, Windows) and a few smaller ones (FreeBSD, OpenBSD). + +For detailed usage and installation instructions check out the `documentation `__. + +You can ask questions in our `Discourse forum `__. + +Quick start +----------- + +Once you've `installed +`__ restic, start +off with creating a repository for your backups: + +.. 
code-block:: console + + $ restic init --repo /tmp/backup + enter password for new backend: + enter password again: + created restic backend 085b3c76b9 at /tmp/backup + Please note that knowledge of your password is required to access the repository. + Losing your password means that your data is irrecoverably lost. + +and add some data: + +.. code-block:: console + + $ restic --repo /tmp/backup backup ~/work + enter password for repository: + scan [/home/user/work] + scanned 764 directories, 1816 files in 0:00 + [0:29] 100.00% 54.732 MiB/s 1.582 GiB / 1.582 GiB 2580 / 2580 items 0 errors ETA 0:00 + duration: 0:29, 54.47MiB/s + snapshot 40dc1520 saved + +Next you can either use ``restic restore`` to restore files or use ``restic +mount`` to mount the repository via fuse and browse the files from previous +snapshots. + +For more options check out the `online documentation `__. + +Backends +-------- + +Saving a backup on the same machine is nice but not a real backup strategy. +Therefore, restic supports the following backends for storing backups natively: + +- `Local directory `__ +- `sftp server (via SSH) `__ +- `HTTP REST server `__ (`protocol `__ `rest-server `__) +- `AWS S3 `__ (either from Amazon or using the `Minio `__ server) +- `OpenStack Swift `__ +- `BackBlaze B2 `__ +- `Microsoft Azure Blob Storage `__ +- `Google Cloud Storage `__ +- And many other services via the `rclone `__ `Backend `__ + +Design Principles +----------------- + +Restic is a program that does backups right and was designed with the +following principles in mind: + +- **Easy:** Doing backups should be a frictionless process, otherwise + you might be tempted to skip it. Restic should be easy to configure + and use, so that, in the event of a data loss, you can just restore + it. Likewise, restoring data should not be complicated. + +- **Fast**: Backing up your data with restic should only be limited by + your network or hard disk bandwidth so that you can backup your files + every day. Nobody does backups if it takes too much time. Restoring + backups should only transfer data that is needed for the files that + are to be restored, so that this process is also fast. + +- **Verifiable**: Much more important than backup is restore, so restic + enables you to easily verify that all data can be restored. + +- **Secure**: Restic uses cryptography to guarantee confidentiality and + integrity of your data. The location the backup data is stored is + assumed not to be a trusted environment (e.g. a shared space where + others like system administrators are able to access your backups). + Restic is built to secure your data against such attackers. + +- **Efficient**: With the growth of data, additional snapshots should + only take the storage of the actual increment. Even more, duplicate + data should be de-duplicated before it is actually written to the + storage back end to save precious backup space. + +Reproducible Builds +------------------- + +The binaries released with each restic version starting at 0.6.1 are +`reproducible `__, which means that you can +easily reproduce a byte identical version from the source code for that +release. Instructions on how to do that are contained in the +`builder repository `__. + +News +---- + +You can follow the restic project on Twitter `@resticbackup `__ or by subscribing to +the `development blog `__. + +License +------- + +Restic is licensed under `BSD 2-Clause License `__. You can find the +complete text in ``LICENSE``. 
+ +Sponsorship +----------- + +Backend integration tests for Google Cloud Storage and Microsoft Azure Blob +Storage are sponsored by `AppsCode `__! + +|AppsCode| + +.. |Documentation| image:: https://readthedocs.org/projects/restic/badge/?version=latest + :target: https://restic.readthedocs.io/en/latest/?badge=latest +.. |Build Status| image:: https://travis-ci.com/restic/restic.svg?branch=master + :target: https://travis-ci.com/restic/restic +.. |Build status| image:: https://ci.appveyor.com/api/projects/status/nuy4lfbgfbytw92q/branch/master?svg=true + :target: https://ci.appveyor.com/project/fd0/restic/branch/master +.. |Report Card| image:: https://goreportcard.com/badge/github.com/restic/restic + :target: https://goreportcard.com/report/github.com/restic/restic +.. |Say Thanks| image:: https://img.shields.io/badge/Say%20Thanks-!-1EAEDB.svg + :target: https://saythanks.io/to/restic +.. |TestCoverage| image:: https://codecov.io/gh/restic/restic/branch/master/graph/badge.svg + :target: https://codecov.io/gh/restic/restic +.. |AppsCode| image:: https://cdn.appscode.com/images/logo/appscode/ac-logo-color.png + :target: https://appscode.com +.. |Reviewed by Hound| image:: https://img.shields.io/badge/Reviewed_by-Hound-8E64B0.svg + :target: https://houndci.com diff --git a/VERSION b/VERSION new file mode 100644 index 000000000..a602fc9e2 --- /dev/null +++ b/VERSION @@ -0,0 +1 @@ +0.9.4 diff --git a/appveyor.yml b/appveyor.yml new file mode 100644 index 000000000..7065a219f --- /dev/null +++ b/appveyor.yml @@ -0,0 +1,32 @@ +clone_folder: c:\restic + +environment: + GOPATH: c:\gopath + +branches: + only: + - master + +cache: + - '%LocalAppData%\go-build' + +init: + - ps: >- + $app = Get-WmiObject -Class Win32_Product -Filter "Vendor = 'http://golang.org'" + + if ($app) { + $app.Uninstall() + } + +install: + - rmdir c:\go /s /q + - appveyor DownloadFile https://dl.google.com/go/go1.11.windows-amd64.msi + - msiexec /i go1.11.windows-amd64.msi /q + - go version + - go env + - appveyor DownloadFile http://sourceforge.netcologne.de/project/gnuwin32/tar/1.13-1/tar-1.13-1-bin.zip -FileName tar.zip + - 7z x tar.zip bin/tar.exe + - set PATH=bin/;%PATH% + +build_script: + - go run -mod=vendor run_integration_tests.go diff --git a/build.go b/build.go new file mode 100644 index 000000000..fdd6f943a --- /dev/null +++ b/build.go @@ -0,0 +1,632 @@ +// Description +// +// This program aims to make building Go programs for end users easier by just +// calling it with `go run`, without having to setup a GOPATH. +// +// For Go < 1.11, it'll create a new GOPATH in a temporary directory, then run +// `go build` on the package configured as Main in the Config struct. +// +// For Go >= 1.11 if the file go.mod is present, it'll use Go modules and not +// setup a GOPATH. It builds the package configured as Main in the Config +// struct with `go build -mod=vendor` to use the vendored dependencies. +// The variable GOPROXY is set to `off` so that no network calls are made. All +// files are copied to a temporary directory before `go build` is called within +// that directory. + +// BSD 2-Clause License +// +// Copyright (c) 2016-2018, Alexander Neumann +// All rights reserved. 
+// +// This file has been copied from the repository at: +// https://github.com/fd0/build-go +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build ignore_build_go + +package main + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "runtime" + "strconv" + "strings" +) + +// config contains the configuration for the program to build. +var config = Config{ + Name: "restic", // name of the program executable and directory + Namespace: "github.com/restic/restic", // subdir of GOPATH, e.g. "github.com/foo/bar" + Main: "./cmd/restic", // package name for the main package + DefaultBuildTags: []string{"selfupdate"}, // specify build tags which are always used + Tests: []string{"./..."}, // tests to run + MinVersion: GoVersion{Major: 1, Minor: 9, Patch: 0}, // minimum Go version supported +} + +// Config configures the build. 
+type Config struct { + Name string + Namespace string + Main string + DefaultBuildTags []string + Tests []string + MinVersion GoVersion +} + +var ( + verbose bool + keepGopath bool + runTests bool + enableCGO bool + enablePIE bool + goVersion = ParseGoVersion(runtime.Version()) +) + +// copy all Go files in src to dst, creating directories on the fly, so calling +// +// copy("/tmp/gopath/src/github.com/restic/restic", "/home/u/restic") +// +// with "/home/u/restic" containing the file "foo.go" yields the following tree +// at "/tmp/gopath": +// +// /tmp/gopath +// └── src +// └── github.com +// └── restic +// └── restic +// └── foo.go +func copy(dst, src string) error { + verbosePrintf("copy contents of %v to %v\n", src, dst) + return filepath.Walk(src, func(name string, fi os.FileInfo, err error) error { + if name == src { + return err + } + + if name == ".git" { + return filepath.SkipDir + } + + if err != nil { + return err + } + + if fi.IsDir() { + return nil + } + + intermediatePath, err := filepath.Rel(src, name) + if err != nil { + return err + } + + fileSrc := filepath.Join(src, intermediatePath) + fileDst := filepath.Join(dst, intermediatePath) + + return copyFile(fileDst, fileSrc) + }) +} + +func directoryExists(dirname string) bool { + stat, err := os.Stat(dirname) + if err != nil && os.IsNotExist(err) { + return false + } + + return stat.IsDir() +} + +func fileExists(filename string) bool { + stat, err := os.Stat(filename) + if err != nil && os.IsNotExist(err) { + return false + } + + return stat.Mode().IsRegular() +} + +// copyFile creates dst from src, preserving file attributes and timestamps. +func copyFile(dst, src string) error { + fi, err := os.Stat(src) + if err != nil { + return err + } + + fsrc, err := os.Open(src) + if err != nil { + return err + } + + if err = os.MkdirAll(filepath.Dir(dst), 0755); err != nil { + fmt.Printf("MkdirAll(%v)\n", filepath.Dir(dst)) + return err + } + + fdst, err := os.Create(dst) + if err != nil { + _ = fsrc.Close() + return err + } + + _, err = io.Copy(fdst, fsrc) + if err != nil { + _ = fsrc.Close() + _ = fdst.Close() + return err + } + + err = fdst.Close() + if err != nil { + _ = fsrc.Close() + return err + } + + err = fsrc.Close() + if err != nil { + return err + } + + err = os.Chmod(dst, fi.Mode()) + if err != nil { + return err + } + + return os.Chtimes(dst, fi.ModTime(), fi.ModTime()) +} + +// die prints the message with fmt.Fprintf() to stderr and exits with an error +// code. +func die(message string, args ...interface{}) { + fmt.Fprintf(os.Stderr, message, args...) 
+ os.Exit(1) +} + +func showUsage(output io.Writer) { + fmt.Fprintf(output, "USAGE: go run build.go OPTIONS\n") + fmt.Fprintf(output, "\n") + fmt.Fprintf(output, "OPTIONS:\n") + fmt.Fprintf(output, " -v --verbose output more messages\n") + fmt.Fprintf(output, " -t --tags specify additional build tags\n") + fmt.Fprintf(output, " -k --keep-tempdir do not remove the temporary directory after build\n") + fmt.Fprintf(output, " -T --test run tests\n") + fmt.Fprintf(output, " -o --output set output file name\n") + fmt.Fprintf(output, " --enable-cgo use CGO to link against libc\n") + fmt.Fprintf(output, " --enable-pie use PIE buildmode\n") + fmt.Fprintf(output, " --goos value set GOOS for cross-compilation\n") + fmt.Fprintf(output, " --goarch value set GOARCH for cross-compilation\n") + fmt.Fprintf(output, " --goarm value set GOARM for cross-compilation\n") + fmt.Fprintf(output, " --tempdir dir use a specific directory for compilation\n") +} + +func verbosePrintf(message string, args ...interface{}) { + if !verbose { + return + } + + fmt.Printf("build: "+message, args...) +} + +// cleanEnv returns a clean environment with GOPATH, GOBIN and GO111MODULE +// removed (if present). +func cleanEnv() (env []string) { + removeKeys := map[string]struct{}{ + "GOPATH": struct{}{}, + "GOBIN": struct{}{}, + "GO111MODULE": struct{}{}, + } + + for _, v := range os.Environ() { + data := strings.SplitN(v, "=", 2) + name := data[0] + + if _, ok := removeKeys[name]; ok { + continue + } + + env = append(env, v) + } + + return env +} + +// build runs "go build args..." with GOPATH set to gopath. +func build(cwd string, env map[string]string, args ...string) error { + a := []string{"build"} + + if goVersion.AtLeast(GoVersion{1, 10, 0}) { + verbosePrintf("Go version is at least 1.10, using new syntax for -gcflags\n") + // use new prefix + a = append(a, "-asmflags", fmt.Sprintf("all=-trimpath=%s", cwd)) + a = append(a, "-gcflags", fmt.Sprintf("all=-trimpath=%s", cwd)) + } else { + a = append(a, "-asmflags", fmt.Sprintf("-trimpath=%s", cwd)) + a = append(a, "-gcflags", fmt.Sprintf("-trimpath=%s", cwd)) + } + if enablePIE { + a = append(a, "-buildmode=pie") + } + + a = append(a, args...) + cmd := exec.Command("go", a...) + cmd.Env = append(cleanEnv(), "GOPROXY=off") + for k, v := range env { + cmd.Env = append(cmd.Env, k+"="+v) + } + if !enableCGO { + cmd.Env = append(cmd.Env, "CGO_ENABLED=0") + } + + cmd.Dir = cwd + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + + verbosePrintf("chdir %q\n", cwd) + verbosePrintf("go %q\n", a) + + return cmd.Run() +} + +// test runs "go test args..." with GOPATH set to gopath. +func test(cwd string, env map[string]string, args ...string) error { + args = append([]string{"test", "-count", "1"}, args...) + cmd := exec.Command("go", args...) + cmd.Env = append(cleanEnv(), "GOPROXY=off") + for k, v := range env { + cmd.Env = append(cmd.Env, k+"="+v) + } + if !enableCGO { + cmd.Env = append(cmd.Env, "CGO_ENABLED=0") + } + cmd.Dir = cwd + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + + verbosePrintf("chdir %q\n", cwd) + verbosePrintf("go %q\n", args) + + return cmd.Run() +} + +// getVersion returns the version string from the file VERSION in the current +// directory. 
+func getVersionFromFile() string { + buf, err := ioutil.ReadFile("VERSION") + if err != nil { + verbosePrintf("error reading file VERSION: %v\n", err) + return "" + } + + return strings.TrimSpace(string(buf)) +} + +// getVersion returns a version string which is a combination of the contents +// of the file VERSION in the current directory and the version from git (if +// available). +func getVersion() string { + versionFile := getVersionFromFile() + versionGit := getVersionFromGit() + + verbosePrintf("version from file 'VERSION' is %q, version from git %q\n", + versionFile, versionGit) + + switch { + case versionFile == "": + return versionGit + case versionGit == "": + return versionFile + } + + return fmt.Sprintf("%s (%s)", versionFile, versionGit) +} + +// getVersionFromGit returns a version string that identifies the currently +// checked out git commit. +func getVersionFromGit() string { + cmd := exec.Command("git", "describe", + "--long", "--tags", "--dirty", "--always") + out, err := cmd.Output() + if err != nil { + verbosePrintf("git describe returned error: %v\n", err) + return "" + } + + version := strings.TrimSpace(string(out)) + verbosePrintf("git version is %s\n", version) + return version +} + +// Constants represents a set of constants that are set in the final binary to +// the given value via compiler flags. +type Constants map[string]string + +// LDFlags returns the string that can be passed to go build's `-ldflags`. +func (cs Constants) LDFlags() string { + l := make([]string, 0, len(cs)) + + for k, v := range cs { + l = append(l, fmt.Sprintf(`-X "%s=%s"`, k, v)) + } + + return strings.Join(l, " ") +} + +// GoVersion is the version of Go used to compile the project. +type GoVersion struct { + Major int + Minor int + Patch int +} + +// ParseGoVersion parses the Go version s. If s cannot be parsed, the returned GoVersion is null. +func ParseGoVersion(s string) (v GoVersion) { + if !strings.HasPrefix(s, "go") { + return + } + + s = s[2:] + data := strings.Split(s, ".") + if len(data) < 2 || len(data) > 3 { + // invalid version + return GoVersion{} + } + + var err error + + v.Major, err = strconv.Atoi(data[0]) + if err != nil { + return GoVersion{} + } + + // try to parse the minor version while removing an eventual suffix (like + // "rc2" or so) + for s := data[1]; s != ""; s = s[:len(s)-1] { + v.Minor, err = strconv.Atoi(s) + if err == nil { + break + } + } + + if v.Minor == 0 { + // no minor version found + return GoVersion{} + } + + if len(data) >= 3 { + v.Patch, err = strconv.Atoi(data[2]) + if err != nil { + return GoVersion{} + } + } + + return +} + +// AtLeast returns true if v is at least as new as other. If v is empty, true is returned. 
+func (v GoVersion) AtLeast(other GoVersion) bool { + var empty GoVersion + + // the empty version satisfies all versions + if v == empty { + return true + } + + if v.Major < other.Major { + return false + } + + if v.Minor < other.Minor { + return false + } + + if v.Patch < other.Patch { + return false + } + + return true +} + +func (v GoVersion) String() string { + return fmt.Sprintf("Go %d.%d.%d", v.Major, v.Minor, v.Patch) +} + +func main() { + if !goVersion.AtLeast(config.MinVersion) { + fmt.Fprintf(os.Stderr, "%s detected, this program requires at least %s\n", goVersion, config.MinVersion) + os.Exit(1) + } + + buildTags := config.DefaultBuildTags + + skipNext := false + params := os.Args[1:] + + goEnv := map[string]string{} + buildEnv := map[string]string{ + "GOOS": runtime.GOOS, + "GOARCH": runtime.GOARCH, + "GOARM": "", + } + + tempdir := "" + + var outputFilename string + + for i, arg := range params { + if skipNext { + skipNext = false + continue + } + + switch arg { + case "-v", "--verbose": + verbose = true + case "-k", "--keep-gopath": + keepGopath = true + case "-t", "-tags", "--tags": + if i+1 >= len(params) { + die("-t given but no tag specified") + } + skipNext = true + buildTags = append(buildTags, strings.Split(params[i+1], " ")...) + case "-o", "--output": + skipNext = true + outputFilename = params[i+1] + case "--tempdir": + skipNext = true + tempdir = params[i+1] + case "-T", "--test": + runTests = true + case "--enable-cgo": + enableCGO = true + case "--enable-pie": + enablePIE = true + case "--goos": + skipNext = true + buildEnv["GOOS"] = params[i+1] + case "--goarch": + skipNext = true + buildEnv["GOARCH"] = params[i+1] + case "--goarm": + skipNext = true + buildEnv["GOARM"] = params[i+1] + case "-h": + showUsage(os.Stdout) + return + default: + fmt.Fprintf(os.Stderr, "Error: unknown option %q\n\n", arg) + showUsage(os.Stderr) + os.Exit(1) + } + } + + verbosePrintf("detected Go version %v\n", goVersion) + + for i := range buildTags { + buildTags[i] = strings.TrimSpace(buildTags[i]) + } + + verbosePrintf("build tags: %s\n", buildTags) + + root, err := os.Getwd() + if err != nil { + die("Getwd(): %v\n", err) + } + + if outputFilename == "" { + outputFilename = config.Name + if buildEnv["GOOS"] == "windows" { + outputFilename += ".exe" + } + } + + output := outputFilename + if !filepath.IsAbs(output) { + output = filepath.Join(root, output) + } + + version := getVersion() + constants := Constants{} + if version != "" { + constants["main.version"] = version + } + ldflags := "-s -w " + constants.LDFlags() + verbosePrintf("ldflags: %s\n", ldflags) + + var ( + buildArgs []string + testArgs []string + ) + + mainPackage := config.Main + if strings.HasPrefix(mainPackage, config.Namespace) { + mainPackage = strings.Replace(mainPackage, config.Namespace, "./", 1) + } + + buildTarget := filepath.FromSlash(mainPackage) + buildCWD := "" + + if goVersion.AtLeast(GoVersion{1, 11, 0}) && fileExists("go.mod") { + verbosePrintf("Go >= 1.11 and 'go.mod' found, building with modules\n") + buildCWD = root + + buildArgs = append(buildArgs, "-mod=vendor") + testArgs = append(testArgs, "-mod=vendor") + } else { + if tempdir == "" { + tempdir, err = ioutil.TempDir("", fmt.Sprintf("%v-build-", config.Name)) + if err != nil { + die("TempDir(): %v\n", err) + } + } + + verbosePrintf("Go < 1.11 or 'go.mod' not found, create GOPATH at %v\n", tempdir) + targetdir := filepath.Join(tempdir, "src", filepath.FromSlash(config.Namespace)) + if err = copy(targetdir, root); err != nil { + die("copying files 
from %v to %v/src failed: %v\n", root, tempdir, err) + } + + defer func() { + if !keepGopath { + verbosePrintf("remove %v\n", tempdir) + if err = os.RemoveAll(tempdir); err != nil { + die("remove GOPATH at %s failed: %v\n", tempdir, err) + } + } else { + verbosePrintf("leaving temporary GOPATH at %v\n", tempdir) + } + }() + + buildCWD = targetdir + + goEnv["GOPATH"] = tempdir + buildEnv["GOPATH"] = tempdir + } + + verbosePrintf("environment:\n go: %v\n build: %v\n", goEnv, buildEnv) + + buildArgs = append(buildArgs, + "-tags", strings.Join(buildTags, " "), + "-ldflags", ldflags, + "-o", output, buildTarget, + ) + + err = build(buildCWD, buildEnv, buildArgs...) + if err != nil { + die("build failed: %v\n", err) + } + + if runTests { + verbosePrintf("running tests\n") + + testArgs = append(testArgs, config.Tests...) + + err = test(buildCWD, goEnv, testArgs...) + if err != nil { + die("running tests failed: %v\n", err) + } + } +} diff --git a/changelog/0.6.0_2017-05-29/issue-953 b/changelog/0.6.0_2017-05-29/issue-953 new file mode 100644 index 000000000..04ac5cc18 --- /dev/null +++ b/changelog/0.6.0_2017-05-29/issue-953 @@ -0,0 +1,8 @@ +Enhancement: Make `forget` consistent + +The `forget` command was corrected to be more consistent in which snapshots are +to be forgotten. It is possible that the new code removes more snapshots than +before, so please review what would be deleted by using the `--dry-run` option. + +https://github.com/restic/restic/pull/957 +https://github.com/restic/restic/issues/953 diff --git a/changelog/0.6.0_2017-05-29/issue-965 b/changelog/0.6.0_2017-05-29/issue-965 new file mode 100644 index 000000000..089b39b16 --- /dev/null +++ b/changelog/0.6.0_2017-05-29/issue-965 @@ -0,0 +1,11 @@ +Enhancement: Unify repository layout for all backends + +Up to now the s3 backend used a special repository layout. We've decided to +unify the repository layout and implemented the default layout also for the s3 +backend. For creating a new repository on s3 with the default layout, use +`restic -o s3.layout=default init`. For further commands the option is not +necessary any more, restic will automatically detect the correct layout to use. +A future version will switch to the default layout for new repositories. + +https://github.com/restic/restic/pull/966 +https://github.com/restic/restic/issues/965 diff --git a/changelog/0.6.0_2017-05-29/pull-962 b/changelog/0.6.0_2017-05-29/pull-962 new file mode 100644 index 000000000..6bf14a48b --- /dev/null +++ b/changelog/0.6.0_2017-05-29/pull-962 @@ -0,0 +1,15 @@ +Enhancement: Improve memory and runtime for the s3 backend + +We've updated the library used for accessing s3, switched to using a lower +level API and added caching for some requests. This lead to a decrease in +memory usage and a great speedup. In addition, we added benchmark functions for +all backends, so we can track improvements over time. The Continuous +Integration test service we're using (Travis) now runs the s3 backend tests not +only against a Minio server, but also against the Amazon s3 live service, so we +should be notified of any regressions much sooner. 
+ +https://github.com/restic/restic/pull/962 +https://github.com/restic/restic/pull/960 +https://github.com/restic/restic/pull/946 +https://github.com/restic/restic/pull/938 +https://github.com/restic/restic/pull/883 diff --git a/changelog/0.6.1_2017-06-01/issue-985 b/changelog/0.6.1_2017-06-01/issue-985 new file mode 100644 index 000000000..d20f20326 --- /dev/null +++ b/changelog/0.6.1_2017-06-01/issue-985 @@ -0,0 +1,8 @@ +Enhancement: Allow multiple parallel idle HTTP connections + +Backends based on HTTP now allow several idle connections in parallel. This +is especially important for the REST backend, which (when used with a local +server) may create a lot connections and exhaust available ports quickly. + +https://github.com/restic/restic/issues/985 +https://github.com/restic/restic/pull/986 diff --git a/changelog/0.6.1_2017-06-01/pull-891 b/changelog/0.6.1_2017-06-01/pull-891 new file mode 100644 index 000000000..1d07a8dbd --- /dev/null +++ b/changelog/0.6.1_2017-06-01/pull-891 @@ -0,0 +1,6 @@ +Enhancement: Remove temporary path from binary in `build.go` + +The `build.go` now strips the temporary directory used for compilation from +the binary. This is the first step in enabling reproducible builds. + +https://github.com/restic/restic/pull/981 diff --git a/changelog/0.6.1_2017-06-01/pull-974 b/changelog/0.6.1_2017-06-01/pull-974 new file mode 100644 index 000000000..1358e2339 --- /dev/null +++ b/changelog/0.6.1_2017-06-01/pull-974 @@ -0,0 +1,7 @@ +Enhancement: Remove regular status reports + +Regular status report: We've removed the status report that was printed +every 10 seconds when restic is run non-interactively. You can still force +reporting the current status by sending a `USR1` signal to the process. + +https://github.com/restic/restic/pull/974 diff --git a/changelog/0.7.0_2017-07-01/issue-1013 b/changelog/0.7.0_2017-07-01/issue-1013 new file mode 100644 index 000000000..5f9b7a786 --- /dev/null +++ b/changelog/0.7.0_2017-07-01/issue-1013 @@ -0,0 +1,9 @@ +Bugfix: Switch back to using the high-level minio-go API for s3 + +For the s3 backend we're back to using the high-level API the s3 client library +for uploading data, a few users reported dropped connections (which the library +will automatically retry now). + +https://github.com/restic/restic/issues/1013 +https://github.com/restic/restic/issues/1023 +https://github.com/restic/restic/pull/1025 diff --git a/changelog/0.7.0_2017-07-01/issue-1021 b/changelog/0.7.0_2017-07-01/issue-1021 new file mode 100644 index 000000000..da727269f --- /dev/null +++ b/changelog/0.7.0_2017-07-01/issue-1021 @@ -0,0 +1,7 @@ +Enhancement: Detect invalid backend name and print error + +restic now tries to detect when an invalid/unknown backend is used and +returns an error message. + +https://github.com/restic/restic/issues/1021 +https://github.com/restic/restic/pull/1070 diff --git a/changelog/0.7.0_2017-07-01/issue-1029 b/changelog/0.7.0_2017-07-01/issue-1029 new file mode 100644 index 000000000..eb1f700d5 --- /dev/null +++ b/changelog/0.7.0_2017-07-01/issue-1029 @@ -0,0 +1,8 @@ +Enhancement: Remove invalid pack files when `prune` is run + +The `prune` command has been improved and will now remove invalid pack files, +for example files that have not been uploaded completely because a backup was +interrupted. 
+ +https://github.com/restic/restic/issues/1029 +https://github.com/restic/restic/pull/1036 diff --git a/changelog/0.7.0_2017-07-01/issue-512 b/changelog/0.7.0_2017-07-01/issue-512 new file mode 100644 index 000000000..4b1163bf7 --- /dev/null +++ b/changelog/0.7.0_2017-07-01/issue-512 @@ -0,0 +1,4 @@ +Enhancement: Add Backblaze B2 backend + +https://github.com/restic/restic/issues/512 +https://github.com/restic/restic/pull/978 diff --git a/changelog/0.7.0_2017-07-01/issue-636 b/changelog/0.7.0_2017-07-01/issue-636 new file mode 100644 index 000000000..d5e55b2ab --- /dev/null +++ b/changelog/0.7.0_2017-07-01/issue-636 @@ -0,0 +1,9 @@ +Enhancement: Add dirs `tags` and `hosts` to fuse mount + +The fuse mount now has two more directories: `tags` contains a subdir for +each tag, which in turn contains only the snapshots that have this tag. The +subdir `hosts` contains a subdir for each host that has a snapshot, and the +subdir contains the snapshots for that host. + +https://github.com/restic/restic/issues/636 +https://github.com/restic/restic/pull/1050 diff --git a/changelog/0.7.0_2017-07-01/issue-965 b/changelog/0.7.0_2017-07-01/issue-965 new file mode 100644 index 000000000..6fbbb8acf --- /dev/null +++ b/changelog/0.7.0_2017-07-01/issue-965 @@ -0,0 +1,8 @@ +Bugfix: Switch to `default` repo layout for the s3 backend + +The default layout for the s3 backend is now `default` (instead of `s3legacy`). +Also, there's a new `migrate` command to convert an existing repo, it can be +run like this: `restic migrate s3_layout` + +https://github.com/restic/restic/issues/965 +https://github.com/restic/restic/pull/1004 diff --git a/changelog/0.7.0_2017-07-01/issue-989 b/changelog/0.7.0_2017-07-01/issue-989 new file mode 100644 index 000000000..4e8da055b --- /dev/null +++ b/changelog/0.7.0_2017-07-01/issue-989 @@ -0,0 +1,8 @@ +Enhancement: Improve performance of the `find` command + +Improved performance for the `find` command: Restic recognizes paths it has +already checked for the files in question, so the number of backend requests +is reduced a lot. + +https://github.com/restic/restic/issues/989 +https://github.com/restic/restic/pull/993 diff --git a/changelog/0.7.0_2017-07-01/pull-975 b/changelog/0.7.0_2017-07-01/pull-975 new file mode 100644 index 000000000..24172ac64 --- /dev/null +++ b/changelog/0.7.0_2017-07-01/pull-975 @@ -0,0 +1,4 @@ +Enhancement: Add new backend for OpenStack Swift + +https://github.com/restic/restic/pull/975 +https://github.com/restic/restic/pull/648 diff --git a/changelog/0.7.0_2017-07-01/pull-998 b/changelog/0.7.0_2017-07-01/pull-998 new file mode 100644 index 000000000..0609030d4 --- /dev/null +++ b/changelog/0.7.0_2017-07-01/pull-998 @@ -0,0 +1,5 @@ +Enhancement: Improve performance of the fuse mount + +Listing directories which contain large files now is significantly faster. + +https://github.com/restic/restic/pull/998 diff --git a/changelog/0.7.1_2017-07-22/issue-1055 b/changelog/0.7.1_2017-07-22/issue-1055 new file mode 100644 index 000000000..d03ebf2ee --- /dev/null +++ b/changelog/0.7.1_2017-07-22/issue-1055 @@ -0,0 +1,10 @@ +Enhancement: Create subdirs below `data/` for local/sftp backends + +The local and sftp backends now create the subdirs below `data/` on +open/init. This way, restic makes sure that they always exist. This is +connected to an issue for the sftp server. 
+ +https://github.com/restic/restic/issues/1055 +https://github.com/restic/rest-server/pull/11#issuecomment-309879710 +https://github.com/restic/restic/pull/1077 +https://github.com/restic/restic/pull/1105 diff --git a/changelog/0.7.1_2017-07-22/issue-1067 b/changelog/0.7.1_2017-07-22/issue-1067 new file mode 100644 index 000000000..e7acecf41 --- /dev/null +++ b/changelog/0.7.1_2017-07-22/issue-1067 @@ -0,0 +1,8 @@ +Enhancement: Allow loading credentials for s3 from IAM + +When no S3 credentials are specified in the environment variables, restic +now tries to load credentials from an IAM instance profile when the s3 +backend is used. + +https://github.com/restic/restic/issues/1067 +https://github.com/restic/restic/pull/1086 diff --git a/changelog/0.7.1_2017-07-22/issue-1073 b/changelog/0.7.1_2017-07-22/issue-1073 new file mode 100644 index 000000000..3a2bb4dd0 --- /dev/null +++ b/changelog/0.7.1_2017-07-22/issue-1073 @@ -0,0 +1,8 @@ +Enhancement: Add `migrate` cmd to migrate from `s3legacy` to `default` layout + +The `migrate` command for changing the `s3legacy` layout to the `default` layout +for s3 backends has been improved: It can now be restarted with `restic migrate +--force s3_layout` and automatically retries operations on error. + +https://github.com/restic/restic/issues/1073 +https://github.com/restic/restic/pull/1075 diff --git a/changelog/0.7.1_2017-07-22/issue-1081 b/changelog/0.7.1_2017-07-22/issue-1081 new file mode 100644 index 000000000..bef658956 --- /dev/null +++ b/changelog/0.7.1_2017-07-22/issue-1081 @@ -0,0 +1,4 @@ +Enhancement: Clarify semantic for `--tasg` for the `forget` command + +https://github.com/restic/restic/issues/1081 +https://github.com/restic/restic/pull/1090 diff --git a/changelog/0.7.1_2017-07-22/pull-1080 b/changelog/0.7.1_2017-07-22/pull-1080 new file mode 100644 index 000000000..3debc67aa --- /dev/null +++ b/changelog/0.7.1_2017-07-22/pull-1080 @@ -0,0 +1,5 @@ +Enhancement: Ignore chmod() errors on filesystems which do not support it + +https://github.com/restic/restic/pull/1080 +https://github.com/restic/restic/pull/1112 + diff --git a/changelog/0.7.1_2017-07-22/pull-1082 b/changelog/0.7.1_2017-07-22/pull-1082 new file mode 100644 index 000000000..e49a7d812 --- /dev/null +++ b/changelog/0.7.1_2017-07-22/pull-1082 @@ -0,0 +1,3 @@ +Enhancement: Print stats on SIGINFO on Darwin and FreeBSD (ctrl+t) + +https://github.com/restic/restic/pull/1082 diff --git a/changelog/0.7.1_2017-07-22/pull-1115 b/changelog/0.7.1_2017-07-22/pull-1115 new file mode 100644 index 000000000..580528ef9 --- /dev/null +++ b/changelog/0.7.1_2017-07-22/pull-1115 @@ -0,0 +1,9 @@ +Bugfix: Fix `prune`, only include existing files in indexes + +A bug was found (and corrected) in the index rebuilding after prune, which led +to indexes which include blobs that were not present in the repo any more. +There were already checks in place which detected this situation and aborted +with an error message. A new run of either `prune` or `rebuild-index` corrected +the index files. This is now fixed and a test has been added to detect this. 
+ +https://github.com/restic/restic/pull/1115 diff --git a/changelog/0.7.2_2017-09-13/issue-1132 b/changelog/0.7.2_2017-09-13/issue-1132 new file mode 100644 index 000000000..1c40e84b7 --- /dev/null +++ b/changelog/0.7.2_2017-09-13/issue-1132 @@ -0,0 +1,8 @@ +Enhancement: Make `key` command always prompt for a password + +The `key` command now prompts for a password even if the original password +to access a repo has been specified via the `RESTIC_PASSWORD` environment +variable or a password file. + +https://github.com/restic/restic/issues/1132 +https://github.com/restic/restic/pull/1133 diff --git a/changelog/0.7.2_2017-09-13/issue-1167 b/changelog/0.7.2_2017-09-13/issue-1167 new file mode 100644 index 000000000..aff17ca22 --- /dev/null +++ b/changelog/0.7.2_2017-09-13/issue-1167 @@ -0,0 +1,8 @@ +Bugfix: Do not create a local repo unless `init` is used + +When a restic command other than `init` is used with a local repository and the +repository directory does not exist, restic creates the directory structure. +That's an error, only the `init` command should create the dir. + +https://github.com/restic/restic/issues/1167 +https://github.com/restic/restic/pull/1182 diff --git a/changelog/0.7.2_2017-09-13/issue-1179 b/changelog/0.7.2_2017-09-13/issue-1179 new file mode 100644 index 000000000..d86b353d1 --- /dev/null +++ b/changelog/0.7.2_2017-09-13/issue-1179 @@ -0,0 +1,4 @@ +Enhancement: Resolve name conflicts, append a counter + +https://github.com/restic/restic/issues/1179 +https://github.com/restic/restic/pull/1209 diff --git a/changelog/0.7.2_2017-09-13/issue-1208 b/changelog/0.7.2_2017-09-13/issue-1208 new file mode 100644 index 000000000..fdd9489d9 --- /dev/null +++ b/changelog/0.7.2_2017-09-13/issue-1208 @@ -0,0 +1,7 @@ +Enhancement: Add `--compact` to `snapshots` command + +The option `--compact` was added to the `snapshots` command to get a better +overview of the snapshots in a repo. It limits each snapshot to a single line. + +https://github.com/restic/restic/issues/1218 +https://github.com/restic/restic/pull/1223 diff --git a/changelog/0.7.2_2017-09-13/issue-317 b/changelog/0.7.2_2017-09-13/issue-317 new file mode 100644 index 000000000..710c131fa --- /dev/null +++ b/changelog/0.7.2_2017-09-13/issue-317 @@ -0,0 +1,10 @@ +Enhancement: Add `--exclude-caches` and `--exclude-if-present` + +A new option `--exclude-caches` was added that allows excluding cache +directories (that are tagged as such). This is a special case of a more generic +option `--exclude-if-present` which excludes a directory if a file with a +specific name (and contents) is present. 
+ +https://github.com/restic/restic/issues/317 +https://github.com/restic/restic/pull/1170 +https://github.com/restic/restic/pull/1224 diff --git a/changelog/0.7.2_2017-09-13/issues-697 b/changelog/0.7.2_2017-09-13/issues-697 new file mode 100644 index 000000000..50bab3990 --- /dev/null +++ b/changelog/0.7.2_2017-09-13/issues-697 @@ -0,0 +1,4 @@ +Enhancement: Automatically generate man pages for all restic commands + +https://github.com/restic/restic/issues/697 +https://github.com/restic/restic/pull/1147 diff --git a/changelog/0.7.2_2017-09-13/pull-1044 b/changelog/0.7.2_2017-09-13/pull-1044 new file mode 100644 index 000000000..97250cf6b --- /dev/null +++ b/changelog/0.7.2_2017-09-13/pull-1044 @@ -0,0 +1,3 @@ +Enhancement: Improve `restore`, do not traverse/load excluded directories + +https://github.com/restic/restic/pull/1044 diff --git a/changelog/0.7.2_2017-09-13/pull-1061 b/changelog/0.7.2_2017-09-13/pull-1061 new file mode 100644 index 000000000..3ec247c67 --- /dev/null +++ b/changelog/0.7.2_2017-09-13/pull-1061 @@ -0,0 +1,3 @@ +Enhancement: Add Dockerfile and official Docker image + +https://github.com/restic/restic/pull/1061 diff --git a/changelog/0.7.2_2017-09-13/pull-1126 b/changelog/0.7.2_2017-09-13/pull-1126 new file mode 100644 index 000000000..fe6f83e4f --- /dev/null +++ b/changelog/0.7.2_2017-09-13/pull-1126 @@ -0,0 +1,7 @@ +Enhancement: Use the standard Go git repository layout, use `dep` for vendoring + +The git repository layout was changed to resemble the layout typically used in +Go projects, we're not using `gb` for building restic any more and vendoring +the dependencies is now taken care of by `dep`. + +https://github.com/restic/restic/pull/1126 diff --git a/changelog/0.7.2_2017-09-13/pull-1134 b/changelog/0.7.2_2017-09-13/pull-1134 new file mode 100644 index 000000000..c1cd9c5bd --- /dev/null +++ b/changelog/0.7.2_2017-09-13/pull-1134 @@ -0,0 +1,5 @@ +Enhancement: Add support for storing backups on Google Cloud Storage + +https://github.com/restic/restic/pull/1134 +https://github.com/restic/restic/pull/1052 +https://github.com/restic/restic/issues/211 diff --git a/changelog/0.7.2_2017-09-13/pull-1144 b/changelog/0.7.2_2017-09-13/pull-1144 new file mode 100644 index 000000000..46dff7338 --- /dev/null +++ b/changelog/0.7.2_2017-09-13/pull-1144 @@ -0,0 +1,3 @@ +Enhancement: Properly report errors when reading files with exclude patterns + +https://github.com/restic/restic/pull/1144 diff --git a/changelog/0.7.2_2017-09-13/pull-1149 b/changelog/0.7.2_2017-09-13/pull-1149 new file mode 100644 index 000000000..b271a6066 --- /dev/null +++ b/changelog/0.7.2_2017-09-13/pull-1149 @@ -0,0 +1,8 @@ +Enhancement: Add support for storing backups on Microsoft Azure Blob Storage + +The library we're using to access the service requires Go 1.8, so restic now +needs at least Go 1.8. 
+ +https://github.com/restic/restic/pull/1149 +https://github.com/restic/restic/pull/1059 +https://github.com/restic/restic/issues/609 diff --git a/changelog/0.7.2_2017-09-13/pull-1164 b/changelog/0.7.2_2017-09-13/pull-1164 new file mode 100644 index 000000000..95a06b0a8 --- /dev/null +++ b/changelog/0.7.2_2017-09-13/pull-1164 @@ -0,0 +1,3 @@ +Bugfix: Make the `key remove` command behave as documented + +https://github.com/restic/restic/pull/1164 diff --git a/changelog/0.7.2_2017-09-13/pull-1191 b/changelog/0.7.2_2017-09-13/pull-1191 new file mode 100644 index 000000000..25bb4d3f5 --- /dev/null +++ b/changelog/0.7.2_2017-09-13/pull-1191 @@ -0,0 +1,8 @@ +Bugfix: Make sure to write profiling files on interrupt + +Since a few releases restic had the ability to write profiling files for memory +and CPU usage when `debug` is enabled. It was discovered that when restic is +interrupted (ctrl+c is pressed), the proper shutdown hook is not run. This is +now corrected. + +https://github.com/restic/restic/pull/1191 diff --git a/changelog/0.7.2_2017-09-13/pull-1196 b/changelog/0.7.2_2017-09-13/pull-1196 new file mode 100644 index 000000000..7a2520529 --- /dev/null +++ b/changelog/0.7.2_2017-09-13/pull-1196 @@ -0,0 +1,3 @@ +Enhancement: Add `--group-by` to `forget` command for flexible grouping + +https://github.com/restic/restic/pull/1196 diff --git a/changelog/0.7.2_2017-09-13/pull-1203 b/changelog/0.7.2_2017-09-13/pull-1203 new file mode 100644 index 000000000..44d15a97a --- /dev/null +++ b/changelog/0.7.2_2017-09-13/pull-1203 @@ -0,0 +1,5 @@ +Enhancement: Print stats on all BSD systems when SIGINFO (ctrl+t) is received + +https://github.com/restic/restic/pull/1203 +https://github.com/restic/restic/pull/1082#issuecomment-326279920 + diff --git a/changelog/0.7.2_2017-09-13/pull-1205 b/changelog/0.7.2_2017-09-13/pull-1205 new file mode 100644 index 000000000..be80c8217 --- /dev/null +++ b/changelog/0.7.2_2017-09-13/pull-1205 @@ -0,0 +1,3 @@ +Enhancement: Allow specifying time/date for a backup with `--time` + +https://github.com/restic/restic/pull/1205 diff --git a/changelog/0.7.3_2017-09-20/issue-1246 b/changelog/0.7.3_2017-09-20/issue-1246 new file mode 100644 index 000000000..550d71089 --- /dev/null +++ b/changelog/0.7.3_2017-09-20/issue-1246 @@ -0,0 +1,9 @@ +Bugfix: List all files stored in Google Cloud Storage + +For large backups stored in Google Cloud Storage, the `prune` command fails +because listing only returns the first 1000 files. This has been corrected, no +data is lost in the process. In addition, a plausibility check was added to +`prune`. + +https://github.com/restic/restic/issues/1246 +https://github.com/restic/restic/pull/1247 diff --git a/changelog/0.8.0_2017-11-26/issue-1102 b/changelog/0.8.0_2017-11-26/issue-1102 new file mode 100644 index 000000000..6c7f6ed57 --- /dev/null +++ b/changelog/0.8.0_2017-11-26/issue-1102 @@ -0,0 +1,9 @@ +Enhancement: Add subdirectory `ids` to fuse mount + +The fuse mount now has an `ids` subdirectory which contains the snapshots below +their (short) IDs. 
+ +https://github.com/restic/restic/issues/1102 +https://github.com/restic/restic/pull/1299 +https://github.com/restic/restic/pull/1320 + diff --git a/changelog/0.8.0_2017-11-26/issue-1114 b/changelog/0.8.0_2017-11-26/issue-1114 new file mode 100644 index 000000000..8722a112c --- /dev/null +++ b/changelog/0.8.0_2017-11-26/issue-1114 @@ -0,0 +1,10 @@ +Enhancement: Add `--cacert` to specify TLS certificates to check against + +We've added the `--cacert` option which can be used to pass one (or more) CA +certificates to restic. These are used in addition to the system CA +certificates to verify HTTPS certificates (e.g. for the REST backend). + +https://github.com/restic/restic/issues/1114 +https://github.com/restic/restic/pull/1276 + + diff --git a/changelog/0.8.0_2017-11-26/issue-1216 b/changelog/0.8.0_2017-11-26/issue-1216 new file mode 100644 index 000000000..44fce448c --- /dev/null +++ b/changelog/0.8.0_2017-11-26/issue-1216 @@ -0,0 +1,9 @@ +Enhancement: Add upload/download limiting + +We've added support for rate limiting through `--limit-upload` and +`--limit-download` flags. + +https://github.com/restic/restic/issues/1216 +https://github.com/restic/restic/pull/1336 +https://github.com/restic/restic/pull/1358 + diff --git a/changelog/0.8.0_2017-11-26/issue-1256 b/changelog/0.8.0_2017-11-26/issue-1256 new file mode 100644 index 000000000..e42dffc29 --- /dev/null +++ b/changelog/0.8.0_2017-11-26/issue-1256 @@ -0,0 +1,7 @@ +Bugfix: Re-enable workaround for S3 backend + +We've re-enabled a workaround for `minio-go` (the library we're using to +access s3 backends), this reduces memory usage. + +https://github.com/restic/restic/issues/1256 +https://github.com/restic/restic/pull/1267 diff --git a/changelog/0.8.0_2017-11-26/issue-1271 b/changelog/0.8.0_2017-11-26/issue-1271 new file mode 100644 index 000000000..f818b4202 --- /dev/null +++ b/changelog/0.8.0_2017-11-26/issue-1271 @@ -0,0 +1,6 @@ +Enhancement: Cache results for excludes for `backup` + +The `backup` command now caches the result of excludes for a directory. + +https://github.com/restic/restic/issues/1271 +https://github.com/restic/restic/pull/1326 diff --git a/changelog/0.8.0_2017-11-26/issue-1274 b/changelog/0.8.0_2017-11-26/issue-1274 new file mode 100644 index 000000000..97f05e1dd --- /dev/null +++ b/changelog/0.8.0_2017-11-26/issue-1274 @@ -0,0 +1,9 @@ +Enhancement: Add `generate` command, replaces `manpage` and `autocomplete` + +The `generate` command has been added, which replaces the now removed +commands `manpage` and `autocomplete`. This release of restic contains the +most recent manpages in `doc/man` and the auto-completion files for bash and +zsh in `doc/bash-completion.sh` and `doc/zsh-completion.zsh` + +https://github.com/restic/restic/issues/1274 +https://github.com/restic/restic/pull/1282 diff --git a/changelog/0.8.0_2017-11-26/issue-1291 b/changelog/0.8.0_2017-11-26/issue-1291 new file mode 100644 index 000000000..04eff4e8c --- /dev/null +++ b/changelog/0.8.0_2017-11-26/issue-1291 @@ -0,0 +1,8 @@ +Bugfix: Reuse backend TCP connections to BackBlaze B2 + +A bug was discovered in the library we're using to access Backblaze, it now +reuses already established TCP connections which should be a lot faster and +not cause network failures any more. 
+ +https://github.com/restic/restic/issues/1291 +https://github.com/restic/restic/pull/1301 diff --git a/changelog/0.8.0_2017-11-26/issue-1367 b/changelog/0.8.0_2017-11-26/issue-1367 new file mode 100644 index 000000000..5b1c3f211 --- /dev/null +++ b/changelog/0.8.0_2017-11-26/issue-1367 @@ -0,0 +1,7 @@ +Enhancement: Allow comments in files read from via `--file-from` + +When the list of files/dirs to be saved is read from a file with +`--files-from`, comment lines (starting with `#`) are now ignored. + +https://github.com/restic/restic/issues/1367 +https://github.com/restic/restic/pull/1368 diff --git a/changelog/0.8.0_2017-11-26/issue-1445 b/changelog/0.8.0_2017-11-26/issue-1445 new file mode 100644 index 000000000..7027e5cb6 --- /dev/null +++ b/changelog/0.8.0_2017-11-26/issue-1445 @@ -0,0 +1,18 @@ +Security: Prevent writing outside the target directory during restore + +A vulnerability was found in the restic restorer, which allowed attackers in +special circumstances to restore files to a location outside of the target +directory. Due to the circumstances we estimate this to be a low-risk +vulnerability, but urge all users to upgrade to the latest version of restic. + +Exploiting the vulnerability requires a Linux/Unix system which saves backups +via restic and a Windows systems which restores files from the repo. In +addition, the attackers need to be able to create create files with arbitrary +names which are then saved to the restic repo. For example, by creating a file +named "..\test.txt" (which is a perfectly legal filename on Linux) and +restoring a snapshot containing this file on Windows, it would be written to +the parent of the target directory. + +We'd like to thank Tyler Spivey for reporting this responsibly! + +https://github.com/restic/restic/pull/1445 diff --git a/changelog/0.8.0_2017-11-26/issue-448 b/changelog/0.8.0_2017-11-26/issue-448 new file mode 100644 index 000000000..535d5dd1e --- /dev/null +++ b/changelog/0.8.0_2017-11-26/issue-448 @@ -0,0 +1,9 @@ +Enhancement: sftp backend prompts for password + +The sftp backend now prompts for the password if a password is necessary for +login. + +https://github.com/restic/restic/issues/448 +https://github.com/restic/restic/pull/1270 + + diff --git a/changelog/0.8.0_2017-11-26/issue-510 b/changelog/0.8.0_2017-11-26/issue-510 new file mode 100644 index 000000000..702601e99 --- /dev/null +++ b/changelog/0.8.0_2017-11-26/issue-510 @@ -0,0 +1,7 @@ +Enhancement: Add `dump` command + +We've added the `dump` command which prints a file from a snapshot to +stdout. This can e.g. be used to restore files read with `backup --stdin`. + +https://github.com/restic/restic/issues/510 +https://github.com/restic/restic/pull/1346 diff --git a/changelog/0.8.0_2017-11-26/pull-1040 b/changelog/0.8.0_2017-11-26/pull-1040 new file mode 100644 index 000000000..b39ee2fee --- /dev/null +++ b/changelog/0.8.0_2017-11-26/pull-1040 @@ -0,0 +1,23 @@ +Enhancement: Add local metadata cache + +We've added a local cache for metadata so that restic doesn't need to load +all metadata (snapshots, indexes, ...) from the repo each time it starts. By +default the cache is active, but there's a new global option `--no-cache` +that can be used to disable the cache. By deafult, the cache a standard +cache folder for the OS, which can be overridden with `--cache-dir`. The +cache will automatically populate, indexes and snapshots are saved as they +are loaded. 
Cache directories for repos that haven't been used recently can +automatically be removed by restic with the `--cleanup-cache` option. + +A related change was to by default create pack files in the repo that contain +either data or metadata, not both mixed together. This allows easy caching of +only the metadata files. The next run of `restic prune` will untangle mixed +files automatically. + +https://github.com/restic/restic/pull/1040 +https://github.com/restic/restic/issues/29 +https://github.com/restic/restic/issues/738 +https://github.com/restic/restic/issues/282 +https://github.com/restic/restic/pull/1287 +https://github.com/restic/restic/pull/1436 +https://github.com/restic/restic/pull/1265 diff --git a/changelog/0.8.0_2017-11-26/pull-1249 b/changelog/0.8.0_2017-11-26/pull-1249 new file mode 100644 index 000000000..48db604b4 --- /dev/null +++ b/changelog/0.8.0_2017-11-26/pull-1249 @@ -0,0 +1,6 @@ +Enhancement: Add `latest` symlink in fuse mount + +The directory structure in the fuse mount now exposes a symlink `latest` +which points to the latest snapshot in that particular directory. + +https://github.com/restic/restic/pull/1249 diff --git a/changelog/0.8.0_2017-11-26/pull-1269 b/changelog/0.8.0_2017-11-26/pull-1269 new file mode 100644 index 000000000..fd36334bc --- /dev/null +++ b/changelog/0.8.0_2017-11-26/pull-1269 @@ -0,0 +1,6 @@ +Enhancement: Add `--compact` to `forget` command + +The option `--compact` was added to the `forget` command to provide the same +compact view as the `snapshots` command. + +https://github.com/restic/restic/pull/1269 diff --git a/changelog/0.8.0_2017-11-26/pull-1281 b/changelog/0.8.0_2017-11-26/pull-1281 new file mode 100644 index 000000000..1f5a8ab06 --- /dev/null +++ b/changelog/0.8.0_2017-11-26/pull-1281 @@ -0,0 +1,7 @@ +Enhancement: Google Cloud Storage backend needs less permissions + +The Google Cloud Storage backend no longer requires the service account to +have the `storage.buckets.get` permission ("Storage Admin" role) in `restic +init` if the bucket already exists. + +https://github.com/restic/restic/pull/1281 diff --git a/changelog/0.8.0_2017-11-26/pull-1317 b/changelog/0.8.0_2017-11-26/pull-1317 new file mode 100644 index 000000000..e3249c9ca --- /dev/null +++ b/changelog/0.8.0_2017-11-26/pull-1317 @@ -0,0 +1,7 @@ +Bugfix: Run prune when `forget --prune` is called with just snapshot IDs + +A bug in the `forget` command caused `prune` not to be run when `--prune` was +specified without a policy, e.g. when only snapshot IDs that should be +forgotten are listed manually. + +https://github.com/restic/restic/pull/1317 diff --git a/changelog/0.8.0_2017-11-26/pull-1319 b/changelog/0.8.0_2017-11-26/pull-1319 new file mode 100644 index 000000000..d74a3f947 --- /dev/null +++ b/changelog/0.8.0_2017-11-26/pull-1319 @@ -0,0 +1,8 @@ +Enhancement: Make `check` print `no errors found` explicitly + +The `check` command now explicetly prints `No errors were found` when no errors +could be found. 
+ +https://github.com/restic/restic/pull/1319 +https://github.com/restic/restic/issues/1303 + diff --git a/changelog/0.8.0_2017-11-26/pull-1353 b/changelog/0.8.0_2017-11-26/pull-1353 new file mode 100644 index 000000000..ac3423329 --- /dev/null +++ b/changelog/0.8.0_2017-11-26/pull-1353 @@ -0,0 +1,3 @@ +Enhancement: Retry failed backend requests + +https://github.com/restic/restic/pull/1353 diff --git a/changelog/0.8.0_2017-11-26/pull-1437 b/changelog/0.8.0_2017-11-26/pull-1437 new file mode 100644 index 000000000..906504cdc --- /dev/null +++ b/changelog/0.8.0_2017-11-26/pull-1437 @@ -0,0 +1,10 @@ +Bugfix: Remove implicit path `/restic` for the s3 backend + +The s3 backend used the subdir `restic` within a bucket if no explicit path +after the bucket name was specified. Since this version, restic does not use +this default path any more. If you created a repo on s3 in a bucket without +specifying a path within the bucket, you need to add `/restic` at the end of +the repository specification to access your repo: `s3:s3.amazonaws.com/bucket/restic` + +https://github.com/restic/restic/pull/1437 +https://github.com/restic/restic/issues/1292 diff --git a/changelog/0.8.1_2017-12-27/issue-1457 b/changelog/0.8.1_2017-12-27/issue-1457 new file mode 100644 index 000000000..538a8a648 --- /dev/null +++ b/changelog/0.8.1_2017-12-27/issue-1457 @@ -0,0 +1,4 @@ +Bugfix: Improve s3 backend with DigitalOcean Spaces + +https://github.com/restic/restic/issues/1457 +https://github.com/restic/restic/pull/1459 diff --git a/changelog/0.8.1_2017-12-27/pull-1436 b/changelog/0.8.1_2017-12-27/pull-1436 new file mode 100644 index 000000000..4098575bd --- /dev/null +++ b/changelog/0.8.1_2017-12-27/pull-1436 @@ -0,0 +1,9 @@ +Enhancement: Add code to detect old cache directories + +We've added code to detect old cache directories of repositories that +haven't been used in a long time, restic now prints a note when it detects +that such dirs exist. Also, the option `--cleanup-cache` was added to +automatically remove such directories. That's not a problem because the +cache will be rebuild once a repo is accessed again. + +https://github.com/restic/restic/pull/1436 diff --git a/changelog/0.8.1_2017-12-27/pull-1439 b/changelog/0.8.1_2017-12-27/pull-1439 new file mode 100644 index 000000000..50c23a5ca --- /dev/null +++ b/changelog/0.8.1_2017-12-27/pull-1439 @@ -0,0 +1,6 @@ +Enhancement: Improve cancellation logic + +The cancellation logic was improved, restic can now shut down cleanly when +requested to do so (e.g. via ctrl+c). + +https://github.com/restic/restic/pull/1439 diff --git a/changelog/0.8.1_2017-12-27/pull-1452 b/changelog/0.8.1_2017-12-27/pull-1452 new file mode 100644 index 000000000..a3ee2eff6 --- /dev/null +++ b/changelog/0.8.1_2017-12-27/pull-1452 @@ -0,0 +1,9 @@ +Change: Do not save atime by default + +By default, the access time for files and dirs is not saved any more. It is +not possible to reliably disable updating the access time during a backup, +so for the next backup the access time is different again. This means a lot +of metadata is saved. If you want to save the access time anyway, pass +`--with-atime` to the `backup` command. 
+ +https://github.com/restic/restic/pull/1452 diff --git a/changelog/0.8.1_2017-12-27/pull-1454 b/changelog/0.8.1_2017-12-27/pull-1454 new file mode 100644 index 000000000..f5f99614f --- /dev/null +++ b/changelog/0.8.1_2017-12-27/pull-1454 @@ -0,0 +1,6 @@ +Bugfix: Correct cache dir location for Windows and Darwin + +The cache directory on Windows and Darwin was not correct, instead the +directory `.cache` was used. + +https://github.com/restic/restic/pull/1454 diff --git a/changelog/0.8.1_2017-12-27/pull-1459 b/changelog/0.8.1_2017-12-27/pull-1459 new file mode 100644 index 000000000..ea9e55c70 --- /dev/null +++ b/changelog/0.8.1_2017-12-27/pull-1459 @@ -0,0 +1,9 @@ +Bugfix: Disable handling SIGPIPE + +We've disabled handling SIGPIPE again. Turns out, writing to broken TCP +connections also raised SIGPIPE, so restic exits on the first write to a +broken connection. Instead, restic should retry the request. + +https://github.com/restic/restic/pull/1459 +https://github.com/restic/restic/issues/1457 +https://github.com/restic/restic/issues/1466 diff --git a/changelog/0.8.1_2017-12-27/pull-1462 b/changelog/0.8.1_2017-12-27/pull-1462 new file mode 100644 index 000000000..9c83941d4 --- /dev/null +++ b/changelog/0.8.1_2017-12-27/pull-1462 @@ -0,0 +1,8 @@ +Enhancement: Add the `diff` command + +The command `diff` was added, it allows comparing two snapshots and listing +all differences. + +https://github.com/restic/restic/issues/11 +https://github.com/restic/restic/issues/1460 +https://github.com/restic/restic/pull/1462 diff --git a/changelog/0.8.2_2018-02-17/issue-1506 b/changelog/0.8.2_2018-02-17/issue-1506 new file mode 100644 index 000000000..5f0122529 --- /dev/null +++ b/changelog/0.8.2_2018-02-17/issue-1506 @@ -0,0 +1,4 @@ +Bugfix: Limit bandwith at the http.RoundTripper for HTTP based backends + +https://github.com/restic/restic/issues/1506 +https://github.com/restic/restic/pull/1511 diff --git a/changelog/0.8.2_2018-02-17/issue-1512 b/changelog/0.8.2_2018-02-17/issue-1512 new file mode 100644 index 000000000..4e30fbbf8 --- /dev/null +++ b/changelog/0.8.2_2018-02-17/issue-1512 @@ -0,0 +1,9 @@ +Bugfix: Restore directory permissions as the last step + +This change allows restoring into directories that were not writable during +backup. Before, restic created the directory, set the read-only mode and then +failed to create files in the directory. This change now restores the directory +(with its permissions) as the very last step. + +https://github.com/restic/restic/issues/1512 +https://github.com/restic/restic/pull/1536 diff --git a/changelog/0.8.2_2018-02-17/issue-1522 b/changelog/0.8.2_2018-02-17/issue-1522 new file mode 100644 index 000000000..57e0501c3 --- /dev/null +++ b/changelog/0.8.2_2018-02-17/issue-1522 @@ -0,0 +1,8 @@ +Enhancement: Add support for TLS client certificate authentication + +Support has been added for using a TLS client certificate for authentication to +HTTP based backend. A file containing the PEM encoded private key and +certificate can be set using the `--tls-client-cert` option. 
+ +https://github.com/restic/restic/issues/1522 +https://github.com/restic/restic/pull/1524 diff --git a/changelog/0.8.2_2018-02-17/issue-1528 b/changelog/0.8.2_2018-02-17/issue-1528 new file mode 100644 index 000000000..db36f68c0 --- /dev/null +++ b/changelog/0.8.2_2018-02-17/issue-1528 @@ -0,0 +1,4 @@ +Bugfix: Correctly create missing subdirs in data/ + +https://github.com/restic/restic/issues/1528 +https://github.com/restic/restic/pull/1529 diff --git a/changelog/0.8.2_2018-02-17/issue-1541 b/changelog/0.8.2_2018-02-17/issue-1541 new file mode 100644 index 000000000..7cd4708ae --- /dev/null +++ b/changelog/0.8.2_2018-02-17/issue-1541 @@ -0,0 +1,7 @@ +Enhancement: Reduce number of remote requests during repository check + +This change eliminates redundant remote repository calls and significantly +improves repository check time. + +https://github.com/restic/restic/issues/1541 +https://github.com/restic/restic/pull/1548 diff --git a/changelog/0.8.2_2018-02-17/issue-1567 b/changelog/0.8.2_2018-02-17/issue-1567 new file mode 100644 index 000000000..e112608ef --- /dev/null +++ b/changelog/0.8.2_2018-02-17/issue-1567 @@ -0,0 +1,9 @@ +Enhancement: Reduce number of backend requests for rebuild-index and prune + +We've found a way to reduce then number of backend requests for the +`rebuild-index` and `prune` operations. This significantly speeds up the +operations for high-latency backends. + +https://github.com/restic/restic/issues/1567 +https://github.com/restic/restic/pull/1574 +https://github.com/restic/restic/pull/1575 diff --git a/changelog/0.8.2_2018-02-17/issue-1590 b/changelog/0.8.2_2018-02-17/issue-1590 new file mode 100644 index 000000000..47000b435 --- /dev/null +++ b/changelog/0.8.2_2018-02-17/issue-1590 @@ -0,0 +1,7 @@ +Bugfix: Strip spaces for lines read via --files-from + +Leading and trailing spaces in lines read via `--files-from` are now stripped, +so it behaves the same as with lines read via `--exclude-file`. + +https://github.com/restic/restic/issues/1590 +https://github.com/restic/restic/pull/1613 diff --git a/changelog/0.8.2_2018-02-17/pull-1507 b/changelog/0.8.2_2018-02-17/pull-1507 new file mode 100644 index 000000000..8daab8a8f --- /dev/null +++ b/changelog/0.8.2_2018-02-17/pull-1507 @@ -0,0 +1,3 @@ +Enhancement: Only reload snapshots once per minute for fuse mount + +https://github.com/restic/restic/pull/1507 diff --git a/changelog/0.8.2_2018-02-17/pull-1538 b/changelog/0.8.2_2018-02-17/pull-1538 new file mode 100644 index 000000000..0b1a131b8 --- /dev/null +++ b/changelog/0.8.2_2018-02-17/pull-1538 @@ -0,0 +1,7 @@ +Enhancement: Reduce memory allocations for querying the index + +This change reduces the internal memory allocations when the index data +structures in memory are queried if a blob (part of a file) already exists in +the repo. It should speed up backup a bit, and maybe even reduce RAM usage. + +https://github.com/restic/restic/pull/1538 diff --git a/changelog/0.8.2_2018-02-17/pull-1549 b/changelog/0.8.2_2018-02-17/pull-1549 new file mode 100644 index 000000000..8b6c60a87 --- /dev/null +++ b/changelog/0.8.2_2018-02-17/pull-1549 @@ -0,0 +1,7 @@ +Enhancement: Speed up querying across indices and scanning existing files + +This change increases the whenever a blob (part of a file) is searched for in a +restic repository. This will reduce cpu usage some when backing up files already +backed up by restic. Cpu usage is further decreased when scanning files. 
+ +https://github.com/restic/restic/pull/1549 diff --git a/changelog/0.8.2_2018-02-17/pull-1554 b/changelog/0.8.2_2018-02-17/pull-1554 new file mode 100644 index 000000000..9a528084a --- /dev/null +++ b/changelog/0.8.2_2018-02-17/pull-1554 @@ -0,0 +1,7 @@ +Enhancement: fuse/mount: Correctly handle EOF, add template option + +We've added the `--snapshot-template` string, which can be used to specify a +template for a snapshot directory. In addition, accessing data after the end of +a file via the fuse mount is now handled correctly. + +https://github.com/restic/restic/pull/1554 diff --git a/changelog/0.8.2_2018-02-17/pull-1564 b/changelog/0.8.2_2018-02-17/pull-1564 new file mode 100644 index 000000000..52d513295 --- /dev/null +++ b/changelog/0.8.2_2018-02-17/pull-1564 @@ -0,0 +1,10 @@ +Enhancement: Don't terminate ssh on SIGINT + +We've reworked the code which runs the `ssh` login for the sftp backend so that +it can prompt for a password (if needed) but does not exit when the user +presses CTRL+C (SIGINT) e.g. during backup. This allows restic to properly shut +down when it receives SIGINT and remove the lock file from the repo, afterwards +exiting the `ssh` process. + +https://github.com/restic/restic/pull/1564 +https://github.com/restic/restic/pull/1588 diff --git a/changelog/0.8.2_2018-02-17/pull-1579 b/changelog/0.8.2_2018-02-17/pull-1579 new file mode 100644 index 000000000..5f86c43fe --- /dev/null +++ b/changelog/0.8.2_2018-02-17/pull-1579 @@ -0,0 +1,3 @@ +Enhancement: Retry Backend.List() in case of errors + +https://github.com/restic/restic/pull/1579 diff --git a/changelog/0.8.2_2018-02-17/pull-1584 b/changelog/0.8.2_2018-02-17/pull-1584 new file mode 100644 index 000000000..3553b41fd --- /dev/null +++ b/changelog/0.8.2_2018-02-17/pull-1584 @@ -0,0 +1,12 @@ +Enhancement: Limit index file size + +Before, restic would create a single new index file on `prune` or +`rebuild-index`, this may lead to memory problems when this huge index is +created and loaded again. We're now limiting the size of the index file, and +split newly created index files into several smaller ones. This allows restic +to be more memory-efficient. + +https://github.com/restic/restic/pull/1584 +https://github.com/restic/restic/issues/1412 +https://github.com/restic/restic/issues/979 +https://github.com/restic/restic/issues/526 diff --git a/changelog/0.8.2_2018-02-17/pull-1589 b/changelog/0.8.2_2018-02-17/pull-1589 new file mode 100644 index 000000000..33d013a5b --- /dev/null +++ b/changelog/0.8.2_2018-02-17/pull-1589 @@ -0,0 +1,17 @@ +Bugfix: Complete intermediate index upload + +After a user posted a comprehensive report of what he observed, we were able to +find a bug and correct it: During backup, restic uploads so-called +"intermediate" index files. When the backup finishes during a transfer of such +an intermediate index, the upload is cancelled, but the backup is finished +without an error. This leads to an inconsistent state, where the snapshot +references data that is contained in the repo, but is not referenced in any +index. + +The situation can be resolved by building a new index with `rebuild-index`, but +looks very confusing at first. Since all the data got uploaded to the repo +successfully, there was no risk of data loss, just minor inconvenience for our +users. 
+ +https://github.com/restic/restic/pull/1589 +https://forum.restic.net/t/error-loading-tree-check-prune-and-forget-gives-error-b2-backend/406 diff --git a/changelog/0.8.2_2018-02-17/pull-1594 b/changelog/0.8.2_2018-02-17/pull-1594 new file mode 100644 index 000000000..b160baa70 --- /dev/null +++ b/changelog/0.8.2_2018-02-17/pull-1594 @@ -0,0 +1,7 @@ +Bugfix: Google Cloud Storage: Use generic HTTP transport + +It was discovered that the Google Cloud Storage backend did not use the generic +HTTP transport, so things such as bandwidth limiting with `--limit-upload` did +not work. This is resolved now. + +https://github.com/restic/restic/pull/1594 diff --git a/changelog/0.8.2_2018-02-17/pull-1595 b/changelog/0.8.2_2018-02-17/pull-1595 new file mode 100644 index 000000000..81e0a8748 --- /dev/null +++ b/changelog/0.8.2_2018-02-17/pull-1595 @@ -0,0 +1,11 @@ +Bugfix: backup: Remove bandwidth display + +This commit removes the bandwidth displayed during backup process. It is +misleading and seldomly correct, because it's neither the "read +bandwidth" (only for the very first backup) nor the "upload bandwidth". +Many users are confused about (and rightly so), c.f. #1581, #1033, #1591 + +We'll eventually replace this display with something more relevant when +the new archiver code is ready. + +https://github.com/restic/restic/pull/1595 diff --git a/changelog/0.8.3_2018-02-26/issue-1497 b/changelog/0.8.3_2018-02-26/issue-1497 new file mode 100644 index 000000000..cfec352e3 --- /dev/null +++ b/changelog/0.8.3_2018-02-26/issue-1497 @@ -0,0 +1,8 @@ +Enhancement: Add --read-data-subset flag to check command + +This change introduces ability to check integrity of a subset of repository +data packs. This can be used to spread integrity check of larger repositories +over a period of time. + +https://github.com/restic/restic/issues/1497 +https://github.com/restic/restic/pull/1556 diff --git a/changelog/0.8.3_2018-02-26/issue-1633 b/changelog/0.8.3_2018-02-26/issue-1633 new file mode 100644 index 000000000..5b0e05ab8 --- /dev/null +++ b/changelog/0.8.3_2018-02-26/issue-1633 @@ -0,0 +1,7 @@ +Bugfix: Fixed unexpected 'pack file cannot be listed' error + +Due to a regression introduced in 0.8.2, the `rebuild-index` and `prune` +commands failed to read pack files with size of 587, 588, 589 or 590 bytes. + +https://github.com/restic/restic/issues/1633 +https://github.com/restic/restic/pull/1635 diff --git a/changelog/0.8.3_2018-02-26/issue-1641 b/changelog/0.8.3_2018-02-26/issue-1641 new file mode 100644 index 000000000..09a886065 --- /dev/null +++ b/changelog/0.8.3_2018-02-26/issue-1641 @@ -0,0 +1,10 @@ +Bugfix: Ignore files with invalid names in the repo + +The release 0.8.2 introduced a bug: when restic encounters files in the repo +which do not have a valid name, it tries to load a file with a name of lots of +zeroes instead of ignoring it. This is now resolved, invalid file names are +just ignored. + +https://github.com/restic/restic/issues/1641 +https://github.com/restic/restic/pull/1643 +https://forum.restic.net/t/help-fixing-repo-no-such-file/485/3 diff --git a/changelog/0.8.3_2018-02-26/pull-1560 b/changelog/0.8.3_2018-02-26/pull-1560 new file mode 100644 index 000000000..0462442db --- /dev/null +++ b/changelog/0.8.3_2018-02-26/pull-1560 @@ -0,0 +1,5 @@ +Enhancement: Retry all repository file download errors + +Restic will now retry failed downloads, similar to other operations. 
+ +https://github.com/restic/restic/pull/1560 diff --git a/changelog/0.8.3_2018-02-26/pull-1623 b/changelog/0.8.3_2018-02-26/pull-1623 new file mode 100644 index 000000000..0e03ee776 --- /dev/null +++ b/changelog/0.8.3_2018-02-26/pull-1623 @@ -0,0 +1,12 @@ +Enhancement: Don't check for presence of files in the backend before writing + +Before, all backend implementations were required to return an error if the +file that is to be written already exists in the backend. For most backends, +that means making a request (e.g. via HTTP) and returning an error when the +file already exists. + +This is not accurate, the file could have been created between the HTTP request +testing for it, and when writing starts, so we've relaxed this requeriment, +which saves one additional HTTP request per newly added file. + +https://github.com/restic/restic/pull/1623 diff --git a/changelog/0.8.3_2018-02-26/pull-1634 b/changelog/0.8.3_2018-02-26/pull-1634 new file mode 100644 index 000000000..3a2a8aa3d --- /dev/null +++ b/changelog/0.8.3_2018-02-26/pull-1634 @@ -0,0 +1,7 @@ +Enhancement: Upgrade B2 client library, reduce HTTP requests + +We've upgraded the B2 client library restic uses to access BackBlaze B2. This +reduces the number of HTTP requests needed to upload a new file from two to +one, which should improve throughput to B2. + +https://github.com/restic/restic/pull/1634 diff --git a/changelog/0.8.3_2018-02-26/pull-1638 b/changelog/0.8.3_2018-02-26/pull-1638 new file mode 100644 index 000000000..d2697b3f1 --- /dev/null +++ b/changelog/0.8.3_2018-02-26/pull-1638 @@ -0,0 +1,16 @@ +Bugfix: Handle errors listing files in the backend + +A user reported in the forum that restic completes a backup although a +concurrent `prune` operation was running. A few error messages were printed, +but the backup was attempted and completed successfully. No error code was +returned. + +This should not happen: The repository is exclusively locked during `prune`, so +when `restic backup` is run in parallel, it should abort and return an error +code instead. + +It was found that the bug was in the code introduced only recently, which +retries a List() operation on the backend should that fail. It is now corrected. + +https://github.com/restic/restic/pull/1638 +https://forum.restic.net/t/restic-backup-returns-0-exit-code-when-already-locked/484 diff --git a/changelog/0.9.0_2018-05-21/issue-1433 b/changelog/0.9.0_2018-05-21/issue-1433 new file mode 100644 index 000000000..c35d19b20 --- /dev/null +++ b/changelog/0.9.0_2018-05-21/issue-1433 @@ -0,0 +1,12 @@ +Enhancement: Support UTF-16 encoding and process Byte Order Mark + +On Windows, text editors commonly leave a Byte Order Mark at the beginning of +the file to define which encoding is used (oftentimes UTF-16). We've added code +to support processing the BOMs in text files, like the exclude files, the +password file and the file passed via `--files-from`. This does not apply to +any file being saved in a backup, those are not touched and archived as they +are. + +https://github.com/restic/restic/issues/1433 +https://github.com/restic/restic/issues/1738 +https://github.com/restic/restic/pull/1748 diff --git a/changelog/0.9.0_2018-05-21/issue-1561 b/changelog/0.9.0_2018-05-21/issue-1561 new file mode 100644 index 000000000..5cca7f9a5 --- /dev/null +++ b/changelog/0.9.0_2018-05-21/issue-1561 @@ -0,0 +1,10 @@ +Enhancement: Allow using rclone to access other services + +We've added the ability to use rclone to store backup data on all backends that +it supports. 
This was done in collaboration with Nick, the author of rclone. +You can now use it to first configure a service, then restic manages the rest +(starting and stopping rclone). For details, please see the manual. + +https://github.com/restic/restic/issues/1561 +https://github.com/restic/restic/pull/1657 +https://rclone.org diff --git a/changelog/0.9.0_2018-05-21/issue-1608 b/changelog/0.9.0_2018-05-21/issue-1608 new file mode 100644 index 000000000..f29812204 --- /dev/null +++ b/changelog/0.9.0_2018-05-21/issue-1608 @@ -0,0 +1,7 @@ +Bugfix: Respect time stamp for new backup when reading from stdin + +When reading backups from stdin (via `restic backup --stdin`), restic now uses +the time stamp for the new backup passed in `--time`. + +https://github.com/restic/restic/issues/1608 +https://github.com/restic/restic/pull/1703 diff --git a/changelog/0.9.0_2018-05-21/issue-1652 b/changelog/0.9.0_2018-05-21/issue-1652 new file mode 100644 index 000000000..aefad7912 --- /dev/null +++ b/changelog/0.9.0_2018-05-21/issue-1652 @@ -0,0 +1,9 @@ +Bugfix: Ignore/remove invalid lock files + +This corrects a bug introduced recently: When an invalid lock file in the repo +is encountered (e.g. if the file is empty), the code used to ignore that, but +now returns the error. Now, invalid files are ignored for the normal lock +check, and removed when `restic unlock --remove-all` is run. + +https://github.com/restic/restic/issues/1652 +https://github.com/restic/restic/pull/1653 diff --git a/changelog/0.9.0_2018-05-21/issue-1665 b/changelog/0.9.0_2018-05-21/issue-1665 new file mode 100644 index 000000000..ba956edfb --- /dev/null +++ b/changelog/0.9.0_2018-05-21/issue-1665 @@ -0,0 +1,27 @@ +Enhancement: Improve cache handling for `restic check` + +For safety reasons, restic does not use a local metadata cache for the `restic +check` command, so that data is loaded from the repository and restic can check +it's in good condition. When the cache is disabled, restic will fetch each tiny +blob needed for checking the integrity using a separate backend request. For +non-local backends, that will take a long time, and depending on the backend +(e.g. B2) may also be much more expensive. + +This PR adds a few commits which will change the behavior as follows: + + * When `restic check` is called without any additional parameters, it will + build a new cache in a temporary directory, which is removed at the end of + the check. This way, we'll get readahead for metadata files (so restic will + fetch the whole file when the first blob from the file is requested), but + all data is freshly fetched from the storage backend. This is the default + behavior and will work for almost all users. + + * When `restic check` is called with `--with-cache`, the default on-disc cache + is used. This behavior hasn't changed since the cache was introduced. + + * When `--no-cache` is specified, restic falls back to the old behavior, and + read all tiny blobs in separate requests. + +https://github.com/restic/restic/issues/1665 +https://github.com/restic/restic/issues/1694 +https://github.com/restic/restic/pull/1696 diff --git a/changelog/0.9.0_2018-05-21/issue-1721 b/changelog/0.9.0_2018-05-21/issue-1721 new file mode 100644 index 000000000..bf168369c --- /dev/null +++ b/changelog/0.9.0_2018-05-21/issue-1721 @@ -0,0 +1,8 @@ +Enhancement: Add `cache` command to list cache dirs + +The command `cache` was added, it allows listing restic's cache directoriers +together with the last usage. 
It also allows removing old cache dirs without +having to access a repo, via `restic cache --cleanup` + +https://github.com/restic/restic/issues/1721 +https://github.com/restic/restic/pull/1749 diff --git a/changelog/0.9.0_2018-05-21/issue-1730 b/changelog/0.9.0_2018-05-21/issue-1730 new file mode 100644 index 000000000..c22476b87 --- /dev/null +++ b/changelog/0.9.0_2018-05-21/issue-1730 @@ -0,0 +1,11 @@ +Bugfix: Ignore sockets for restore + +We've received a report and correct the behavior in which the restore code +aborted restoring a directory when a socket was encountered. Unix domain socket +files cannot be restored (they are created on the fly once a process starts +listening). The error handling was corrected, and in addition we're now +ignoring sockets during restore. + +https://github.com/restic/restic/issues/1730 +https://github.com/restic/restic/pull/1731 + diff --git a/changelog/0.9.0_2018-05-21/issue-1758 b/changelog/0.9.0_2018-05-21/issue-1758 new file mode 100644 index 000000000..e96b4469a --- /dev/null +++ b/changelog/0.9.0_2018-05-21/issue-1758 @@ -0,0 +1,8 @@ +Enhancement: Allow saving OneDrive folders in Windows + +Restic now contains a bugfix to two libraries, which allows saving OneDrive +folders in Windows. In order to use the newer versions of the libraries, the +minimal version required to compile restic is now Go 1.9. + +https://github.com/restic/restic/issues/1758 +https://github.com/restic/restic/pull/1765 diff --git a/changelog/0.9.0_2018-05-21/issue-549 b/changelog/0.9.0_2018-05-21/issue-549 new file mode 100644 index 000000000..e2dbc3706 --- /dev/null +++ b/changelog/0.9.0_2018-05-21/issue-549 @@ -0,0 +1,43 @@ +Enhancement: Rework archiver code + +The core archiver code and the complementary code for the `backup` command was +rewritten completely. This resolves very annoying issues such as 549. The first +backup with this release of restic will likely result in all files being +re-read locally, so it will take a lot longer. The next backup after that will +be fast again. + +Basically, with the old code, restic took the last path component of each +to-be-saved file or directory as the top-level file/directory within the +snapshot. This meant that when called as `restic backup /home/user/foo`, the +snapshot would contain the files in the directory `/home/user/foo` as `/foo`. + +This is not the case any more with the new archiver code. Now, restic works +very similar to what `tar` does: When restic is called with an absolute path to +save, then it'll preserve the directory structure within the snapshot. For the +example above, the snapshot would contain the files in the directory within +`/home/user/foo` in the snapshot. For relative directories, it only preserves +the relative path components. So `restic backup user/foo` will save the files +as `/user/foo` in the snapshot. + +While we were at it, the status display and notification system was completely +rewritten. By default, restic now shows which files are currently read (unless +`--quiet` is specified) in a multi-line status display. + +The `backup` command also gained a new option: `--verbose`. It can be specified +once (which prints a bit more detail what restic is doing) or twice (which +prints a line for each file/directory restic encountered, together with some +statistics). + +Another issue that was resolved is the new code only reads two files at most. +The old code would read way too many files in parallel, thereby slowing down +the backup process on spinning discs a lot. 
+ +https://github.com/restic/restic/issues/549 +https://github.com/restic/restic/issues/1286 +https://github.com/restic/restic/issues/446 +https://github.com/restic/restic/issues/1344 +https://github.com/restic/restic/issues/1416 +https://github.com/restic/restic/issues/1456 +https://github.com/restic/restic/issues/1145 +https://github.com/restic/restic/issues/1160 +https://github.com/restic/restic/pull/1494 diff --git a/changelog/0.9.0_2018-05-21/pull-1552 b/changelog/0.9.0_2018-05-21/pull-1552 new file mode 100644 index 000000000..db9a4f042 --- /dev/null +++ b/changelog/0.9.0_2018-05-21/pull-1552 @@ -0,0 +1,13 @@ +Enhancement: Use Google Application Default credentials + +Google provide libraries to generate appropriate credentials with various +fallback sources. This change uses the library to generate our GCS client, which +allows us to make use of these extra methods. + +This should be backward compatible with previous restic behaviour while adding +the additional capabilities to auth from Google's internal metadata endpoints. +For users running restic in GCP this can make authentication far easier than it +was before. + +https://github.com/restic/restic/pull/1552 +https://developers.google.com/identity/protocols/application-default-credentials diff --git a/changelog/0.9.0_2018-05-21/pull-1647 b/changelog/0.9.0_2018-05-21/pull-1647 new file mode 100644 index 000000000..3fb356032 --- /dev/null +++ b/changelog/0.9.0_2018-05-21/pull-1647 @@ -0,0 +1,9 @@ +Enhancement: Accept AWS_SESSION_TOKEN for the s3 backend + +Before, it was not possible to use s3 backend with AWS temporary security +credentials(with AWS_SESSION_TOKEN). This change gives higher priority to +credentials.EnvAWS credentials provider. + +https://github.com/restic/restic/issues/1477 +https://github.com/restic/restic/pull/1479 +https://github.com/restic/restic/pull/1647 diff --git a/changelog/0.9.0_2018-05-21/pull-1648 b/changelog/0.9.0_2018-05-21/pull-1648 new file mode 100644 index 000000000..bbae0efda --- /dev/null +++ b/changelog/0.9.0_2018-05-21/pull-1648 @@ -0,0 +1,6 @@ +Enhancement: Ignore AWS permission denied error when creating a repository + +It's not possible to use s3 backend scoped to a subdirectory(with specific permissions). +Restic doesn't try to create repository in a subdirectory, when 'bucket exists' of parent directory check fails due to permission issues. + +https://github.com/restic/restic/pull/1648 diff --git a/changelog/0.9.0_2018-05-21/pull-1649 b/changelog/0.9.0_2018-05-21/pull-1649 new file mode 100644 index 000000000..d1224d700 --- /dev/null +++ b/changelog/0.9.0_2018-05-21/pull-1649 @@ -0,0 +1,3 @@ +Enhancement: Add illumos/Solaris support + +https://github.com/restic/restic/pull/1649 diff --git a/changelog/0.9.0_2018-05-21/pull-1684 b/changelog/0.9.0_2018-05-21/pull-1684 new file mode 100644 index 000000000..6e14f8650 --- /dev/null +++ b/changelog/0.9.0_2018-05-21/pull-1684 @@ -0,0 +1,6 @@ +Bugfix: Fix backend tests for rest-server + +The REST server for restic now requires an explicit parameter (`--no-auth`) if +no authentication should be allowed. This is fixed in the tests. 
+ +https://github.com/restic/restic/pull/1684 diff --git a/changelog/0.9.0_2018-05-21/pull-1709 b/changelog/0.9.0_2018-05-21/pull-1709 new file mode 100644 index 000000000..331693e3b --- /dev/null +++ b/changelog/0.9.0_2018-05-21/pull-1709 @@ -0,0 +1,7 @@ +Enhancement: Improve messages `restic check` prints + +Some messages `restic check` prints are not really errors, so from now on +restic does not treat them as errors any more and exits cleanly. + +https://github.com/restic/restic/pull/1709 +https://forum.restic.net/t/what-is-the-standard-procedure-to-follow-if-a-backup-or-restore-is-interrupted/571/2 diff --git a/changelog/0.9.0_2018-05-21/pull-1720 b/changelog/0.9.0_2018-05-21/pull-1720 new file mode 100644 index 000000000..90f23add4 --- /dev/null +++ b/changelog/0.9.0_2018-05-21/pull-1720 @@ -0,0 +1,7 @@ +Enhancement: Add --new-password-file flag for non-interactive password changes + +This makes it possible to change a repository password without being prompted. + +https://github.com/restic/restic/issues/827 +https://github.com/restic/restic/pull/1720 +https://forum.restic.net/t/changing-repo-password-without-prompt/591 diff --git a/changelog/0.9.0_2018-05-21/pull-1735 b/changelog/0.9.0_2018-05-21/pull-1735 new file mode 100644 index 000000000..2cfd115d8 --- /dev/null +++ b/changelog/0.9.0_2018-05-21/pull-1735 @@ -0,0 +1,9 @@ +Enhancement: Allow keeping a time range of snaphots + +We've added the `--keep-within` option to the `forget` command. It instructs +restic to keep all snapshots within the given duration since the newest +snapshot. For example, running `restic forget --keep-within 5m7d` will keep all +snapshots which have been made in the five months and seven days since the +latest snapshot. + +https://github.com/restic/restic/pull/1735 diff --git a/changelog/0.9.0_2018-05-21/pull-1746 b/changelog/0.9.0_2018-05-21/pull-1746 new file mode 100644 index 000000000..c909c9c80 --- /dev/null +++ b/changelog/0.9.0_2018-05-21/pull-1746 @@ -0,0 +1,7 @@ +Bugfix: Correctly parse the argument to --tls-client-cert + +Previously, the --tls-client-cert method attempt to read ARGV[1] (hardcoded) +instead of the argument that was passed to it. This has been corrected. + +https://github.com/restic/restic/issues/1745 +https://github.com/restic/restic/pull/1746 diff --git a/changelog/0.9.0_2018-05-21/pull-1782 b/changelog/0.9.0_2018-05-21/pull-1782 new file mode 100644 index 000000000..ff65442ea --- /dev/null +++ b/changelog/0.9.0_2018-05-21/pull-1782 @@ -0,0 +1,7 @@ +Enhancement: Use default AWS credentials chain for S3 backend + +Adds support for file credentials to the S3 backend (e.g. ~/.aws/credentials), +and reorders the credentials chain for the S3 backend to match AWS's standard, +which is static credentials, env vars, credentials file, and finally remote. + +https://github.com/restic/restic/pull/1782 \ No newline at end of file diff --git a/changelog/0.9.1_2018-06-10/issue-1801 b/changelog/0.9.1_2018-06-10/issue-1801 new file mode 100644 index 000000000..adf5e1c65 --- /dev/null +++ b/changelog/0.9.1_2018-06-10/issue-1801 @@ -0,0 +1,9 @@ +Bugfix: Add limiting bandwidth to the rclone backend + +The rclone backend did not respect `--limit-upload` or `--limit-download`. +Oftentimes it's not necessary to use this, as the limiting in rclone itself +should be used because it gives much better results, but in case a remote +instance of rclone is used (e.g. called via ssh), it is still relevant to limit +the bandwidth from restic to rclone. 
+ +https://github.com/restic/restic/issues/1801 diff --git a/changelog/0.9.1_2018-06-10/issue-1822 b/changelog/0.9.1_2018-06-10/issue-1822 new file mode 100644 index 000000000..4093c4353 --- /dev/null +++ b/changelog/0.9.1_2018-06-10/issue-1822 @@ -0,0 +1,9 @@ +Bugfix: Allow uploading large files to MS Azure + +Sometimes, restic creates files to be uploaded to the repository which are +quite large, e.g. when saving directories with many entries or very large +files. The MS Azure API does not allow uploading files larger that 256MiB +directly, rather restic needs to upload them in blocks of 100MiB. This is now +implemented. + +https://github.com/restic/restic/issues/1822 diff --git a/changelog/0.9.1_2018-06-10/issue-1825 b/changelog/0.9.1_2018-06-10/issue-1825 new file mode 100644 index 000000000..017d09162 --- /dev/null +++ b/changelog/0.9.1_2018-06-10/issue-1825 @@ -0,0 +1,12 @@ +Bugfix: Correct `find` to not skip snapshots + +Under certain circumstances, the `find` command was found to skip snapshots +containing directories with files to look for when the directories haven't been +modified at all, and were already printed as part of a different snapshot. This +is now corrected. + +In addition, we've switched to our own matching/pattern implementation, so now +things like `restic find "/home/user/foo/**/main.go"` are possible. + +https://github.com/restic/restic/issues/1825 +https://github.com/restic/restic/issues/1823 diff --git a/changelog/0.9.1_2018-06-10/issue-1833 b/changelog/0.9.1_2018-06-10/issue-1833 new file mode 100644 index 000000000..247807279 --- /dev/null +++ b/changelog/0.9.1_2018-06-10/issue-1833 @@ -0,0 +1,9 @@ +Bugfix: Fix caching files on error + +During `check` it may happen that different threads access the same file in the +backend, which is then downloaded into the cache only once. When that fails, +only the thread which is responsible for downloading the file signals the +correct error. The other threads just assume that the file has been downloaded +successfully and then get an error when they try to access the cached file. + +https://github.com/restic/restic/issues/1833 diff --git a/changelog/0.9.1_2018-06-10/issue-1834 b/changelog/0.9.1_2018-06-10/issue-1834 new file mode 100644 index 000000000..c0d8a9a3b --- /dev/null +++ b/changelog/0.9.1_2018-06-10/issue-1834 @@ -0,0 +1,8 @@ +Bugfix: Resolve deadlock + +When the "scanning" process restic runs to find out how much data there is does +not finish before the backup itself is done, restic stops doing anything. This +is resolved now. + +https://github.com/restic/restic/issues/1834 +https://github.com/restic/restic/pull/1835 diff --git a/changelog/0.9.2_2018-08-06/issue-1854 b/changelog/0.9.2_2018-08-06/issue-1854 new file mode 100644 index 000000000..0168102c3 --- /dev/null +++ b/changelog/0.9.2_2018-08-06/issue-1854 @@ -0,0 +1,16 @@ +Bugfix: Allow saving files/dirs on different fs with `--one-file-system` + +restic now allows saving files/dirs on a different file system in a subdir +correctly even when `--one-file-system` is specified. + +The first thing the restic archiver code does is to build a tree of the target +files/directories. If it detects that a parent directory is already included +(e.g. `restic backup /foo /foo/bar/baz`), it'll ignore the latter argument. + +Without `--one-file-system`, that's perfectly valid: If `/foo` is to be +archived, it will include `/foo/bar/baz`. But with `--one-file-system`, +`/foo/bar/baz` may reside on a different file system, so it won't be included +with `/foo`. 
+ +https://github.com/restic/restic/issues/1854 +https://github.com/restic/restic/pull/1855 diff --git a/changelog/0.9.2_2018-08-06/issue-1870 b/changelog/0.9.2_2018-08-06/issue-1870 new file mode 100644 index 000000000..dba3af5a1 --- /dev/null +++ b/changelog/0.9.2_2018-08-06/issue-1870 @@ -0,0 +1,6 @@ +Bugfix: Fix restore with --include + +We fixed a bug which prevented restic to restore files with an include filter. + +https://github.com/restic/restic/issues/1870 +https://github.com/restic/restic/pull/1900 diff --git a/changelog/0.9.2_2018-08-06/issue-1880 b/changelog/0.9.2_2018-08-06/issue-1880 new file mode 100644 index 000000000..cd144e40a --- /dev/null +++ b/changelog/0.9.2_2018-08-06/issue-1880 @@ -0,0 +1,12 @@ +Bugfix: Use `--cache-dir` argument for `check` command + +`check` command now uses a temporary sub-directory of the specified directory +if set using the `--cache-dir` argument. If not set, the cache directory is +created in the default temporary directory as before. +In either case a temporary cache is used to ensure the actual repository is +checked (rather than a local copy). + +The `--cache-dir` argument was not used by the `check` command, instead a +cache directory was created in the temporary directory. + +https://github.com/restic/restic/issues/1880 diff --git a/changelog/0.9.2_2018-08-06/issue-1893 b/changelog/0.9.2_2018-08-06/issue-1893 new file mode 100644 index 000000000..efe39c2b6 --- /dev/null +++ b/changelog/0.9.2_2018-08-06/issue-1893 @@ -0,0 +1,8 @@ +Bugfix: Return error when exclude file cannot be read + +A bug was found: when multiple exclude files were passed to restic and one of +them could not be read, an error was printed and restic continued, ignoring +even the existing exclude files. Now, an error message is printed and restic +aborts when an exclude file cannot be read. + +https://github.com/restic/restic/issues/1893 diff --git a/changelog/0.9.2_2018-08-06/issue-1906 b/changelog/0.9.2_2018-08-06/issue-1906 new file mode 100644 index 000000000..2122ed3b8 --- /dev/null +++ b/changelog/0.9.2_2018-08-06/issue-1906 @@ -0,0 +1,8 @@ +Enhancement: Add support for B2 application keys + +Restic can now use so-called "application keys" which can be created in the B2 +dashboard and were only introduced recently. In contrast to the "master key", +such keys can be restricted to a specific bucket and/or path. 
+ +https://github.com/restic/restic/issues/1906 +https://github.com/restic/restic/pull/1914 diff --git a/changelog/0.9.2_2018-08-06/pull-1729 b/changelog/0.9.2_2018-08-06/pull-1729 new file mode 100644 index 000000000..62e982e95 --- /dev/null +++ b/changelog/0.9.2_2018-08-06/pull-1729 @@ -0,0 +1,4 @@ +Enhancement: Add stats command to get information about a repository + +https://github.com/restic/restic/issues/874 +https://github.com/restic/restic/pull/1729 diff --git a/changelog/0.9.2_2018-08-06/pull-1772 b/changelog/0.9.2_2018-08-06/pull-1772 new file mode 100644 index 000000000..912092455 --- /dev/null +++ b/changelog/0.9.2_2018-08-06/pull-1772 @@ -0,0 +1,6 @@ +Enhancement: Add restore --verify to verify restored file content + +Restore will print error message if restored file content does not match +expected SHA256 checksum + +https://github.com/restic/restic/pull/1772 diff --git a/changelog/0.9.2_2018-08-06/pull-1853 b/changelog/0.9.2_2018-08-06/pull-1853 new file mode 100644 index 000000000..cf23da820 --- /dev/null +++ b/changelog/0.9.2_2018-08-06/pull-1853 @@ -0,0 +1,6 @@ +Enhancement: Add JSON output support to `restic key list` + +This PR enables users to get the output of `restic key list` in JSON in addition +to the existing table format. + +https://github.com/restic/restic/pull/1853 \ No newline at end of file diff --git a/changelog/0.9.2_2018-08-06/pull-1861 b/changelog/0.9.2_2018-08-06/pull-1861 new file mode 100644 index 000000000..2ccb4d5f6 --- /dev/null +++ b/changelog/0.9.2_2018-08-06/pull-1861 @@ -0,0 +1,6 @@ +Bugfix: Fix case-insensitive search with restic find + +We've fixed the behavior for `restic find -i PATTERN`, which was +broken in v0.9.1. + +https://github.com/restic/restic/pull/1861 diff --git a/changelog/0.9.2_2018-08-06/pull-1882 b/changelog/0.9.2_2018-08-06/pull-1882 new file mode 100644 index 000000000..c1a7aad02 --- /dev/null +++ b/changelog/0.9.2_2018-08-06/pull-1882 @@ -0,0 +1,8 @@ +Enhancement: S3 backend: accept AWS_SESSION_TOKEN + +Before, it was not possible to use s3 backend with AWS temporary security credentials(with AWS_SESSION_TOKEN). +This change gives higher priority to credentials.EnvAWS credentials provider. + +https://github.com/restic/restic/issues/1477 +https://github.com/restic/restic/pull/1479 +https://github.com/restic/restic/pull/1647 diff --git a/changelog/0.9.2_2018-08-06/pull-1901 b/changelog/0.9.2_2018-08-06/pull-1901 new file mode 100644 index 000000000..7d9569d17 --- /dev/null +++ b/changelog/0.9.2_2018-08-06/pull-1901 @@ -0,0 +1,9 @@ +Enhancement: Update the Backblaze B2 library + +We've updated the library we're using for accessing the Backblaze B2 service to +0.5.0 to include support for upcoming so-called "application keys". With this +feature, you can create access credentials for B2 which are restricted to e.g. +a single bucket or even a sub-directory of a bucket. + +https://github.com/restic/restic/pull/1901 +https://github.com/kurin/blazer diff --git a/changelog/0.9.3_2018-10-13/issue-1766 b/changelog/0.9.3_2018-10-13/issue-1766 new file mode 100644 index 000000000..6d8e312dd --- /dev/null +++ b/changelog/0.9.3_2018-10-13/issue-1766 @@ -0,0 +1,7 @@ +Enhancement: restore: suppress lchown errors when not running as root + +Like "cp" and "rsync" do, restic now only reports errors for changing +the ownership of files during restore if it is run as root, on non-Windows +operating systems. On Windows, the error is reported as usual. 
+ +https://github.com/restic/restic/issues/1766 diff --git a/changelog/0.9.3_2018-10-13/issue-1909 b/changelog/0.9.3_2018-10-13/issue-1909 new file mode 100644 index 000000000..869d532bf --- /dev/null +++ b/changelog/0.9.3_2018-10-13/issue-1909 @@ -0,0 +1,14 @@ +Enhancement: Reject files/dirs by name first + +The current scanner/archiver code had an architectural limitation: it always +ran the `lstat()` system call on all files and directories before a decision to +include/exclude the file/dir was made. This lead to a lot of unnecessary system +calls for items that could have been rejected by their name or path only. + +We've changed the archiver/scanner implementation so that it now first rejects +by name/path, and only runs the system call on the remaining items. This +reduces the number of `lstat()` system calls a lot (depending on the exclude +settings). + +https://github.com/restic/restic/issues/1909 +https://github.com/restic/restic/pull/1912 diff --git a/changelog/0.9.3_2018-10-13/issue-1935 b/changelog/0.9.3_2018-10-13/issue-1935 new file mode 100644 index 000000000..4edc64a92 --- /dev/null +++ b/changelog/0.9.3_2018-10-13/issue-1935 @@ -0,0 +1,8 @@ +Bugfix: Remove truncated files from cache + +When a file in the local cache is truncated, and restic tries to access data +beyond the end of the (cached) file, it used to return an error "EOF". This is +now fixed, such truncated files are removed and the data is fetched directly +from the backend. + +https://github.com/restic/restic/issues/1935 diff --git a/changelog/0.9.3_2018-10-13/issue-1941 b/changelog/0.9.3_2018-10-13/issue-1941 new file mode 100644 index 000000000..d2a70a02d --- /dev/null +++ b/changelog/0.9.3_2018-10-13/issue-1941 @@ -0,0 +1,15 @@ +Enhancement: Add directory filter to ls command + +The ls command can now be filtered by directories, so that only files in the +given directories will be shown. If the --recursive flag is specified, then +ls will traverse subfolders and list their files as well. + +It used to be possible to specify multiple snapshots, but that has been +replaced by only one snapshot and the possibility of specifying multiple +directories. + +Specifying directories constrains the walk, which can significantly speed up +the listing. + +https://github.com/restic/restic/issues/1940 +https://github.com/restic/restic/pull/1941 diff --git a/changelog/0.9.3_2018-10-13/issue-1967 b/changelog/0.9.3_2018-10-13/issue-1967 new file mode 100644 index 000000000..4be67cf78 --- /dev/null +++ b/changelog/0.9.3_2018-10-13/issue-1967 @@ -0,0 +1,7 @@ +Enhancement: Use `--host` everywhere + +We now use the flag `--host` for all commands which need a host name, using +`--hostname` (e.g. for `restic backup`) still works, but will print a +deprecation warning. Also, add the short option `-H` where possible. + +https://github.com/restic/restic/issues/1967 diff --git a/changelog/0.9.3_2018-10-13/issue-1978 b/changelog/0.9.3_2018-10-13/issue-1978 new file mode 100644 index 000000000..10cda710c --- /dev/null +++ b/changelog/0.9.3_2018-10-13/issue-1978 @@ -0,0 +1,12 @@ +Bugfix: Do not return an error when the scanner is slower than backup + +When restic makes a backup, there's a background task called "scanner" which +collects information on how many files and directories are to be saved, in +order to display progress information to the user. When the backup finishes +faster than the scanner, it is aborted because the result is not needed any +more. 
This logic contained a bug, where quitting the scanner process was +treated as an error, and caused restic to print an unhelpful error message +("context canceled"). + +https://github.com/restic/restic/issues/1978 +https://github.com/restic/restic/pull/1991 diff --git a/changelog/0.9.3_2018-10-13/issue-2028 b/changelog/0.9.3_2018-10-13/issue-2028 new file mode 100644 index 000000000..43fcfbc3c --- /dev/null +++ b/changelog/0.9.3_2018-10-13/issue-2028 @@ -0,0 +1,7 @@ +Enhancement: Display size of cache directories + +The `cache` command now by default shows the size of the individual cache +directories. It can be disabled with `--no-size`. + +https://github.com/restic/restic/issues/2028 +https://github.com/restic/restic/pull/2033 diff --git a/changelog/0.9.3_2018-10-13/pull-1780 b/changelog/0.9.3_2018-10-13/pull-1780 new file mode 100644 index 000000000..b4fb2cdbe --- /dev/null +++ b/changelog/0.9.3_2018-10-13/pull-1780 @@ -0,0 +1,13 @@ +Enhancement: Improve the `find` command + +We've updated the `find` command to support multiple patterns. + +`restic find` is now able to list the snapshots containing a specific tree +or blob, or even the snapshots that contain blobs belonging to a given pack. +A list of IDs can be given, as long as they all have the same type. + +The command `find` can also display the pack IDs the blobs belong to, if +the `--show-pack-id` flag is provided. + +https://github.com/restic/restic/issues/1777 +https://github.com/restic/restic/pull/1780 diff --git a/changelog/0.9.3_2018-10-13/pull-1876 b/changelog/0.9.3_2018-10-13/pull-1876 new file mode 100644 index 000000000..2fb1a8ea8 --- /dev/null +++ b/changelog/0.9.3_2018-10-13/pull-1876 @@ -0,0 +1,7 @@ +Enhancement: Display reason why forget keeps snapshots + +We've added a column to the list of snapshots `forget` keeps which details the +reasons to keep a particuliar snapshot. This makes debugging policies for +forget much easier. Please remember to always try things out with `--dry-run`! + +https://github.com/restic/restic/pull/1876 diff --git a/changelog/0.9.3_2018-10-13/pull-1891 b/changelog/0.9.3_2018-10-13/pull-1891 new file mode 100644 index 000000000..b70900a93 --- /dev/null +++ b/changelog/0.9.3_2018-10-13/pull-1891 @@ -0,0 +1,7 @@ +Enhancement: Accept glob in paths loaded via --files-from + +Before that, behaviour was different if paths were appended to command line or +from a file, because wild card characters were expanded by shell if appended to +command line, but not expanded if loaded from file. + +https://github.com/restic/restic/issues/1891 diff --git a/changelog/0.9.3_2018-10-13/pull-1920 b/changelog/0.9.3_2018-10-13/pull-1920 new file mode 100644 index 000000000..e253d7797 --- /dev/null +++ b/changelog/0.9.3_2018-10-13/pull-1920 @@ -0,0 +1,8 @@ +Enhancement: Vendor dependencies with Go 1.11 Modules + +Until now, we've used `dep` for managing dependencies, we've now switch to +using Go modules. 
For users this does not change much, only if you want to +compile restic without downloading anything with Go 1.11, then you need to run: +`go build -mod=vendor build.go` + +https://github.com/restic/restic/pull/1920 diff --git a/changelog/0.9.3_2018-10-13/pull-1949 b/changelog/0.9.3_2018-10-13/pull-1949 new file mode 100644 index 000000000..fd2b2e1b3 --- /dev/null +++ b/changelog/0.9.3_2018-10-13/pull-1949 @@ -0,0 +1,15 @@ +Enhancement: Add new command `self-update` + +We have added a new command called `self-update` which downloads the +latest released version of restic from GitHub and replaces the current +binary with it. It does not rely on any external program (so it'll work +everywhere), but still verifies the GPG signature using the embedded GPG +public key. + +By default, the `self-update` command is hidden behind the `selfupdate` +built tag, which is only set when restic is built using `build.go` (including +official releases). The reason for this is that downstream distributions will +then not include the command by default, so users are encouraged to use the +platform-specific distribution mechanism. + +https://github.com/restic/restic/pull/1949 diff --git a/changelog/0.9.3_2018-10-13/pull-1953 b/changelog/0.9.3_2018-10-13/pull-1953 new file mode 100644 index 000000000..e22c4b22d --- /dev/null +++ b/changelog/0.9.3_2018-10-13/pull-1953 @@ -0,0 +1,7 @@ +Enhancement: ls: Add JSON output support for restic ls cmd + +We've implemented listing files in the repository with JSON as output, just +pass `--json` as an option to `restic ls`. This makes the output of the command +machine readable. + +https://github.com/restic/restic/pull/1953 diff --git a/changelog/0.9.3_2018-10-13/pull-1962 b/changelog/0.9.3_2018-10-13/pull-1962 new file mode 100644 index 000000000..e3fde4c48 --- /dev/null +++ b/changelog/0.9.3_2018-10-13/pull-1962 @@ -0,0 +1,13 @@ +Enhancement: Stream JSON output for ls command + +The `ls` command now supports JSON output with the global `--json` +flag, and this change streams out JSON messages one object at a time +rather than en entire array buffered in memory before encoding. The +advantage is it allows large listings to be handled efficiently. + +Two message types are printed: snapshots and nodes. A snapshot +object will precede node objects which belong to that snapshot. +The `struct_type` field can be used to determine which kind of +message an object is. + +https://github.com/restic/restic/pull/1962 diff --git a/changelog/0.9.4_2019-01-06/issue-1605 b/changelog/0.9.4_2019-01-06/issue-1605 new file mode 100644 index 000000000..bc78a6d98 --- /dev/null +++ b/changelog/0.9.4_2019-01-06/issue-1605 @@ -0,0 +1,11 @@ +Enhancement: Concurrent restore + +This change significantly improves restore performance, especially +when using high-latency remote repositories like B2. + +The implementation now uses several concurrent threads to download and process +multiple remote files concurrently. To further reduce restore time, each remote +file is downloaded using a single repository request. + +https://github.com/restic/restic/issues/1605 +https://github.com/restic/restic/pull/1719 diff --git a/changelog/0.9.4_2019-01-06/issue-1989 b/changelog/0.9.4_2019-01-06/issue-1989 new file mode 100644 index 000000000..34c2543a0 --- /dev/null +++ b/changelog/0.9.4_2019-01-06/issue-1989 @@ -0,0 +1,7 @@ +Bugfix: Google Cloud Storage: Respect bandwidth limit + +The GCS backend did not respect the bandwidth limit configured, a previous +commit accidentally removed support for it. 
+ +https://github.com/restic/restic/issues/1989 +https://github.com/restic/restic/pull/2100 diff --git a/changelog/0.9.4_2019-01-06/issue-2040 b/changelog/0.9.4_2019-01-06/issue-2040 new file mode 100644 index 000000000..b80db6319 --- /dev/null +++ b/changelog/0.9.4_2019-01-06/issue-2040 @@ -0,0 +1,11 @@ +Bugfix: Add host name filter shorthand flag for `stats` command + +The default value for `--host` flag was set to 'H' (the shorthand version of +the flag), this caused the lookup for the latest snapshot to fail. + +Add shorthand flag `-H` for `--host` (with empty default so if these flags +are not specified the latest snapshot will not filter by host name). + +Also add shorthand `-H` for `backup` command. + +https://github.com/restic/restic/issues/2040 diff --git a/changelog/0.9.4_2019-01-06/issue-2089 b/changelog/0.9.4_2019-01-06/issue-2089 new file mode 100644 index 000000000..d867c4fa4 --- /dev/null +++ b/changelog/0.9.4_2019-01-06/issue-2089 @@ -0,0 +1,9 @@ +Enhancement: increase granularity of the "keep within" retention policy + +The `keep-within` option of the `forget` command now accepts time ranges with +an hourly granularity. For example, running `restic forget --keep-within 3d12h` +will keep all the snapshots made within three days and twelve hours from the +time of the latest snapshot. + +https://github.com/restic/restic/issues/2089 +https://github.com/restic/restic/pull/2090 diff --git a/changelog/0.9.4_2019-01-06/issue-2097 b/changelog/0.9.4_2019-01-06/issue-2097 new file mode 100644 index 000000000..14282b471 --- /dev/null +++ b/changelog/0.9.4_2019-01-06/issue-2097 @@ -0,0 +1,12 @@ +Enhancement: Add key hinting + +Added a new option `--key-hint` and corresponding environment variable +`RESTIC_KEY_HINT`. The key hint is a key ID to try decrypting first, before +other keys in the repository. + +This change will benefit repositories with many keys; if the correct key hint +is supplied then restic only needs to check one key. If the key hint is +incorrect (the key does not exist, or the password is incorrect) then restic +will check all keys, as usual. + +https://github.com/restic/restic/issues/2097 diff --git a/changelog/0.9.4_2019-01-06/pull-2017 b/changelog/0.9.4_2019-01-06/pull-2017 new file mode 100644 index 000000000..44afba625 --- /dev/null +++ b/changelog/0.9.4_2019-01-06/pull-2017 @@ -0,0 +1,11 @@ +Enhancement: mount: Enforce FUSE Unix permissions with allow-other + +The fuse mount (`restic mount`) now lets the kernel check the permissions of +the files within snapshots (this is done through the `DefaultPermissions` FUSE +option) when the option `--allow-other` is specified. + +To restore the old behavior, we've added the `--no-default-permissions` option. +This allows all users that have access to the mount point to access all +files within the snapshots. + +https://github.com/restic/restic/pull/2017 diff --git a/changelog/0.9.4_2019-01-06/pull-2068 b/changelog/0.9.4_2019-01-06/pull-2068 new file mode 100644 index 000000000..1b8853943 --- /dev/null +++ b/changelog/0.9.4_2019-01-06/pull-2068 @@ -0,0 +1,6 @@ +Bugfix: Correctly return error loading data + +In one case during `prune` and `check`, an error loading data from the backend is not returned properly. This is now corrected. 
+ +https://github.com/restic/restic/pull/2068 +https://github.com/restic/restic/issues/1999#issuecomment-433737921 diff --git a/changelog/0.9.4_2019-01-06/pull-2070 b/changelog/0.9.4_2019-01-06/pull-2070 new file mode 100644 index 000000000..30bd0fdd9 --- /dev/null +++ b/changelog/0.9.4_2019-01-06/pull-2070 @@ -0,0 +1,7 @@ +Enhancement: Make all commands display timestamps in local time + +Restic used to drop the timezone information from displayed timestamps, it now +converts timestamps to local time before printing them so the times can be +easily compared to. + +https://github.com/restic/restic/pull/2070 diff --git a/changelog/0.9.4_2019-01-06/pull-2086 b/changelog/0.9.4_2019-01-06/pull-2086 new file mode 100644 index 000000000..5c033e750 --- /dev/null +++ b/changelog/0.9.4_2019-01-06/pull-2086 @@ -0,0 +1,7 @@ +Enhancement: Allow --files-from to be specified multiple times + +Before, restic took only the last file specified with `--files-from` into +account, this is now corrected. + +https://github.com/restic/restic/issues/2085 +https://github.com/restic/restic/pull/2086 diff --git a/changelog/0.9.4_2019-01-06/pull-2094 b/changelog/0.9.4_2019-01-06/pull-2094 new file mode 100644 index 000000000..c4e8d2155 --- /dev/null +++ b/changelog/0.9.4_2019-01-06/pull-2094 @@ -0,0 +1,8 @@ +Enhancement: Run command to get password + +We've added the `--password-command` option which allows specifying a command +that restic runs every time the password for the repository is needed, so it +can be integrated with a password manager or keyring. The option can also be +set via the environment variable `$RESTIC_PASSWORD_COMMAND`. + +https://github.com/restic/restic/pull/2094 diff --git a/changelog/0.9.4_2019-01-06/pull-2095 b/changelog/0.9.4_2019-01-06/pull-2095 new file mode 100644 index 000000000..e19b1efb5 --- /dev/null +++ b/changelog/0.9.4_2019-01-06/pull-2095 @@ -0,0 +1,7 @@ +Bugfix: consistently use local time for snapshots times + +By default snapshots created with restic backup were set to local time, +but when the --time flag was used the provided timestamp was parsed as +UTC. With this change all snapshots times are set to local time. + +https://github.com/restic/restic/pull/2095 diff --git a/changelog/CHANGELOG.tmpl b/changelog/CHANGELOG.tmpl new file mode 100644 index 000000000..8ce41a528 --- /dev/null +++ b/changelog/CHANGELOG.tmpl @@ -0,0 +1,32 @@ +{{- range $changes := . }}{{ with $changes -}} +Changelog for restic {{ .Version }} ({{ .Date }}) +======================================= + +The following sections list the changes in restic {{ .Version }} relevant to +restic users. The changes are ordered by importance. + +Summary +------- +{{ range $entry := .Entries }}{{ with $entry }} + * {{ .TypeShort }} #{{ .PrimaryID }}: {{ .Title }} +{{- end }}{{ end }} + +Details +------- +{{ range $entry := .Entries }}{{ with $entry }} + * {{ .Type }} #{{ .PrimaryID }}: {{ .Title }} +{{ range $par := .Paragraphs }} + {{ wrap $par 80 3 }} +{{ end -}} +{{ range $url := .IssueURLs }} + {{ $url -}} +{{ end -}} +{{ range $url := .PRURLs }} + {{ $url -}} +{{ end -}} +{{ range $url := .OtherURLs }} + {{ $url -}} +{{ end }} +{{ end }}{{ end }} + +{{ end }}{{ end -}} diff --git a/changelog/TEMPLATE b/changelog/TEMPLATE new file mode 100644 index 000000000..ec5952950 --- /dev/null +++ b/changelog/TEMPLATE @@ -0,0 +1,12 @@ +Bugfix: Fix behavior for foobar (in present tense) + +We've fixed the behavior for foobar, a long-standing annoyance for restic +users. + +The text in the paragraphs is written in past tense. 
The last section is a list
+of issue URLs, PR URLs and other URLs. The first issue ID (or the first PR ID,
+in case there aren't any issue links) is used as the primary ID.
+
+https://github.com/restic/restic/issues/1234
+https://github.com/restic/restic/pull/55555
+https://forum.restic.net/foo/bar/baz
diff --git a/changelog/changelog-github.tmpl b/changelog/changelog-github.tmpl
new file mode 100644
index 000000000..d19788daf
--- /dev/null
+++ b/changelog/changelog-github.tmpl
@@ -0,0 +1,31 @@
+{{- range $changes := . }}{{ with $changes -}}
+Changelog for restic {{ .Version }} ({{ .Date }})
+=======================================
+
+The following sections list the changes in restic {{ .Version }} relevant to restic users. The changes are ordered by importance.
+
+Summary
+-------
+{{ range $entry := .Entries }}{{ with $entry }}
+ * {{ .TypeShort }} [#{{ .PrimaryID }}]({{ .PrimaryURL }}): {{ .Title }}
+{{- end }}{{ end }}
+
+Details
+-------
+{{ range $entry := .Entries }}{{ with $entry }}
+ * {{ .Type }} #{{ .PrimaryID }}: {{ .Title }}
+{{ range $par := .Paragraphs }}
+ {{ $par }}
+{{ end }}
+ {{ range $id := .Issues -}}
+{{ ` ` }}[#{{ $id }}](https://github.com/restic/restic/issues/{{ $id -}})
+{{- end -}}
+{{ range $id := .PRs -}}
+{{ ` ` }}[#{{ $id }}](https://github.com/restic/restic/pull/{{ $id -}})
+{{- end -}}
+{{ ` ` }}{{ range $url := .OtherURLs -}}
+{{ $url -}}
+{{- end }}
+{{ end }}{{ end }}
+
+{{ end }}{{ end -}}
diff --git a/cmd/restic/.gitignore b/cmd/restic/.gitignore
new file mode 100644
index 000000000..aee2e4ce1
--- /dev/null
+++ b/cmd/restic/.gitignore
@@ -0,0 +1 @@
+config.mk
diff --git a/cmd/restic/cleanup.go b/cmd/restic/cleanup.go
new file mode 100644
index 000000000..728883452
--- /dev/null
+++ b/cmd/restic/cleanup.go
@@ -0,0 +1,83 @@
+package main
+
+import (
+	"fmt"
+	"os"
+	"os/signal"
+	"sync"
+	"syscall"
+
+	"github.com/restic/restic/internal/debug"
+)
+
+var cleanupHandlers struct {
+	sync.Mutex
+	list []func() error
+	done bool
+	ch   chan os.Signal
+}
+
+var stderr = os.Stderr
+
+func init() {
+	cleanupHandlers.ch = make(chan os.Signal)
+	go CleanupHandler(cleanupHandlers.ch)
+	signal.Notify(cleanupHandlers.ch, syscall.SIGINT)
+}
+
+// AddCleanupHandler adds the function f to the list of cleanup handlers so
+// that it is executed when all the cleanup handlers are run, e.g. when SIGINT
+// is received.
+func AddCleanupHandler(f func() error) {
+	cleanupHandlers.Lock()
+	defer cleanupHandlers.Unlock()
+
+	// reset the done flag for integration tests
+	cleanupHandlers.done = false
+
+	cleanupHandlers.list = append(cleanupHandlers.list, f)
+}
+
+// RunCleanupHandlers runs all registered cleanup handlers.
+func RunCleanupHandlers() {
+	cleanupHandlers.Lock()
+	defer cleanupHandlers.Unlock()
+
+	if cleanupHandlers.done {
+		return
+	}
+	cleanupHandlers.done = true
+
+	for _, f := range cleanupHandlers.list {
+		err := f()
+		if err != nil {
+			fmt.Fprintf(stderr, "error in cleanup handler: %v\n", err)
+		}
+	}
+	cleanupHandlers.list = nil
+}
+
+// CleanupHandler handles the SIGINT signal.
+func CleanupHandler(c <-chan os.Signal) {
+	for s := range c {
+		debug.Log("signal %v received, cleaning up", s)
+		fmt.Fprintf(stderr, "%ssignal %v received, cleaning up\n", ClearLine(), s)
+
+		code := 0
+
+		if s == syscall.SIGINT {
+			code = 130
+		} else {
+			code = 1
+		}
+
+		Exit(code)
+	}
+}
+
+// Exit runs the cleanup handlers and then terminates the process with the
+// given exit code.
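+//
+// A minimal usage sketch (hypothetical; the lock-file path is illustrative):
+//
+//	AddCleanupHandler(func() error {
+//		// release resources that must not outlive the process
+//		return os.Remove("/tmp/restic-example.lock")
+//	})
+//	Exit(0) // runs every handler exactly once, then calls os.Exit(0)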
+func Exit(code int) { + RunCleanupHandlers() + os.Exit(code) +} diff --git a/cmd/restic/cmd_backup.go b/cmd/restic/cmd_backup.go new file mode 100644 index 000000000..fc24868a5 --- /dev/null +++ b/cmd/restic/cmd_backup.go @@ -0,0 +1,552 @@ +package main + +import ( + "bufio" + "bytes" + "context" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strconv" + "strings" + "time" + + "github.com/spf13/cobra" + tomb "gopkg.in/tomb.v2" + + "github.com/restic/restic/internal/archiver" + "github.com/restic/restic/internal/debug" + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/fs" + "github.com/restic/restic/internal/repository" + "github.com/restic/restic/internal/restic" + "github.com/restic/restic/internal/textfile" + "github.com/restic/restic/internal/ui" + "github.com/restic/restic/internal/ui/termstatus" +) + +var cmdBackup = &cobra.Command{ + Use: "backup [flags] FILE/DIR [FILE/DIR] ...", + Short: "Create a new backup of files and/or directories", + Long: ` +The "backup" command creates a new snapshot and saves the files and directories +given as the arguments. +`, + PreRun: func(cmd *cobra.Command, args []string) { + if backupOptions.Host == "" { + hostname, err := os.Hostname() + if err != nil { + debug.Log("os.Hostname() returned err: %v", err) + return + } + backupOptions.Host = hostname + } + }, + DisableAutoGenTag: true, + RunE: func(cmd *cobra.Command, args []string) error { + if backupOptions.Stdin { + for _, filename := range backupOptions.FilesFrom { + if filename == "-" { + return errors.Fatal("cannot use both `--stdin` and `--files-from -`") + } + } + } + + var t tomb.Tomb + term := termstatus.New(globalOptions.stdout, globalOptions.stderr, globalOptions.Quiet) + t.Go(func() error { term.Run(t.Context(globalOptions.ctx)); return nil }) + + err := runBackup(backupOptions, globalOptions, term, args) + if err != nil { + return err + } + t.Kill(nil) + return t.Wait() + }, +} + +// BackupOptions bundles all options for the backup command. 
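+// Most fields map one-to-one onto the command-line flags registered in init
+// below. For example (hypothetical invocation), running
+//
+//	restic backup --exclude '*.tmp' --tag work /home/user
+//
+// fills in roughly
+//
+//	BackupOptions{Excludes: []string{"*.tmp"}, Tags: []string{"work"}}
+//
+// with /home/user passed through as a positional target argument.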
+type BackupOptions struct {
+	Parent           string
+	Force            bool
+	Excludes         []string
+	ExcludeFiles     []string
+	ExcludeOtherFS   bool
+	ExcludeIfPresent []string
+	ExcludeCaches    bool
+	Stdin            bool
+	StdinFilename    string
+	Tags             []string
+	Host             string
+	FilesFrom        []string
+	TimeStamp        string
+	WithAtime        bool
+}
+
+var backupOptions BackupOptions
+
+func init() {
+	cmdRoot.AddCommand(cmdBackup)
+
+	f := cmdBackup.Flags()
+	f.StringVar(&backupOptions.Parent, "parent", "", "use this parent snapshot (default: last snapshot in the repo that has the same target files/directories)")
+	f.BoolVarP(&backupOptions.Force, "force", "f", false, `force re-reading the target files/directories (overrides the "parent" flag)`)
+	f.StringArrayVarP(&backupOptions.Excludes, "exclude", "e", nil, "exclude a `pattern` (can be specified multiple times)")
+	f.StringArrayVar(&backupOptions.ExcludeFiles, "exclude-file", nil, "read exclude patterns from a `file` (can be specified multiple times)")
+	f.BoolVarP(&backupOptions.ExcludeOtherFS, "one-file-system", "x", false, "exclude other file systems")
+	f.StringArrayVar(&backupOptions.ExcludeIfPresent, "exclude-if-present", nil, "takes filename[:header], exclude contents of directories containing filename (except filename itself) if header of that file is as provided (can be specified multiple times)")
+	f.BoolVar(&backupOptions.ExcludeCaches, "exclude-caches", false, `excludes cache directories that are marked with a CACHEDIR.TAG file`)
+	f.BoolVar(&backupOptions.Stdin, "stdin", false, "read backup from stdin")
+	f.StringVar(&backupOptions.StdinFilename, "stdin-filename", "stdin", "file name to use when reading from stdin")
+	f.StringArrayVar(&backupOptions.Tags, "tag", nil, "add a `tag` for the new snapshot (can be specified multiple times)")
+
+	f.StringVarP(&backupOptions.Host, "host", "H", "", "set the `hostname` for the snapshot manually. To prevent an expensive rescan use the \"parent\" flag")
+	f.StringVar(&backupOptions.Host, "hostname", "", "set the `hostname` for the snapshot manually")
+	f.MarkDeprecated("hostname", "use --host")
+
+	f.StringArrayVar(&backupOptions.FilesFrom, "files-from", nil, "read the files to back up from `file` (can be combined with file args; can be specified multiple times)")
+	f.StringVar(&backupOptions.TimeStamp, "time", "", "time of the backup (ex. '2012-11-01 22:08:41') (default: now)")
+	f.BoolVar(&backupOptions.WithAtime, "with-atime", false, "store the atime for all files and directories")
+}
+
+// filterExisting returns a slice of all existing items, or an error if no
+// items exist at all.
+func filterExisting(items []string) (result []string, err error) {
+	for _, item := range items {
+		_, err := fs.Lstat(item)
+		if err != nil && os.IsNotExist(errors.Cause(err)) {
+			Warnf("%v does not exist, skipping\n", item)
+			continue
+		}
+
+		result = append(result, item)
+	}
+
+	if len(result) == 0 {
+		return nil, errors.Fatal("all target directories/files do not exist")
+	}
+
+	return
+}
+
+// readLinesFromFile reads all lines from the given filename and returns them
+// as a string array. If filename is empty, readLinesFromFile returns an empty
+// string array. If filename is a dash (-), it reads the lines from the
+// standard input.
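+//
+// For example (hypothetical file contents), the input
+//
+//	# directories to back up
+//	/home/user
+//
+//	/etc
+//
+// yields []string{"/home/user", "/etc"}: blank lines and lines starting with
+// '#' are dropped below.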
+func readLinesFromFile(filename string) ([]string, error) { + if filename == "" { + return nil, nil + } + + var ( + data []byte + err error + ) + + if filename == "-" { + data, err = ioutil.ReadAll(os.Stdin) + } else { + data, err = textfile.Read(filename) + } + + if err != nil { + return nil, err + } + + var lines []string + + scanner := bufio.NewScanner(bytes.NewReader(data)) + for scanner.Scan() { + line := strings.TrimSpace(scanner.Text()) + // ignore empty lines + if line == "" { + continue + } + // strip comments + if strings.HasPrefix(line, "#") { + continue + } + lines = append(lines, line) + } + + if err := scanner.Err(); err != nil { + return nil, err + } + + return lines, nil +} + +// Check returns an error when an invalid combination of options was set. +func (opts BackupOptions) Check(gopts GlobalOptions, args []string) error { + if gopts.password == "" { + for _, filename := range opts.FilesFrom { + if filename == "-" { + return errors.Fatal("unable to read password from stdin when data is to be read from stdin, use --password-file or $RESTIC_PASSWORD") + } + } + } + + if opts.Stdin { + if len(opts.FilesFrom) > 0 { + return errors.Fatal("--stdin and --files-from cannot be used together") + } + + if len(args) > 0 { + return errors.Fatal("--stdin was specified and files/dirs were listed as arguments") + } + } + + return nil +} + +// collectRejectByNameFuncs returns a list of all functions which may reject data +// from being saved in a snapshot based on path only +func collectRejectByNameFuncs(opts BackupOptions, repo *repository.Repository, targets []string) (fs []RejectByNameFunc, err error) { + // exclude restic cache + if repo.Cache != nil { + f, err := rejectResticCache(repo) + if err != nil { + return nil, err + } + + fs = append(fs, f) + } + + // add patterns from file + if len(opts.ExcludeFiles) > 0 { + excludes, err := readExcludePatternsFromFiles(opts.ExcludeFiles) + if err != nil { + return nil, err + } + opts.Excludes = append(opts.Excludes, excludes...) + } + + if len(opts.Excludes) > 0 { + fs = append(fs, rejectByPattern(opts.Excludes)) + } + + if opts.ExcludeCaches { + opts.ExcludeIfPresent = append(opts.ExcludeIfPresent, "CACHEDIR.TAG:Signature: 8a477f597d28d172789f06886806bc55") + } + + for _, spec := range opts.ExcludeIfPresent { + f, err := rejectIfPresent(spec) + if err != nil { + return nil, err + } + + fs = append(fs, f) + } + + return fs, nil +} + +// collectRejectFuncs returns a list of all functions which may reject data +// from being saved in a snapshot based on path and file info +func collectRejectFuncs(opts BackupOptions, repo *repository.Repository, targets []string) (fs []RejectFunc, err error) { + // allowed devices + if opts.ExcludeOtherFS && !opts.Stdin { + f, err := rejectByDevice(targets) + if err != nil { + return nil, err + } + fs = append(fs, f) + } + + return fs, nil +} + +// readExcludePatternsFromFiles reads all exclude files and returns the list of +// exclude patterns. For each line, leading and trailing white space is removed +// and comment lines are ignored. For each remaining pattern, environment +// variables are resolved. For adding a literal dollar sign ($), write $$ to +// the file. 
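+//
+// For example (hypothetical pattern line), with HOME=/home/user the line
+//
+//	$HOME/cache/$$tmp
+//
+// expands to /home/user/cache/$tmp before it is used as an exclude pattern.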
+func readExcludePatternsFromFiles(excludeFiles []string) ([]string, error) {
+	getenvOrDollar := func(s string) string {
+		if s == "$" {
+			return "$"
+		}
+		return os.Getenv(s)
+	}
+
+	var excludes []string
+	for _, filename := range excludeFiles {
+		err := func() (err error) {
+			data, err := textfile.Read(filename)
+			if err != nil {
+				return err
+			}
+
+			scanner := bufio.NewScanner(bytes.NewReader(data))
+			for scanner.Scan() {
+				line := strings.TrimSpace(scanner.Text())
+
+				// ignore empty lines
+				if line == "" {
+					continue
+				}
+
+				// strip comments
+				if strings.HasPrefix(line, "#") {
+					continue
+				}
+
+				line = os.Expand(line, getenvOrDollar)
+				excludes = append(excludes, line)
+			}
+			return scanner.Err()
+		}()
+		if err != nil {
+			return nil, err
+		}
+	}
+	return excludes, nil
+}
+
+// collectTargets returns a list of target files/dirs from several sources.
+func collectTargets(opts BackupOptions, args []string) (targets []string, err error) {
+	if opts.Stdin {
+		return nil, nil
+	}
+
+	var lines []string
+	for _, file := range opts.FilesFrom {
+		fromfile, err := readLinesFromFile(file)
+		if err != nil {
+			return nil, err
+		}
+
+		// expand wildcards
+		for _, line := range fromfile {
+			var expanded []string
+			expanded, err := filepath.Glob(line)
+			if err != nil {
+				return nil, errors.WithMessage(err, fmt.Sprintf("pattern: %s", line))
+			}
+			if len(expanded) == 0 {
+				Warnf("pattern %q does not match any files, skipping\n", line)
+			}
+			lines = append(lines, expanded...)
+		}
+	}
+
+	// merge the files-from lines into the normal args so we can reuse the
+	// usual argument checks and allow files-from and args at the same time
+	args = append(args, lines...)
+	if len(args) == 0 && !opts.Stdin {
+		return nil, errors.Fatal("nothing to back up, please specify target files/dirs")
+	}
+
+	targets = args
+	targets, err = filterExisting(targets)
+	if err != nil {
+		return nil, err
+	}
+
+	return targets, nil
+}
+
+// findParentSnapshot returns the ID of the parent snapshot. If there is none,
+// nil is returned.
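+//
+// In short: --force disables parent selection entirely; otherwise an explicit
+// --parent wins, and failing that the latest snapshot with the same host and
+// target paths (if any) becomes the parent.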
+func findParentSnapshot(ctx context.Context, repo restic.Repository, opts BackupOptions, targets []string) (parentID *restic.ID, err error) { + // Force using a parent + if !opts.Force && opts.Parent != "" { + id, err := restic.FindSnapshot(repo, opts.Parent) + if err != nil { + return nil, errors.Fatalf("invalid id %q: %v", opts.Parent, err) + } + + parentID = &id + } + + // Find last snapshot to set it as parent, if not already set + if !opts.Force && parentID == nil { + id, err := restic.FindLatestSnapshot(ctx, repo, targets, []restic.TagList{}, opts.Host) + if err == nil { + parentID = &id + } else if err != restic.ErrNoSnapshotFound { + return nil, err + } + } + + return parentID, nil +} + +func runBackup(opts BackupOptions, gopts GlobalOptions, term *termstatus.Terminal, args []string) error { + err := opts.Check(gopts, args) + if err != nil { + return err + } + + targets, err := collectTargets(opts, args) + if err != nil { + return err + } + + timeStamp := time.Now() + if opts.TimeStamp != "" { + timeStamp, err = time.ParseInLocation(TimeFormat, opts.TimeStamp, time.Local) + if err != nil { + return errors.Fatalf("error in time option: %v\n", err) + } + } + + var t tomb.Tomb + + term.Print("open repository\n") + repo, err := OpenRepository(gopts) + if err != nil { + return err + } + + p := ui.NewBackup(term, gopts.verbosity) + + // use the terminal for stdout/stderr + prevStdout, prevStderr := gopts.stdout, gopts.stderr + defer func() { + gopts.stdout, gopts.stderr = prevStdout, prevStderr + }() + gopts.stdout, gopts.stderr = p.Stdout(), p.Stderr() + + if s, ok := os.LookupEnv("RESTIC_PROGRESS_FPS"); ok { + fps, err := strconv.Atoi(s) + if err == nil && fps >= 1 { + if fps > 60 { + fps = 60 + } + p.MinUpdatePause = time.Second / time.Duration(fps) + } + } + + t.Go(func() error { return p.Run(t.Context(gopts.ctx)) }) + + p.V("lock repository") + lock, err := lockRepo(repo) + defer unlockRepo(lock) + if err != nil { + return err + } + + // rejectByNameFuncs collect functions that can reject items from the backup based on path only + rejectByNameFuncs, err := collectRejectByNameFuncs(opts, repo, targets) + if err != nil { + return err + } + + // rejectFuncs collect functions that can reject items from the backup based on path and file info + rejectFuncs, err := collectRejectFuncs(opts, repo, targets) + if err != nil { + return err + } + + p.V("load index files") + err = repo.LoadIndex(gopts.ctx) + if err != nil { + return err + } + + parentSnapshotID, err := findParentSnapshot(gopts.ctx, repo, opts, targets) + if err != nil { + return err + } + + if parentSnapshotID != nil { + p.V("using parent snapshot %v\n", parentSnapshotID.Str()) + } + + selectByNameFilter := func(item string) bool { + for _, reject := range rejectByNameFuncs { + if reject(item) { + return false + } + } + return true + } + + selectFilter := func(item string, fi os.FileInfo) bool { + for _, reject := range rejectFuncs { + if reject(item, fi) { + return false + } + } + return true + } + + var targetFS fs.FS = fs.Local{} + if opts.Stdin { + p.V("read data from stdin") + targetFS = &fs.Reader{ + ModTime: timeStamp, + Name: opts.StdinFilename, + Mode: 0644, + ReadCloser: os.Stdin, + } + targets = []string{opts.StdinFilename} + } + + sc := archiver.NewScanner(targetFS) + sc.SelectByName = selectByNameFilter + sc.Select = selectFilter + sc.Error = p.ScannerError + sc.Result = p.ReportTotal + + p.V("start scan on %v", targets) + t.Go(func() error { return sc.Scan(t.Context(gopts.ctx), targets) }) + + arch := 
archiver.New(repo, targetFS, archiver.Options{}) + arch.SelectByName = selectByNameFilter + arch.Select = selectFilter + arch.WithAtime = opts.WithAtime + arch.Error = p.Error + arch.CompleteItem = p.CompleteItemFn + arch.StartFile = p.StartFile + arch.CompleteBlob = p.CompleteBlob + + if parentSnapshotID == nil { + parentSnapshotID = &restic.ID{} + } + + snapshotOpts := archiver.SnapshotOptions{ + Excludes: opts.Excludes, + Tags: opts.Tags, + Time: timeStamp, + Hostname: opts.Host, + ParentSnapshot: *parentSnapshotID, + } + + uploader := archiver.IndexUploader{ + Repository: repo, + Start: func() { + p.VV("uploading intermediate index") + }, + Complete: func(id restic.ID) { + p.V("uploaded intermediate index %v", id.Str()) + }, + } + + t.Go(func() error { + return uploader.Upload(gopts.ctx, t.Context(gopts.ctx), 30*time.Second) + }) + + p.V("start backup on %v", targets) + _, id, err := arch.Snapshot(gopts.ctx, targets, snapshotOpts) + if err != nil { + return errors.Fatalf("unable to save snapshot: %v", err) + } + + p.Finish() + p.P("snapshot %s saved\n", id.Str()) + + // cleanly shutdown all running goroutines + t.Kill(nil) + + // let's see if one returned an error + err = t.Wait() + if err != nil { + return err + } + + return nil +} diff --git a/cmd/restic/cmd_cache.go b/cmd/restic/cmd_cache.go new file mode 100644 index 000000000..9a2ebd826 --- /dev/null +++ b/cmd/restic/cmd_cache.go @@ -0,0 +1,166 @@ +package main + +import ( + "fmt" + "os" + "path/filepath" + "sort" + "time" + + "github.com/restic/restic/internal/cache" + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/fs" + "github.com/restic/restic/internal/ui/table" + "github.com/spf13/cobra" +) + +var cmdCache = &cobra.Command{ + Use: "cache", + Short: "Operate on local cache directories", + Long: ` +The "cache" command allows listing and cleaning local cache directories. +`, + DisableAutoGenTag: true, + RunE: func(cmd *cobra.Command, args []string) error { + return runCache(cacheOptions, globalOptions, args) + }, +} + +// CacheOptions bundles all options for the snapshots command. 
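+// For example (hypothetical invocation), `restic cache --cleanup --max-age 7`
+// removes cache directories that have not been used for more than seven days,
+// while a plain `restic cache` merely lists them in a table.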
+type CacheOptions struct { + Cleanup bool + MaxAge uint + NoSize bool +} + +var cacheOptions CacheOptions + +func init() { + cmdRoot.AddCommand(cmdCache) + + f := cmdCache.Flags() + f.BoolVar(&cacheOptions.Cleanup, "cleanup", false, "remove old cache directories") + f.UintVar(&cacheOptions.MaxAge, "max-age", 30, "max age in `days` for cache directories to be considered old") + f.BoolVar(&cacheOptions.NoSize, "no-size", false, "do not output the size of the cache directories") +} + +func runCache(opts CacheOptions, gopts GlobalOptions, args []string) error { + if len(args) > 0 { + return errors.Fatal("the cache command has no arguments") + } + + if gopts.NoCache { + return errors.Fatal("Refusing to do anything, the cache is disabled") + } + + var ( + cachedir = gopts.CacheDir + err error + ) + + if cachedir == "" { + cachedir, err = cache.DefaultDir() + if err != nil { + return err + } + } + + if opts.Cleanup || gopts.CleanupCache { + oldDirs, err := cache.OlderThan(cachedir, time.Duration(opts.MaxAge)*24*time.Hour) + if err != nil { + return err + } + + if len(oldDirs) == 0 { + Verbosef("no old cache dirs found\n") + return nil + } + + Verbosef("remove %d old cache directories\n", len(oldDirs)) + + for _, item := range oldDirs { + dir := filepath.Join(cachedir, item.Name()) + err = fs.RemoveAll(dir) + if err != nil { + Warnf("unable to remove %v: %v\n", dir, err) + } + } + + return nil + } + + tab := table.New() + + type data struct { + ID string + Last string + Old string + Size string + } + + tab.AddColumn("Repo ID", "{{ .ID }}") + tab.AddColumn("Last Used", "{{ .Last }}") + tab.AddColumn("Old", "{{ .Old }}") + + if !opts.NoSize { + tab.AddColumn("Size", "{{ .Size }}") + } + + dirs, err := cache.All(cachedir) + if err != nil { + return err + } + + if len(dirs) == 0 { + Printf("no cache dirs found, basedir is %v\n", cachedir) + return nil + } + + sort.Slice(dirs, func(i, j int) bool { + return dirs[i].ModTime().Before(dirs[j].ModTime()) + }) + + for _, entry := range dirs { + var old string + if cache.IsOld(entry.ModTime(), time.Duration(opts.MaxAge)*24*time.Hour) { + old = "yes" + } + + var size string + if !opts.NoSize { + bytes, err := dirSize(filepath.Join(cachedir, entry.Name())) + if err != nil { + return err + } + size = fmt.Sprintf("%11s", formatBytes(uint64(bytes))) + } + + tab.AddRow(data{ + entry.Name()[:10], + fmt.Sprintf("%d days ago", uint(time.Since(entry.ModTime()).Hours()/24)), + old, + size, + }) + } + + tab.Write(gopts.stdout) + Printf("%d cache dirs in %s\n", len(dirs), cachedir) + + return nil +} + +func dirSize(path string) (int64, error) { + var size int64 + err := filepath.Walk(path, func(_ string, info os.FileInfo, err error) error { + if err != nil || info == nil { + return err + } + + if !info.IsDir() { + size += info.Size() + } + + return nil + }) + return size, err +} diff --git a/cmd/restic/cmd_cat.go b/cmd/restic/cmd_cat.go new file mode 100644 index 000000000..e735daf88 --- /dev/null +++ b/cmd/restic/cmd_cat.go @@ -0,0 +1,190 @@ +package main + +import ( + "encoding/json" + "fmt" + "os" + + "github.com/spf13/cobra" + + "github.com/restic/restic/internal/backend" + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/repository" + "github.com/restic/restic/internal/restic" +) + +var cmdCat = &cobra.Command{ + Use: "cat [flags] [pack|blob|snapshot|index|key|masterkey|config|lock] ID", + Short: "Print internal objects to stdout", + Long: ` +The "cat" command is used to print internal objects to stdout. 
+`, + DisableAutoGenTag: true, + RunE: func(cmd *cobra.Command, args []string) error { + return runCat(globalOptions, args) + }, +} + +func init() { + cmdRoot.AddCommand(cmdCat) +} + +func runCat(gopts GlobalOptions, args []string) error { + if len(args) < 1 || (args[0] != "masterkey" && args[0] != "config" && len(args) != 2) { + return errors.Fatal("type or ID not specified") + } + + repo, err := OpenRepository(gopts) + if err != nil { + return err + } + + lock, err := lockRepo(repo) + defer unlockRepo(lock) + if err != nil { + return err + } + + tpe := args[0] + + var id restic.ID + if tpe != "masterkey" && tpe != "config" { + id, err = restic.ParseID(args[1]) + if err != nil { + if tpe != "snapshot" { + return errors.Fatalf("unable to parse ID: %v\n", err) + } + + // find snapshot id with prefix + id, err = restic.FindSnapshot(repo, args[1]) + if err != nil { + return errors.Fatalf("could not find snapshot: %v\n", err) + } + } + } + + // handle all types that don't need an index + switch tpe { + case "config": + buf, err := json.MarshalIndent(repo.Config(), "", " ") + if err != nil { + return err + } + + fmt.Println(string(buf)) + return nil + case "index": + buf, err := repo.LoadAndDecrypt(gopts.ctx, restic.IndexFile, id) + if err != nil { + return err + } + + _, err = os.Stdout.Write(append(buf, '\n')) + return err + + case "snapshot": + sn := &restic.Snapshot{} + err = repo.LoadJSONUnpacked(gopts.ctx, restic.SnapshotFile, id, sn) + if err != nil { + return err + } + + buf, err := json.MarshalIndent(&sn, "", " ") + if err != nil { + return err + } + + fmt.Println(string(buf)) + + return nil + case "key": + h := restic.Handle{Type: restic.KeyFile, Name: id.String()} + buf, err := backend.LoadAll(gopts.ctx, repo.Backend(), h) + if err != nil { + return err + } + + key := &repository.Key{} + err = json.Unmarshal(buf, key) + if err != nil { + return err + } + + buf, err = json.MarshalIndent(&key, "", " ") + if err != nil { + return err + } + + fmt.Println(string(buf)) + return nil + case "masterkey": + buf, err := json.MarshalIndent(repo.Key(), "", " ") + if err != nil { + return err + } + + fmt.Println(string(buf)) + return nil + case "lock": + lock, err := restic.LoadLock(gopts.ctx, repo, id) + if err != nil { + return err + } + + buf, err := json.MarshalIndent(&lock, "", " ") + if err != nil { + return err + } + + fmt.Println(string(buf)) + + return nil + } + + // load index, handle all the other types + err = repo.LoadIndex(gopts.ctx) + if err != nil { + return err + } + + switch tpe { + case "pack": + h := restic.Handle{Type: restic.DataFile, Name: id.String()} + buf, err := backend.LoadAll(gopts.ctx, repo.Backend(), h) + if err != nil { + return err + } + + hash := restic.Hash(buf) + if !hash.Equal(id) { + fmt.Fprintf(stderr, "Warning: hash of data does not match ID, want\n %v\ngot:\n %v\n", id.String(), hash.String()) + } + + _, err = os.Stdout.Write(buf) + return err + + case "blob": + for _, t := range []restic.BlobType{restic.DataBlob, restic.TreeBlob} { + list, found := repo.Index().Lookup(id, t) + if !found { + continue + } + blob := list[0] + + buf := make([]byte, blob.Length) + n, err := repo.LoadBlob(gopts.ctx, t, id, buf) + if err != nil { + return err + } + buf = buf[:n] + + _, err = os.Stdout.Write(buf) + return err + } + + return errors.Fatal("blob not found") + + default: + return errors.Fatal("invalid type") + } +} diff --git a/cmd/restic/cmd_check.go b/cmd/restic/cmd_check.go new file mode 100644 index 000000000..bee7eae54 --- /dev/null +++ b/cmd/restic/cmd_check.go 
@@ -0,0 +1,298 @@ +package main + +import ( + "fmt" + "io/ioutil" + "os" + "strconv" + "strings" + "time" + + "github.com/spf13/cobra" + + "github.com/restic/restic/internal/checker" + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/fs" + "github.com/restic/restic/internal/restic" +) + +var cmdCheck = &cobra.Command{ + Use: "check [flags]", + Short: "Check the repository for errors", + Long: ` +The "check" command tests the repository for errors and reports any errors it +finds. It can also be used to read all data and therefore simulate a restore. + +By default, the "check" command will always load all data directly from the +repository and not use a local cache. +`, + DisableAutoGenTag: true, + RunE: func(cmd *cobra.Command, args []string) error { + return runCheck(checkOptions, globalOptions, args) + }, + PreRunE: func(cmd *cobra.Command, args []string) error { + return checkFlags(checkOptions) + }, +} + +// CheckOptions bundles all options for the 'check' command. +type CheckOptions struct { + ReadData bool + ReadDataSubset string + CheckUnused bool + WithCache bool +} + +var checkOptions CheckOptions + +func init() { + cmdRoot.AddCommand(cmdCheck) + + f := cmdCheck.Flags() + f.BoolVar(&checkOptions.ReadData, "read-data", false, "read all data blobs") + f.StringVar(&checkOptions.ReadDataSubset, "read-data-subset", "", "read subset n of m data packs (format: `n/m`)") + f.BoolVar(&checkOptions.CheckUnused, "check-unused", false, "find unused blobs") + f.BoolVar(&checkOptions.WithCache, "with-cache", false, "use the cache") +} + +func checkFlags(opts CheckOptions) error { + if opts.ReadData && opts.ReadDataSubset != "" { + return errors.Fatalf("check flags --read-data and --read-data-subset cannot be used together") + } + if opts.ReadDataSubset != "" { + dataSubset, err := stringToIntSlice(opts.ReadDataSubset) + if err != nil || len(dataSubset) != 2 { + return errors.Fatalf("check flag --read-data-subset must have two positive integer values, e.g. --read-data-subset=1/2") + } + if dataSubset[0] == 0 || dataSubset[1] == 0 || dataSubset[0] > dataSubset[1] { + return errors.Fatalf("check flag --read-data-subset=n/t values must be positive integers, and n <= t, e.g. --read-data-subset=1/2") + } + } + + return nil +} + +// stringToIntSlice converts string to []uint, using '/' as element separator +func stringToIntSlice(param string) (split []uint, err error) { + if param == "" { + return nil, nil + } + parts := strings.Split(param, "/") + result := make([]uint, len(parts)) + for idx, part := range parts { + uintval, err := strconv.ParseUint(part, 10, 0) + if err != nil { + return nil, err + } + result[idx] = uint(uintval) + } + return result, nil +} + +func newReadProgress(gopts GlobalOptions, todo restic.Stat) *restic.Progress { + if gopts.Quiet { + return nil + } + + readProgress := restic.NewProgress() + + readProgress.OnUpdate = func(s restic.Stat, d time.Duration, ticker bool) { + status := fmt.Sprintf("[%s] %s %d / %d items", + formatDuration(d), + formatPercent(s.Blobs, todo.Blobs), + s.Blobs, todo.Blobs) + + if w := stdoutTerminalWidth(); w > 0 { + if len(status) > w { + max := w - len(status) - 4 + status = status[:max] + "... " + } + } + + PrintProgress("%s", status) + } + + readProgress.OnDone = func(s restic.Stat, d time.Duration, ticker bool) { + fmt.Printf("\nduration: %s\n", formatDuration(d)) + } + + return readProgress +} + +// prepareCheckCache configures a special cache directory for check. 
+// +// * if --with-cache is specified, the default cache is used +// * if the user explicitly requested --no-cache, we don't use any cache +// * if the user provides --cache-dir, we use a cache in a temporary sub-directory of the specified directory and the sub-directory is deleted after the check +// * by default, we use a cache in a temporary directory that is deleted after the check +func prepareCheckCache(opts CheckOptions, gopts *GlobalOptions) (cleanup func()) { + cleanup = func() {} + if opts.WithCache { + // use the default cache, no setup needed + return cleanup + } + + if gopts.NoCache { + // don't use any cache, no setup needed + return cleanup + } + + cachedir := gopts.CacheDir + + // use a cache in a temporary directory + tempdir, err := ioutil.TempDir(cachedir, "restic-check-cache-") + if err != nil { + // if an error occurs, don't use any cache + Warnf("unable to create temporary directory for cache during check, disabling cache: %v\n", err) + gopts.NoCache = true + return cleanup + } + + gopts.CacheDir = tempdir + Verbosef("using temporary cache in %v\n", tempdir) + + cleanup = func() { + err := fs.RemoveAll(tempdir) + if err != nil { + Warnf("error removing temporary cache directory: %v\n", err) + } + } + + return cleanup +} + +func runCheck(opts CheckOptions, gopts GlobalOptions, args []string) error { + if len(args) != 0 { + return errors.Fatal("check has no arguments") + } + + cleanup := prepareCheckCache(opts, &gopts) + AddCleanupHandler(func() error { + cleanup() + return nil + }) + + repo, err := OpenRepository(gopts) + if err != nil { + return err + } + + if !gopts.NoLock { + Verbosef("create exclusive lock for repository\n") + lock, err := lockRepoExclusive(repo) + defer unlockRepo(lock) + if err != nil { + return err + } + } + + chkr := checker.New(repo) + + Verbosef("load indexes\n") + hints, errs := chkr.LoadIndex(gopts.ctx) + + dupFound := false + for _, hint := range hints { + Printf("%v\n", hint) + if _, ok := hint.(checker.ErrDuplicatePacks); ok { + dupFound = true + } + } + + if dupFound { + Printf("This is non-critical, you can run `restic rebuild-index' to correct this\n") + } + + if len(errs) > 0 { + for _, err := range errs { + Warnf("error: %v\n", err) + } + return errors.Fatal("LoadIndex returned errors") + } + + errorsFound := false + orphanedPacks := 0 + errChan := make(chan error) + + Verbosef("check all packs\n") + go chkr.Packs(gopts.ctx, errChan) + + for err := range errChan { + if checker.IsOrphanedPack(err) { + orphanedPacks++ + Verbosef("%v\n", err) + continue + } + errorsFound = true + fmt.Fprintf(os.Stderr, "%v\n", err) + } + + if orphanedPacks > 0 { + Verbosef("%d additional files were found in the repo, which likely contain duplicate data.\nYou can run `restic prune` to correct this.\n", orphanedPacks) + } + + Verbosef("check snapshots, trees and blobs\n") + errChan = make(chan error) + go chkr.Structure(gopts.ctx, errChan) + + for err := range errChan { + errorsFound = true + if e, ok := err.(checker.TreeError); ok { + fmt.Fprintf(os.Stderr, "error for tree %v:\n", e.ID.Str()) + for _, treeErr := range e.Errors { + fmt.Fprintf(os.Stderr, " %v\n", treeErr) + } + } else { + fmt.Fprintf(os.Stderr, "error: %v\n", err) + } + } + + if opts.CheckUnused { + for _, id := range chkr.UnusedBlobs() { + Verbosef("unused blob %v\n", id.Str()) + errorsFound = true + } + } + + doReadData := func(bucket, totalBuckets uint) { + packs := restic.IDSet{} + for pack := range chkr.GetPacks() { + if (uint(pack[0]) % totalBuckets) == (bucket - 1) { + 
packs.Insert(pack) + } + } + packCount := uint64(len(packs)) + + if packCount < chkr.CountPacks() { + Verbosef(fmt.Sprintf("read group #%d of %d data packs (out of total %d packs in %d groups)\n", bucket, packCount, chkr.CountPacks(), totalBuckets)) + } else { + Verbosef("read all data\n") + } + + p := newReadProgress(gopts, restic.Stat{Blobs: packCount}) + errChan := make(chan error) + + go chkr.ReadPacks(gopts.ctx, packs, p, errChan) + + for err := range errChan { + errorsFound = true + fmt.Fprintf(os.Stderr, "%v\n", err) + } + } + + switch { + case opts.ReadData: + doReadData(1, 1) + case opts.ReadDataSubset != "": + dataSubset, _ := stringToIntSlice(opts.ReadDataSubset) + doReadData(dataSubset[0], dataSubset[1]) + } + + if errorsFound { + return errors.Fatal("repository contains errors") + } + + Verbosef("no errors were found\n") + + return nil +} diff --git a/cmd/restic/cmd_debug.go b/cmd/restic/cmd_debug.go new file mode 100644 index 000000000..8f25933f9 --- /dev/null +++ b/cmd/restic/cmd_debug.go @@ -0,0 +1,172 @@ +// +build debug + +package main + +import ( + "context" + "encoding/json" + "fmt" + "io" + "os" + + "github.com/spf13/cobra" + + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/pack" + "github.com/restic/restic/internal/repository" + "github.com/restic/restic/internal/restic" +) + +var cmdDebug = &cobra.Command{ + Use: "debug", + Short: "Debug commands", +} + +var cmdDebugDump = &cobra.Command{ + Use: "dump [indexes|snapshots|all|packs]", + Short: "Dump data structures", + Long: ` +The "dump" command dumps data structures from the repository as JSON objects. It +is used for debugging purposes only.`, + DisableAutoGenTag: true, + RunE: func(cmd *cobra.Command, args []string) error { + return runDebugDump(globalOptions, args) + }, +} + +func init() { + cmdRoot.AddCommand(cmdDebug) + cmdDebug.AddCommand(cmdDebugDump) +} + +func prettyPrintJSON(wr io.Writer, item interface{}) error { + buf, err := json.MarshalIndent(item, "", " ") + if err != nil { + return err + } + + _, err = wr.Write(append(buf, '\n')) + return err +} + +func debugPrintSnapshots(repo *repository.Repository, wr io.Writer) error { + return repo.List(context.TODO(), restic.SnapshotFile, func(id restic.ID, size int64) error { + snapshot, err := restic.LoadSnapshot(context.TODO(), repo, id) + if err != nil { + return err + } + + fmt.Fprintf(wr, "snapshot_id: %v\n", id) + + return prettyPrintJSON(wr, snapshot) + }) +} + +// Pack is the struct used in printPacks. +type Pack struct { + Name string `json:"name"` + + Blobs []Blob `json:"blobs"` +} + +// Blob is the struct used in printPacks. 
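+// Together, Pack and Blob serialize to JSON roughly like this (hypothetical,
+// abbreviated values, condensed to one line; the real output is indented by
+// prettyPrintJSON):
+//
+//	{"name":"3fa8d6d5...","blobs":[{"type":"data","length":2048,"id":"77bef543...","offset":0}]}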
+type Blob struct { + Type restic.BlobType `json:"type"` + Length uint `json:"length"` + ID restic.ID `json:"id"` + Offset uint `json:"offset"` +} + +func printPacks(repo *repository.Repository, wr io.Writer) error { + + return repo.List(context.TODO(), restic.DataFile, func(id restic.ID, size int64) error { + h := restic.Handle{Type: restic.DataFile, Name: id.String()} + + blobs, err := pack.List(repo.Key(), restic.ReaderAt(repo.Backend(), h), size) + if err != nil { + fmt.Fprintf(os.Stderr, "error for pack %v: %v\n", id.Str(), err) + return nil + } + + p := Pack{ + Name: id.String(), + Blobs: make([]Blob, len(blobs)), + } + for i, blob := range blobs { + p.Blobs[i] = Blob{ + Type: blob.Type, + Length: blob.Length, + ID: blob.ID, + Offset: blob.Offset, + } + } + + return prettyPrintJSON(os.Stdout, p) + }) + + return nil +} + +func dumpIndexes(repo restic.Repository) error { + return repo.List(context.TODO(), restic.IndexFile, func(id restic.ID, size int64) error { + fmt.Printf("index_id: %v\n", id) + + idx, err := repository.LoadIndex(context.TODO(), repo, id) + if err != nil { + return err + } + + return idx.Dump(os.Stdout) + }) +} + +func runDebugDump(gopts GlobalOptions, args []string) error { + if len(args) != 1 { + return errors.Fatal("type not specified") + } + + repo, err := OpenRepository(gopts) + if err != nil { + return err + } + + if !gopts.NoLock { + lock, err := lockRepo(repo) + defer unlockRepo(lock) + if err != nil { + return err + } + } + + err = repo.LoadIndex(gopts.ctx) + if err != nil { + return err + } + + tpe := args[0] + + switch tpe { + case "indexes": + return dumpIndexes(repo) + case "snapshots": + return debugPrintSnapshots(repo, os.Stdout) + case "packs": + return printPacks(repo, os.Stdout) + case "all": + fmt.Printf("snapshots:\n") + err := debugPrintSnapshots(repo, os.Stdout) + if err != nil { + return err + } + + fmt.Printf("\nindexes:\n") + err = dumpIndexes(repo) + if err != nil { + return err + } + + return nil + default: + return errors.Fatalf("no such type %q", tpe) + } +} diff --git a/cmd/restic/cmd_diff.go b/cmd/restic/cmd_diff.go new file mode 100644 index 000000000..f37c2c7ef --- /dev/null +++ b/cmd/restic/cmd_diff.go @@ -0,0 +1,356 @@ +package main + +import ( + "context" + "path" + "reflect" + "sort" + + "github.com/restic/restic/internal/debug" + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/repository" + "github.com/restic/restic/internal/restic" + "github.com/spf13/cobra" +) + +var cmdDiff = &cobra.Command{ + Use: "diff snapshot-ID snapshot-ID", + Short: "Show differences between two snapshots", + Long: ` +The "diff" command shows differences from the first to the second snapshot. The +first characters in each line display what has happened to a particular file or +directory: + +* + The item was added +* - The item was removed +* U The metadata (access mode, timestamps, ...) for the item was updated +* M The file's content was modified +* T The type was changed, e.g. a file was made a symlink +`, + DisableAutoGenTag: true, + RunE: func(cmd *cobra.Command, args []string) error { + return runDiff(diffOptions, globalOptions, args) + }, +} + +// DiffOptions collects all options for the diff command. 
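+// For example (hypothetical snapshot IDs and paths), an abridged run might
+// print:
+//
+//	$ restic diff 5845b002 2ab627a6
+//	M    /home/user/notes.txt
+//	+    /home/user/new-report.pdf
+//	-    /home/user/old-draft.txt
+//
+// The five-character mode column comes from the Printf("%-5s%v\n", ...) calls
+// further down.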
+type DiffOptions struct { + ShowMetadata bool +} + +var diffOptions DiffOptions + +func init() { + cmdRoot.AddCommand(cmdDiff) + + f := cmdDiff.Flags() + f.BoolVar(&diffOptions.ShowMetadata, "metadata", false, "print changes in metadata") +} + +func loadSnapshot(ctx context.Context, repo *repository.Repository, desc string) (*restic.Snapshot, error) { + id, err := restic.FindSnapshot(repo, desc) + if err != nil { + return nil, err + } + + return restic.LoadSnapshot(ctx, repo, id) +} + +// Comparer collects all things needed to compare two snapshots. +type Comparer struct { + repo restic.Repository + opts DiffOptions +} + +// DiffStat collects stats for all types of items. +type DiffStat struct { + Files, Dirs, Others int + DataBlobs, TreeBlobs int + Bytes int +} + +// Add adds stats information for node to s. +func (s *DiffStat) Add(node *restic.Node) { + if node == nil { + return + } + + switch node.Type { + case "file": + s.Files++ + case "dir": + s.Dirs++ + default: + s.Others++ + } +} + +// addBlobs adds the blobs of node to s. +func addBlobs(bs restic.BlobSet, node *restic.Node) { + if node == nil { + return + } + + switch node.Type { + case "file": + for _, blob := range node.Content { + h := restic.BlobHandle{ + ID: blob, + Type: restic.DataBlob, + } + bs.Insert(h) + } + case "dir": + h := restic.BlobHandle{ + ID: *node.Subtree, + Type: restic.TreeBlob, + } + bs.Insert(h) + } +} + +// DiffStats collects the differences between two snapshots. +type DiffStats struct { + ChangedFiles int + Added DiffStat + Removed DiffStat + BlobsBefore, BlobsAfter restic.BlobSet +} + +// NewDiffStats creates new stats for a diff run. +func NewDiffStats() *DiffStats { + return &DiffStats{ + BlobsBefore: restic.NewBlobSet(), + BlobsAfter: restic.NewBlobSet(), + } +} + +// updateBlobs updates the blob counters in the stats struct. 
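+//
+// For example (hypothetical sizes): a set containing three data blobs of
+// 1 KiB, 2 KiB and 4 KiB leaves stats with DataBlobs == 3 and Bytes == 7168.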
+func updateBlobs(repo restic.Repository, blobs restic.BlobSet, stats *DiffStat) { + for h := range blobs { + switch h.Type { + case restic.DataBlob: + stats.DataBlobs++ + case restic.TreeBlob: + stats.TreeBlobs++ + } + + size, found := repo.LookupBlobSize(h.ID, h.Type) + if !found { + Warnf("unable to find blob size for %v\n", h) + continue + } + + stats.Bytes += int(size) + } +} + +func (c *Comparer) printDir(ctx context.Context, mode string, stats *DiffStat, blobs restic.BlobSet, prefix string, id restic.ID) error { + debug.Log("print %v tree %v", mode, id) + tree, err := c.repo.LoadTree(ctx, id) + if err != nil { + return err + } + + for _, node := range tree.Nodes { + name := path.Join(prefix, node.Name) + if node.Type == "dir" { + name += "/" + } + Printf("%-5s%v\n", mode, name) + stats.Add(node) + addBlobs(blobs, node) + + if node.Type == "dir" { + err := c.printDir(ctx, mode, stats, blobs, name, *node.Subtree) + if err != nil { + Warnf("error: %v\n", err) + } + } + } + + return nil +} + +func uniqueNodeNames(tree1, tree2 *restic.Tree) (tree1Nodes, tree2Nodes map[string]*restic.Node, uniqueNames []string) { + names := make(map[string]struct{}) + tree1Nodes = make(map[string]*restic.Node) + for _, node := range tree1.Nodes { + tree1Nodes[node.Name] = node + names[node.Name] = struct{}{} + } + + tree2Nodes = make(map[string]*restic.Node) + for _, node := range tree2.Nodes { + tree2Nodes[node.Name] = node + names[node.Name] = struct{}{} + } + + uniqueNames = make([]string, 0, len(names)) + for name := range names { + uniqueNames = append(uniqueNames, name) + } + + sort.Sort(sort.StringSlice(uniqueNames)) + return tree1Nodes, tree2Nodes, uniqueNames +} + +func (c *Comparer) diffTree(ctx context.Context, stats *DiffStats, prefix string, id1, id2 restic.ID) error { + debug.Log("diffing %v to %v", id1, id2) + tree1, err := c.repo.LoadTree(ctx, id1) + if err != nil { + return err + } + + tree2, err := c.repo.LoadTree(ctx, id2) + if err != nil { + return err + } + + tree1Nodes, tree2Nodes, names := uniqueNodeNames(tree1, tree2) + + for _, name := range names { + node1, t1 := tree1Nodes[name] + node2, t2 := tree2Nodes[name] + + addBlobs(stats.BlobsBefore, node1) + addBlobs(stats.BlobsAfter, node2) + + switch { + case t1 && t2: + name := path.Join(prefix, name) + mod := "" + + if node1.Type != node2.Type { + mod += "T" + } + + if node2.Type == "dir" { + name += "/" + } + + if node1.Type == "file" && + node2.Type == "file" && + !reflect.DeepEqual(node1.Content, node2.Content) { + mod += "M" + stats.ChangedFiles++ + } else if c.opts.ShowMetadata && !node1.Equals(*node2) { + mod += "U" + } + + if mod != "" { + Printf("%-5s%v\n", mod, name) + } + + if node1.Type == "dir" && node2.Type == "dir" { + err := c.diffTree(ctx, stats, name, *node1.Subtree, *node2.Subtree) + if err != nil { + Warnf("error: %v\n", err) + } + } + case t1 && !t2: + prefix := path.Join(prefix, name) + if node1.Type == "dir" { + prefix += "/" + } + Printf("%-5s%v\n", "-", prefix) + stats.Removed.Add(node1) + + if node1.Type == "dir" { + err := c.printDir(ctx, "-", &stats.Removed, stats.BlobsBefore, prefix, *node1.Subtree) + if err != nil { + Warnf("error: %v\n", err) + } + } + case !t1 && t2: + prefix := path.Join(prefix, name) + if node2.Type == "dir" { + prefix += "/" + } + Printf("%-5s%v\n", "+", prefix) + stats.Added.Add(node2) + + if node2.Type == "dir" { + err := c.printDir(ctx, "+", &stats.Added, stats.BlobsAfter, prefix, *node2.Subtree) + if err != nil { + Warnf("error: %v\n", err) + } + } + } + } + + return nil +} + 
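+// A condensed sketch of how the pieces above fit together (assuming an
+// already-opened repo and two resolved snapshots; error handling omitted):
+//
+//	c := &Comparer{repo: repo, opts: opts}
+//	stats := NewDiffStats()
+//	_ = c.diffTree(ctx, stats, "/", *sn1.Tree, *sn2.Tree)
+//	both := stats.BlobsBefore.Intersect(stats.BlobsAfter)
+//	updateBlobs(repo, stats.BlobsBefore.Sub(both), &stats.Removed)
+//	updateBlobs(repo, stats.BlobsAfter.Sub(both), &stats.Added)
+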
+func runDiff(opts DiffOptions, gopts GlobalOptions, args []string) error { + if len(args) != 2 { + return errors.Fatalf("specify two snapshot IDs") + } + + ctx, cancel := context.WithCancel(gopts.ctx) + defer cancel() + + repo, err := OpenRepository(gopts) + if err != nil { + return err + } + + if err = repo.LoadIndex(ctx); err != nil { + return err + } + + if !gopts.NoLock { + lock, err := lockRepo(repo) + defer unlockRepo(lock) + if err != nil { + return err + } + } + + sn1, err := loadSnapshot(ctx, repo, args[0]) + if err != nil { + return err + } + + sn2, err := loadSnapshot(ctx, repo, args[1]) + if err != nil { + return err + } + + Verbosef("comparing snapshot %v to %v:\n\n", sn1.ID().Str(), sn2.ID().Str()) + + if sn1.Tree == nil { + return errors.Errorf("snapshot %v has nil tree", sn1.ID().Str()) + } + + if sn2.Tree == nil { + return errors.Errorf("snapshot %v has nil tree", sn2.ID().Str()) + } + + c := &Comparer{ + repo: repo, + opts: diffOptions, + } + + stats := NewDiffStats() + + err = c.diffTree(ctx, stats, "/", *sn1.Tree, *sn2.Tree) + if err != nil { + return err + } + + both := stats.BlobsBefore.Intersect(stats.BlobsAfter) + updateBlobs(repo, stats.BlobsBefore.Sub(both), &stats.Removed) + updateBlobs(repo, stats.BlobsAfter.Sub(both), &stats.Added) + + Printf("\n") + Printf("Files: %5d new, %5d removed, %5d changed\n", stats.Added.Files, stats.Removed.Files, stats.ChangedFiles) + Printf("Dirs: %5d new, %5d removed\n", stats.Added.Dirs, stats.Removed.Dirs) + Printf("Others: %5d new, %5d removed\n", stats.Added.Others, stats.Removed.Others) + Printf("Data Blobs: %5d new, %5d removed\n", stats.Added.DataBlobs, stats.Removed.DataBlobs) + Printf("Tree Blobs: %5d new, %5d removed\n", stats.Added.TreeBlobs, stats.Removed.TreeBlobs) + Printf(" Added: %-5s\n", formatBytes(uint64(stats.Added.Bytes))) + Printf(" Removed: %-5s\n", formatBytes(uint64(stats.Removed.Bytes))) + + return nil +} diff --git a/cmd/restic/cmd_dump.go b/cmd/restic/cmd_dump.go new file mode 100644 index 000000000..a2e4fbe4a --- /dev/null +++ b/cmd/restic/cmd_dump.go @@ -0,0 +1,182 @@ +package main + +import ( + "context" + "fmt" + "os" + "path" + "path/filepath" + + "github.com/restic/restic/internal/debug" + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/restic" + + "github.com/spf13/cobra" +) + +var cmdDump = &cobra.Command{ + Use: "dump [flags] snapshotID file", + Short: "Print a backed-up file to stdout", + Long: ` +The "dump" command extracts a single file from a snapshot from the repository and +prints its contents to stdout. + +The special snapshot "latest" can be used to use the latest snapshot in the +repository. +`, + DisableAutoGenTag: true, + RunE: func(cmd *cobra.Command, args []string) error { + return runDump(dumpOptions, globalOptions, args) + }, +} + +// DumpOptions collects all options for the dump command. 
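+// For example (hypothetical path), `restic dump latest /home/user/work.txt`
+// writes the file /home/user/work.txt from the most recent snapshot to
+// stdout; --host, --path and --tag narrow down which snapshot "latest" picks.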
+type DumpOptions struct { + Host string + Paths []string + Tags restic.TagLists +} + +var dumpOptions DumpOptions + +func init() { + cmdRoot.AddCommand(cmdDump) + + flags := cmdDump.Flags() + flags.StringVarP(&dumpOptions.Host, "host", "H", "", `only consider snapshots for this host when the snapshot ID is "latest"`) + flags.Var(&dumpOptions.Tags, "tag", "only consider snapshots which include this `taglist` for snapshot ID \"latest\"") + flags.StringArrayVar(&dumpOptions.Paths, "path", nil, "only consider snapshots which include this (absolute) `path` for snapshot ID \"latest\"") +} + +func splitPath(p string) []string { + d, f := path.Split(p) + if d == "" || d == "/" { + return []string{f} + } + s := splitPath(path.Clean(d)) + return append(s, f) +} + +func dumpNode(ctx context.Context, repo restic.Repository, node *restic.Node) error { + var buf []byte + for _, id := range node.Content { + size, found := repo.LookupBlobSize(id, restic.DataBlob) + if !found { + return errors.Errorf("id %v not found in repository", id) + } + + buf = buf[:cap(buf)] + if len(buf) < restic.CiphertextLength(int(size)) { + buf = restic.NewBlobBuffer(int(size)) + } + + n, err := repo.LoadBlob(ctx, restic.DataBlob, id, buf) + if err != nil { + return err + } + buf = buf[:n] + + _, err = os.Stdout.Write(buf) + if err != nil { + return errors.Wrap(err, "Write") + } + } + return nil +} + +func printFromTree(ctx context.Context, tree *restic.Tree, repo restic.Repository, prefix string, pathComponents []string) error { + if tree == nil { + return fmt.Errorf("called with a nil tree") + } + if repo == nil { + return fmt.Errorf("called with a nil repository") + } + l := len(pathComponents) + if l == 0 { + return fmt.Errorf("empty path components") + } + item := filepath.Join(prefix, pathComponents[0]) + for _, node := range tree.Nodes { + if node.Name == pathComponents[0] { + switch { + case l == 1 && node.Type == "file": + return dumpNode(ctx, repo, node) + case l > 1 && node.Type == "dir": + subtree, err := repo.LoadTree(ctx, *node.Subtree) + if err != nil { + return errors.Wrapf(err, "cannot load subtree for %q", item) + } + return printFromTree(ctx, subtree, repo, item, pathComponents[1:]) + case l > 1: + return fmt.Errorf("%q should be a dir, but s a %q", item, node.Type) + case node.Type != "file": + return fmt.Errorf("%q should be a file, but is a %q", item, node.Type) + } + } + } + return fmt.Errorf("path %q not found in snapshot", item) +} + +func runDump(opts DumpOptions, gopts GlobalOptions, args []string) error { + ctx := gopts.ctx + + if len(args) != 2 { + return errors.Fatal("no file and no snapshot ID specified") + } + + snapshotIDString := args[0] + pathToPrint := args[1] + + debug.Log("dump file %q from %q", pathToPrint, snapshotIDString) + + splittedPath := splitPath(pathToPrint) + + repo, err := OpenRepository(gopts) + if err != nil { + return err + } + + if !gopts.NoLock { + lock, err := lockRepo(repo) + defer unlockRepo(lock) + if err != nil { + return err + } + } + + err = repo.LoadIndex(ctx) + if err != nil { + return err + } + + var id restic.ID + + if snapshotIDString == "latest" { + id, err = restic.FindLatestSnapshot(ctx, repo, opts.Paths, opts.Tags, opts.Host) + if err != nil { + Exitf(1, "latest snapshot for criteria not found: %v Paths:%v Host:%v", err, opts.Paths, opts.Host) + } + } else { + id, err = restic.FindSnapshot(repo, snapshotIDString) + if err != nil { + Exitf(1, "invalid id %q: %v", snapshotIDString, err) + } + } + + sn, err := restic.LoadSnapshot(gopts.ctx, repo, id) + if err 
!= nil { + Exitf(2, "loading snapshot %q failed: %v", snapshotIDString, err) + } + + tree, err := repo.LoadTree(ctx, *sn.Tree) + if err != nil { + Exitf(2, "loading tree for snapshot %q failed: %v", snapshotIDString, err) + } + + err = printFromTree(ctx, tree, repo, "", splittedPath) + if err != nil { + Exitf(2, "cannot dump file: %v", err) + } + + return nil +} diff --git a/cmd/restic/cmd_find.go b/cmd/restic/cmd_find.go new file mode 100644 index 000000000..48b952f5d --- /dev/null +++ b/cmd/restic/cmd_find.go @@ -0,0 +1,565 @@ +package main + +import ( + "context" + "encoding/json" + "strings" + "time" + + "github.com/spf13/cobra" + + "github.com/restic/restic/internal/debug" + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/filter" + "github.com/restic/restic/internal/restic" + "github.com/restic/restic/internal/walker" +) + +var cmdFind = &cobra.Command{ + Use: "find [flags] PATTERN...", + Short: "Find a file, a directory or restic IDs", + Long: ` +The "find" command searches for files or directories in snapshots stored in the +repo. +It can also be used to search for restic blobs or trees for troubleshooting.`, + Example: `restic find config.json +restic find --json "*.yml" "*.json" +restic find --json --blob 420f620f b46ebe8a ddd38656 +restic find --show-pack-id --blob 420f620f +restic find --tree 577c2bc9 f81f2e22 a62827a9 +restic find --pack 025c1d06`, + DisableAutoGenTag: true, + RunE: func(cmd *cobra.Command, args []string) error { + return runFind(findOptions, globalOptions, args) + }, +} + +const shortStr = 8 // Length of short IDs: 4 bytes as hex strings + +// FindOptions bundles all options for the find command. +type FindOptions struct { + Oldest string + Newest string + Snapshots []string + BlobID, TreeID bool + PackID, ShowPackID bool + CaseInsensitive bool + ListLong bool + Host string + Paths []string + Tags restic.TagLists +} + +var findOptions FindOptions + +func init() { + cmdRoot.AddCommand(cmdFind) + + f := cmdFind.Flags() + f.StringVarP(&findOptions.Oldest, "oldest", "O", "", "oldest modification date/time") + f.StringVarP(&findOptions.Newest, "newest", "N", "", "newest modification date/time") + f.StringArrayVarP(&findOptions.Snapshots, "snapshot", "s", nil, "snapshot `id` to search in (can be given multiple times)") + f.BoolVar(&findOptions.BlobID, "blob", false, "pattern is a blob-ID") + f.BoolVar(&findOptions.TreeID, "tree", false, "pattern is a tree-ID") + f.BoolVar(&findOptions.PackID, "pack", false, "pattern is a pack-ID") + f.BoolVar(&findOptions.ShowPackID, "show-pack-id", false, "display the pack-ID the blobs belong to (with --blob)") + f.BoolVarP(&findOptions.CaseInsensitive, "ignore-case", "i", false, "ignore case for pattern") + f.BoolVarP(&findOptions.ListLong, "long", "l", false, "use a long listing format showing size and mode") + + f.StringVarP(&findOptions.Host, "host", "H", "", "only consider snapshots for this `host`, when no snapshot ID is given") + f.Var(&findOptions.Tags, "tag", "only consider snapshots which include this `taglist`, when no snapshot-ID is given") + f.StringArrayVar(&findOptions.Paths, "path", nil, "only consider snapshots which include this (absolute) `path`, when no snapshot-ID is given") +} + +type findPattern struct { + oldest, newest time.Time + pattern []string + ignoreCase bool +} + +var timeFormats = []string{ + "2006-01-02", + "2006-01-02 15:04", + "2006-01-02 15:04:05", + "2006-01-02 15:04:05 -0700", + "2006-01-02 15:04:05 MST", + "02.01.2006", + "02.01.2006 15:04", + "02.01.2006 
15:04:05", + "02.01.2006 15:04:05 -0700", + "02.01.2006 15:04:05 MST", + "Mon Jan 2 15:04:05 -0700 MST 2006", +} + +func parseTime(str string) (time.Time, error) { + for _, fmt := range timeFormats { + if t, err := time.ParseInLocation(fmt, str, time.Local); err == nil { + return t, nil + } + } + + return time.Time{}, errors.Fatalf("unable to parse time: %q", str) +} + +type statefulOutput struct { + ListLong bool + JSON bool + inuse bool + newsn *restic.Snapshot + oldsn *restic.Snapshot + hits int +} + +func (s *statefulOutput) PrintPatternJSON(path string, node *restic.Node) { + type findNode restic.Node + b, err := json.Marshal(struct { + // Add these attributes + Path string `json:"path,omitempty"` + Permissions string `json:"permissions,omitempty"` + + *findNode + + // Make the following attributes disappear + Name byte `json:"name,omitempty"` + Inode byte `json:"inode,omitempty"` + ExtendedAttributes byte `json:"extended_attributes,omitempty"` + Device byte `json:"device,omitempty"` + Content byte `json:"content,omitempty"` + Subtree byte `json:"subtree,omitempty"` + }{ + Path: path, + Permissions: node.Mode.String(), + findNode: (*findNode)(node), + }) + if err != nil { + Warnf("Marshall failed: %v\n", err) + return + } + if !s.inuse { + Printf("[") + s.inuse = true + } + if s.newsn != s.oldsn { + if s.oldsn != nil { + Printf("],\"hits\":%d,\"snapshot\":%q},", s.hits, s.oldsn.ID()) + } + Printf(`{"matches":[`) + s.oldsn = s.newsn + s.hits = 0 + } + if s.hits > 0 { + Printf(",") + } + Printf(string(b)) + s.hits++ +} + +func (s *statefulOutput) PrintPatternNormal(path string, node *restic.Node) { + if s.newsn != s.oldsn { + if s.oldsn != nil { + Verbosef("\n") + } + s.oldsn = s.newsn + Verbosef("Found matching entries in snapshot %s\n", s.oldsn.ID().Str()) + } + Printf(formatNode(path, node, s.ListLong) + "\n") +} + +func (s *statefulOutput) PrintPattern(path string, node *restic.Node) { + if s.JSON { + s.PrintPatternJSON(path, node) + } else { + s.PrintPatternNormal(path, node) + } +} + +func (s *statefulOutput) PrintObjectJSON(kind, id, nodepath, treeID string, sn *restic.Snapshot) { + b, err := json.Marshal(struct { + // Add these attributes + ObjectType string `json:"object_type"` + ID string `json:"id"` + Path string `json:"path"` + ParentTree string `json:"parent_tree,omitempty"` + SnapshotID string `json:"snapshot"` + Time time.Time `json:"time,omitempty"` + }{ + ObjectType: kind, + ID: id, + Path: nodepath, + SnapshotID: sn.ID().String(), + ParentTree: treeID, + Time: sn.Time, + }) + if err != nil { + Warnf("Marshall failed: %v\n", err) + return + } + if !s.inuse { + Printf("[") + s.inuse = true + } + if s.hits > 0 { + Printf(",") + } + Printf(string(b)) + s.hits++ +} + +func (s *statefulOutput) PrintObjectNormal(kind, id, nodepath, treeID string, sn *restic.Snapshot) { + Printf("Found %s %s\n", kind, id) + if kind == "blob" { + Printf(" ... in file %s\n", nodepath) + Printf(" (tree %s)\n", treeID) + } else { + Printf(" ... path %s\n", nodepath) + } + Printf(" ... 
in snapshot %s (%s)\n", sn.ID().Str(), sn.Time.Local().Format(TimeFormat)) +} + +func (s *statefulOutput) PrintObject(kind, id, nodepath, treeID string, sn *restic.Snapshot) { + if s.JSON { + s.PrintObjectJSON(kind, id, nodepath, treeID, sn) + } else { + s.PrintObjectNormal(kind, id, nodepath, treeID, sn) + } +} + +func (s *statefulOutput) Finish() { + if s.JSON { + // do some finishing up + if s.oldsn != nil { + Printf("],\"hits\":%d,\"snapshot\":%q}", s.hits, s.oldsn.ID()) + } + if s.inuse { + Printf("]\n") + } else { + Printf("[]\n") + } + return + } +} + +// Finder bundles information needed to find a file or directory. +type Finder struct { + repo restic.Repository + pat findPattern + out statefulOutput + ignoreTrees restic.IDSet + blobIDs map[string]struct{} + treeIDs map[string]struct{} + itemsFound int +} + +func (f *Finder) findInSnapshot(ctx context.Context, sn *restic.Snapshot) error { + debug.Log("searching in snapshot %s\n for entries within [%s %s]", sn.ID(), f.pat.oldest, f.pat.newest) + + if sn.Tree == nil { + return errors.Errorf("snapshot %v has no tree", sn.ID().Str()) + } + + f.out.newsn = sn + return walker.Walk(ctx, f.repo, *sn.Tree, f.ignoreTrees, func(_ restic.ID, nodepath string, node *restic.Node, err error) (bool, error) { + if err != nil { + return false, err + } + + if node == nil { + return false, nil + } + + normalizedNodepath := nodepath + if f.pat.ignoreCase { + normalizedNodepath = strings.ToLower(nodepath) + } + + var foundMatch bool + + for _, pat := range f.pat.pattern { + found, err := filter.Match(pat, normalizedNodepath) + if err != nil { + return false, err + } + if found { + foundMatch = true + break + } + } + + var ( + ignoreIfNoMatch = true + errIfNoMatch error + ) + if node.Type == "dir" { + var childMayMatch bool + for _, pat := range f.pat.pattern { + mayMatch, err := filter.ChildMatch(pat, normalizedNodepath) + if err != nil { + return false, err + } + if mayMatch { + childMayMatch = true + break + } + } + + if !childMayMatch { + ignoreIfNoMatch = true + errIfNoMatch = walker.SkipNode + } else { + ignoreIfNoMatch = false + } + } + + if !foundMatch { + return ignoreIfNoMatch, errIfNoMatch + } + + if !f.pat.oldest.IsZero() && node.ModTime.Before(f.pat.oldest) { + debug.Log(" ModTime is older than %s\n", f.pat.oldest) + return ignoreIfNoMatch, errIfNoMatch + } + + if !f.pat.newest.IsZero() && node.ModTime.After(f.pat.newest) { + debug.Log(" ModTime is newer than %s\n", f.pat.newest) + return ignoreIfNoMatch, errIfNoMatch + } + + debug.Log(" found match\n") + f.out.PrintPattern(nodepath, node) + return false, nil + }) +} + +func (f *Finder) findIDs(ctx context.Context, sn *restic.Snapshot) error { + debug.Log("searching IDs in snapshot %s", sn.ID()) + + if sn.Tree == nil { + return errors.Errorf("snapshot %v has no tree", sn.ID().Str()) + } + + f.out.newsn = sn + return walker.Walk(ctx, f.repo, *sn.Tree, f.ignoreTrees, func(parentTreeID restic.ID, nodepath string, node *restic.Node, err error) (bool, error) { + if err != nil { + return false, err + } + + if node == nil { + return false, nil + } + + if node.Type == "dir" && f.treeIDs != nil { + treeID := node.Subtree + found := false + if _, ok := f.treeIDs[treeID.Str()]; ok { + found = true + } else if _, ok := f.treeIDs[treeID.String()]; ok { + found = true + } + if found { + f.out.PrintObject("tree", treeID.String(), nodepath, "", sn) + f.itemsFound++ + // Terminate if we have found all trees (and we are not + // looking for blobs) + if f.itemsFound >= len(f.treeIDs) && f.blobIDs == nil { + // 
Return an error to terminate the Walk + return true, errors.New("OK") + } + } + } + + if node.Type == "file" && f.blobIDs != nil { + for _, id := range node.Content { + idStr := id.String() + if _, ok := f.blobIDs[idStr]; !ok { + // Look for short ID form + if _, ok := f.blobIDs[idStr[:shortStr]]; !ok { + continue + } + // Replace the short ID with the long one + f.blobIDs[idStr] = struct{}{} + delete(f.blobIDs, idStr[:shortStr]) + } + f.out.PrintObject("blob", idStr, nodepath, parentTreeID.String(), sn) + break + } + } + + return false, nil + }) +} + +// packsToBlobs converts the list of pack IDs to a list of blob IDs that +// belong to those packs. +func (f *Finder) packsToBlobs(ctx context.Context, packs []string) error { + packIDs := make(map[string]struct{}) + for _, p := range packs { + packIDs[p] = struct{}{} + } + if f.blobIDs == nil { + f.blobIDs = make(map[string]struct{}) + } + + allPacksFound := false + packsFound := 0 + + debug.Log("Looking for packs...") + err := f.repo.List(ctx, restic.DataFile, func(id restic.ID, size int64) error { + if allPacksFound { + return nil + } + idStr := id.String() + if _, ok := packIDs[idStr]; !ok { + // Look for short ID form + if _, ok := packIDs[idStr[:shortStr]]; !ok { + return nil + } + } + debug.Log("Found pack %s", idStr) + blobs, _, err := f.repo.ListPack(ctx, id, size) + if err != nil { + return err + } + for _, b := range blobs { + f.blobIDs[b.ID.String()] = struct{}{} + } + // Stop searching when all packs have been found + packsFound++ + if packsFound >= len(packIDs) { + allPacksFound = true + } + return nil + }) + + if err != nil { + return err + } + + if !allPacksFound { + return errors.Fatal("unable to find all specified pack(s)") + } + + debug.Log("%d blobs found", len(f.blobIDs)) + return nil +} + +func (f *Finder) findBlobsPacks(ctx context.Context) { + idx := f.repo.Index() + for i := range f.blobIDs { + rid, err := restic.ParseID(i) + if err != nil { + Printf("Note: cannot find pack for blob '%s', unable to parse ID: %v\n", i, err) + continue + } + + blobs, found := idx.Lookup(rid, restic.DataBlob) + if !found { + Printf("Blob %s not found in the index\n", rid.Str()) + continue + } + + for _, b := range blobs { + if b.ID.Equal(rid) { + Printf("Blob belongs to pack %s\n ... 
Pack %s: %s\n", b.PackID, b.PackID.Str(), b.String()) + break + } + } + } +} + +func runFind(opts FindOptions, gopts GlobalOptions, args []string) error { + if len(args) == 0 { + return errors.Fatal("wrong number of arguments") + } + + var err error + pat := findPattern{pattern: args} + if opts.CaseInsensitive { + for i := range pat.pattern { + pat.pattern[i] = strings.ToLower(pat.pattern[i]) + } + pat.ignoreCase = true + } + + if opts.Oldest != "" { + if pat.oldest, err = parseTime(opts.Oldest); err != nil { + return err + } + } + + if opts.Newest != "" { + if pat.newest, err = parseTime(opts.Newest); err != nil { + return err + } + } + + // Check at most only one kind of IDs is provided: currently we + // can't mix types + if (opts.BlobID && opts.TreeID) || + (opts.BlobID && opts.PackID) || + (opts.TreeID && opts.PackID) { + return errors.Fatal("cannot have several ID types") + } + + repo, err := OpenRepository(gopts) + if err != nil { + return err + } + + if !gopts.NoLock { + lock, err := lockRepo(repo) + defer unlockRepo(lock) + if err != nil { + return err + } + } + + if err = repo.LoadIndex(gopts.ctx); err != nil { + return err + } + + ctx, cancel := context.WithCancel(gopts.ctx) + defer cancel() + + f := &Finder{ + repo: repo, + pat: pat, + out: statefulOutput{ListLong: opts.ListLong, JSON: globalOptions.JSON}, + ignoreTrees: restic.NewIDSet(), + } + + if opts.BlobID { + f.blobIDs = make(map[string]struct{}) + for _, pat := range f.pat.pattern { + f.blobIDs[pat] = struct{}{} + } + } + if opts.TreeID { + f.treeIDs = make(map[string]struct{}) + for _, pat := range f.pat.pattern { + f.treeIDs[pat] = struct{}{} + } + } + + if opts.PackID { + f.packsToBlobs(ctx, []string{f.pat.pattern[0]}) // TODO: support multiple packs + } + + for sn := range FindFilteredSnapshots(ctx, repo, opts.Host, opts.Tags, opts.Paths, opts.Snapshots) { + if f.blobIDs != nil || f.treeIDs != nil { + if err = f.findIDs(ctx, sn); err != nil && err.Error() != "OK" { + return err + } + continue + } + if err = f.findInSnapshot(ctx, sn); err != nil { + return err + } + } + f.out.Finish() + + if opts.ShowPackID && f.blobIDs != nil { + f.findBlobsPacks(ctx) + } + + return nil +} diff --git a/cmd/restic/cmd_forget.go b/cmd/restic/cmd_forget.go new file mode 100644 index 000000000..bafd540bd --- /dev/null +++ b/cmd/restic/cmd_forget.go @@ -0,0 +1,246 @@ +package main + +import ( + "context" + "encoding/json" + "sort" + "strings" + + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/restic" + "github.com/spf13/cobra" +) + +var cmdForget = &cobra.Command{ + Use: "forget [flags] [snapshot ID] [...]", + Short: "Remove snapshots from the repository", + Long: ` +The "forget" command removes snapshots according to a policy. Please note that +this command really only deletes the snapshot object in the repository, which +is a reference to data stored there. In order to remove this (now unreferenced) +data after 'forget' was run successfully, see the 'prune' command. `, + DisableAutoGenTag: true, + RunE: func(cmd *cobra.Command, args []string) error { + return runForget(forgetOptions, globalOptions, args) + }, +} + +// ForgetOptions collects all options for the forget command. 
+type ForgetOptions struct { + Last int + Hourly int + Daily int + Weekly int + Monthly int + Yearly int + Within restic.Duration + KeepTags restic.TagLists + + Host string + Tags restic.TagLists + Paths []string + Compact bool + + // Grouping + GroupBy string + DryRun bool + Prune bool +} + +var forgetOptions ForgetOptions + +func init() { + cmdRoot.AddCommand(cmdForget) + + f := cmdForget.Flags() + f.IntVarP(&forgetOptions.Last, "keep-last", "l", 0, "keep the last `n` snapshots") + f.IntVarP(&forgetOptions.Hourly, "keep-hourly", "H", 0, "keep the last `n` hourly snapshots") + f.IntVarP(&forgetOptions.Daily, "keep-daily", "d", 0, "keep the last `n` daily snapshots") + f.IntVarP(&forgetOptions.Weekly, "keep-weekly", "w", 0, "keep the last `n` weekly snapshots") + f.IntVarP(&forgetOptions.Monthly, "keep-monthly", "m", 0, "keep the last `n` monthly snapshots") + f.IntVarP(&forgetOptions.Yearly, "keep-yearly", "y", 0, "keep the last `n` yearly snapshots") + f.VarP(&forgetOptions.Within, "keep-within", "", "keep snapshots that are newer than `duration` (eg. 1y5m7d2h) relative to the latest snapshot") + + f.Var(&forgetOptions.KeepTags, "keep-tag", "keep snapshots with this `taglist` (can be specified multiple times)") + f.StringVar(&forgetOptions.Host, "host", "", "only consider snapshots with the given `host`") + f.StringVar(&forgetOptions.Host, "hostname", "", "only consider snapshots with the given `hostname`") + f.MarkDeprecated("hostname", "use --host") + + f.Var(&forgetOptions.Tags, "tag", "only consider snapshots which include this `taglist` in the format `tag[,tag,...]` (can be specified multiple times)") + + f.StringArrayVar(&forgetOptions.Paths, "path", nil, "only consider snapshots which include this (absolute) `path` (can be specified multiple times)") + f.BoolVarP(&forgetOptions.Compact, "compact", "c", false, "use compact format") + + f.StringVarP(&forgetOptions.GroupBy, "group-by", "g", "host,paths", "string for grouping snapshots by host,paths,tags") + f.BoolVarP(&forgetOptions.DryRun, "dry-run", "n", false, "do not delete anything, just print what would be done") + f.BoolVar(&forgetOptions.Prune, "prune", false, "automatically run the 'prune' command if snapshots have been removed") + + f.SortFlags = false +} + +func runForget(opts ForgetOptions, gopts GlobalOptions, args []string) error { + repo, err := OpenRepository(gopts) + if err != nil { + return err + } + + lock, err := lockRepoExclusive(repo) + defer unlockRepo(lock) + if err != nil { + return err + } + + // group by hostname and dirs + type key struct { + Hostname string + Paths []string + Tags []string + } + snapshotGroups := make(map[string]restic.Snapshots) + + var GroupByTag bool + var GroupByHost bool + var GroupByPath bool + var GroupOptionList []string + + GroupOptionList = strings.Split(opts.GroupBy, ",") + + for _, option := range GroupOptionList { + switch option { + case "host": + GroupByHost = true + case "paths": + GroupByPath = true + case "tags": + GroupByTag = true + case "": + default: + return errors.Fatal("unknown grouping option: '" + option + "'") + } + } + + removeSnapshots := 0 + + ctx, cancel := context.WithCancel(gopts.ctx) + defer cancel() + for sn := range FindFilteredSnapshots(ctx, repo, opts.Host, opts.Tags, opts.Paths, args) { + if len(args) > 0 { + // When explicit snapshots args are given, remove them immediately. 
+			if !opts.DryRun {
+				h := restic.Handle{Type: restic.SnapshotFile, Name: sn.ID().String()}
+				if err = repo.Backend().Remove(gopts.ctx, h); err != nil {
+					return err
+				}
+				Verbosef("removed snapshot %v\n", sn.ID().Str())
+				removeSnapshots++
+			} else {
+				Verbosef("would have removed snapshot %v\n", sn.ID().Str())
+			}
+		} else {
+			// Determine the grouping keys for this snapshot.
+			var tags []string
+			var hostname string
+			var paths []string
+
+			if GroupByTag {
+				tags = sn.Tags
+				sort.StringSlice(tags).Sort()
+			}
+			if GroupByHost {
+				hostname = sn.Hostname
+			}
+			if GroupByPath {
+				paths = sn.Paths
+			}
+
+			sort.StringSlice(sn.Paths).Sort()
+			var k []byte
+			var err error
+
+			k, err = json.Marshal(key{Tags: tags, Hostname: hostname, Paths: paths})
+
+			if err != nil {
+				return err
+			}
+			snapshotGroups[string(k)] = append(snapshotGroups[string(k)], sn)
+		}
+	}
+
+	policy := restic.ExpirePolicy{
+		Last:    opts.Last,
+		Hourly:  opts.Hourly,
+		Daily:   opts.Daily,
+		Weekly:  opts.Weekly,
+		Monthly: opts.Monthly,
+		Yearly:  opts.Yearly,
+		Within:  opts.Within,
+		Tags:    opts.KeepTags,
+	}
+
+	if policy.Empty() && len(args) == 0 {
+		Verbosef("no policy was specified, no snapshots will be removed\n")
+	}
+
+	if !policy.Empty() {
+		Verbosef("Applying Policy: %v\n", policy)
+
+		for k, snapshotGroup := range snapshotGroups {
+			var key key
+			if err := json.Unmarshal([]byte(k), &key); err != nil {
+				return err
+			}
+
+			// Info
+			Verbosef("snapshots")
+			var infoStrings []string
+			if GroupByTag {
+				infoStrings = append(infoStrings, "tags ["+strings.Join(key.Tags, ", ")+"]")
+			}
+			if GroupByHost {
+				infoStrings = append(infoStrings, "host ["+key.Hostname+"]")
+			}
+			if GroupByPath {
+				infoStrings = append(infoStrings, "paths ["+strings.Join(key.Paths, ", ")+"]")
+			}
+			if infoStrings != nil {
+				Verbosef(" for (" + strings.Join(infoStrings, ", ") + ")")
+			}
+			Verbosef(":\n\n")
+
+			keep, remove, reasons := restic.ApplyPolicy(snapshotGroup, policy)
+
+			if len(keep) != 0 && !gopts.Quiet {
+				Printf("keep %d snapshots:\n", len(keep))
+				PrintSnapshots(globalOptions.stdout, keep, reasons, opts.Compact)
+				Printf("\n")
+			}
+
+			if len(remove) != 0 && !gopts.Quiet {
+				Printf("remove %d snapshots:\n", len(remove))
+				PrintSnapshots(globalOptions.stdout, remove, nil, opts.Compact)
+				Printf("\n")
+			}
+
+			removeSnapshots += len(remove)
+
+			if !opts.DryRun {
+				for _, sn := range remove {
+					h := restic.Handle{Type: restic.SnapshotFile, Name: sn.ID().String()}
+					err = repo.Backend().Remove(gopts.ctx, h)
+					if err != nil {
+						return err
+					}
+				}
+			}
+		}
+	}
+
+	if removeSnapshots > 0 && opts.Prune {
+		Verbosef("%d snapshots have been removed, running prune\n", removeSnapshots)
+		if !opts.DryRun {
+			return pruneRepository(gopts, repo)
+		}
+	}
+
+	return nil
+}
diff --git a/cmd/restic/cmd_generate.go b/cmd/restic/cmd_generate.go
new file mode 100644
index 000000000..5c42537dc
--- /dev/null
+++ b/cmd/restic/cmd_generate.go
@@ -0,0 +1,94 @@
+package main
+
+import (
+	"time"
+
+	"github.com/restic/restic/internal/errors"
+	"github.com/spf13/cobra"
+	"github.com/spf13/cobra/doc"
+)
+
+var cmdGenerate = &cobra.Command{
+	Use:   "generate [command]",
+	Short: "Generate manual pages and auto-completion files (bash, zsh)",
+	Long: `
+The "generate" command writes automatically generated files like the man pages
+and the auto-completion files for bash and zsh.
+`, + DisableAutoGenTag: true, + RunE: runGenerate, +} + +type generateOptions struct { + ManDir string + BashCompletionFile string + ZSHCompletionFile string +} + +var genOpts generateOptions + +func init() { + cmdRoot.AddCommand(cmdGenerate) + fs := cmdGenerate.Flags() + fs.StringVar(&genOpts.ManDir, "man", "", "write man pages to `directory`") + fs.StringVar(&genOpts.BashCompletionFile, "bash-completion", "", "write bash completion `file`") + fs.StringVar(&genOpts.ZSHCompletionFile, "zsh-completion", "", "write zsh completion `file`") +} + +func writeManpages(dir string) error { + // use a fixed date for the man pages so that generating them is deterministic + date, err := time.Parse("Jan 2006", "Jan 2017") + if err != nil { + return err + } + + header := &doc.GenManHeader{ + Title: "restic backup", + Section: "1", + Source: "generated by `restic generate`", + Date: &date, + } + + Verbosef("writing man pages to directory %v\n", dir) + return doc.GenManTree(cmdRoot, header, dir) +} + +func writeBashCompletion(file string) error { + Verbosef("writing bash completion file to %v\n", file) + return cmdRoot.GenBashCompletionFile(file) +} + +func writeZSHCompletion(file string) error { + Verbosef("writing zsh completion file to %v\n", file) + return cmdRoot.GenZshCompletionFile(file) +} + +func runGenerate(cmd *cobra.Command, args []string) error { + if genOpts.ManDir != "" { + err := writeManpages(genOpts.ManDir) + if err != nil { + return err + } + } + + if genOpts.BashCompletionFile != "" { + err := writeBashCompletion(genOpts.BashCompletionFile) + if err != nil { + return err + } + } + + if genOpts.ZSHCompletionFile != "" { + err := writeZSHCompletion(genOpts.ZSHCompletionFile) + if err != nil { + return err + } + } + + var empty generateOptions + if genOpts == empty { + return errors.Fatal("nothing to do, please specify at least one output file/dir") + } + + return nil +} diff --git a/cmd/restic/cmd_init.go b/cmd/restic/cmd_init.go new file mode 100644 index 000000000..cb6d6fbdb --- /dev/null +++ b/cmd/restic/cmd_init.go @@ -0,0 +1,57 @@ +package main + +import ( + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/repository" + + "github.com/spf13/cobra" +) + +var cmdInit = &cobra.Command{ + Use: "init", + Short: "Initialize a new repository", + Long: ` +The "init" command initializes a new repository. +`, + DisableAutoGenTag: true, + RunE: func(cmd *cobra.Command, args []string) error { + return runInit(globalOptions, args) + }, +} + +func init() { + cmdRoot.AddCommand(cmdInit) +} + +func runInit(gopts GlobalOptions, args []string) error { + if gopts.Repo == "" { + return errors.Fatal("Please specify repository location (-r)") + } + + be, err := create(gopts.Repo, gopts.extended) + if err != nil { + return errors.Fatalf("create repository at %s failed: %v\n", gopts.Repo, err) + } + + gopts.password, err = ReadPasswordTwice(gopts, + "enter password for new repository: ", + "enter password again: ") + if err != nil { + return err + } + + s := repository.New(be) + + err = s.Init(gopts.ctx, gopts.password) + if err != nil { + return errors.Fatalf("create key in repository at %s failed: %v\n", gopts.Repo, err) + } + + Verbosef("created restic repository %v at %s\n", s.Config().ID[:10], gopts.Repo) + Verbosef("\n") + Verbosef("Please note that knowledge of your password is required to access\n") + Verbosef("the repository. 
Losing your password means that your data is\n") + Verbosef("irrecoverably lost.\n") + + return nil +} diff --git a/cmd/restic/cmd_key.go b/cmd/restic/cmd_key.go new file mode 100644 index 000000000..38ad77790 --- /dev/null +++ b/cmd/restic/cmd_key.go @@ -0,0 +1,227 @@ +package main + +import ( + "context" + "encoding/json" + "io/ioutil" + "os" + "strings" + + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/repository" + "github.com/restic/restic/internal/restic" + "github.com/restic/restic/internal/ui/table" + + "github.com/spf13/cobra" +) + +var cmdKey = &cobra.Command{ + Use: "key [list|add|remove|passwd] [ID]", + Short: "Manage keys (passwords)", + Long: ` +The "key" command manages keys (passwords) for accessing the repository. +`, + DisableAutoGenTag: true, + RunE: func(cmd *cobra.Command, args []string) error { + return runKey(globalOptions, args) + }, +} + +var newPasswordFile string + +func init() { + cmdRoot.AddCommand(cmdKey) + + flags := cmdKey.Flags() + flags.StringVarP(&newPasswordFile, "new-password-file", "", "", "the file from which to load a new password") +} + +func listKeys(ctx context.Context, s *repository.Repository, gopts GlobalOptions) error { + type keyInfo struct { + Current bool `json:"current"` + ID string `json:"id"` + UserName string `json:"userName"` + HostName string `json:"hostName"` + Created string `json:"created"` + } + + var keys []keyInfo + + err := s.List(ctx, restic.KeyFile, func(id restic.ID, size int64) error { + k, err := repository.LoadKey(ctx, s, id.String()) + if err != nil { + Warnf("LoadKey() failed: %v\n", err) + return nil + } + + key := keyInfo{ + Current: id.String() == s.KeyName(), + ID: id.Str(), + UserName: k.Username, + HostName: k.Hostname, + Created: k.Created.Local().Format(TimeFormat), + } + + keys = append(keys, key) + return nil + }) + + if err != nil { + return err + } + + if gopts.JSON { + return json.NewEncoder(globalOptions.stdout).Encode(keys) + } + + tab := table.New() + tab.AddColumn(" ID", "{{if .Current}}*{{else}} {{end}}{{ .ID }}") + tab.AddColumn("User", "{{ .UserName }}") + tab.AddColumn("Host", "{{ .HostName }}") + tab.AddColumn("Created", "{{ .Created }}") + + for _, key := range keys { + tab.AddRow(key) + } + + return tab.Write(globalOptions.stdout) +} + +// testKeyNewPassword is used to set a new password during integration testing. +var testKeyNewPassword string + +func getNewPassword(gopts GlobalOptions) (string, error) { + if testKeyNewPassword != "" { + return testKeyNewPassword, nil + } + + if newPasswordFile != "" { + return loadPasswordFromFile(newPasswordFile) + } + + // Since we already have an open repository, temporary remove the password + // to prompt the user for the passwd. 
+ newopts := gopts + newopts.password = "" + + return ReadPasswordTwice(newopts, + "enter password for new key: ", + "enter password again: ") +} + +func addKey(gopts GlobalOptions, repo *repository.Repository) error { + pw, err := getNewPassword(gopts) + if err != nil { + return err + } + + id, err := repository.AddKey(gopts.ctx, repo, pw, repo.Key()) + if err != nil { + return errors.Fatalf("creating new key failed: %v\n", err) + } + + Verbosef("saved new key as %s\n", id) + + return nil +} + +func deleteKey(ctx context.Context, repo *repository.Repository, name string) error { + if name == repo.KeyName() { + return errors.Fatal("refusing to remove key currently used to access repository") + } + + h := restic.Handle{Type: restic.KeyFile, Name: name} + err := repo.Backend().Remove(ctx, h) + if err != nil { + return err + } + + Verbosef("removed key %v\n", name) + return nil +} + +func changePassword(gopts GlobalOptions, repo *repository.Repository) error { + pw, err := getNewPassword(gopts) + if err != nil { + return err + } + + id, err := repository.AddKey(gopts.ctx, repo, pw, repo.Key()) + if err != nil { + return errors.Fatalf("creating new key failed: %v\n", err) + } + + h := restic.Handle{Type: restic.KeyFile, Name: repo.KeyName()} + err = repo.Backend().Remove(gopts.ctx, h) + if err != nil { + return err + } + + Verbosef("saved new key as %s\n", id) + + return nil +} + +func runKey(gopts GlobalOptions, args []string) error { + if len(args) < 1 || (args[0] == "remove" && len(args) != 2) || (args[0] != "remove" && len(args) != 1) { + return errors.Fatal("wrong number of arguments") + } + + ctx, cancel := context.WithCancel(gopts.ctx) + defer cancel() + + repo, err := OpenRepository(gopts) + if err != nil { + return err + } + + switch args[0] { + case "list": + lock, err := lockRepo(repo) + defer unlockRepo(lock) + if err != nil { + return err + } + + return listKeys(ctx, repo, gopts) + case "add": + lock, err := lockRepo(repo) + defer unlockRepo(lock) + if err != nil { + return err + } + + return addKey(gopts, repo) + case "remove": + lock, err := lockRepoExclusive(repo) + defer unlockRepo(lock) + if err != nil { + return err + } + + id, err := restic.Find(repo.Backend(), restic.KeyFile, args[1]) + if err != nil { + return err + } + + return deleteKey(gopts.ctx, repo, id) + case "passwd": + lock, err := lockRepoExclusive(repo) + defer unlockRepo(lock) + if err != nil { + return err + } + + return changePassword(gopts, repo) + } + + return nil +} + +func loadPasswordFromFile(pwdFile string) (string, error) { + s, err := ioutil.ReadFile(pwdFile) + if os.IsNotExist(err) { + return "", errors.Fatalf("%s does not exist", pwdFile) + } + return strings.TrimSpace(string(s)), errors.Wrap(err, "Readfile") +} diff --git a/cmd/restic/cmd_list.go b/cmd/restic/cmd_list.go new file mode 100644 index 000000000..9aa7dc9eb --- /dev/null +++ b/cmd/restic/cmd_list.go @@ -0,0 +1,80 @@ +package main + +import ( + "fmt" + + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/index" + "github.com/restic/restic/internal/restic" + + "github.com/spf13/cobra" +) + +var cmdList = &cobra.Command{ + Use: "list [blobs|packs|index|snapshots|keys|locks]", + Short: "List objects in the repository", + Long: ` +The "list" command allows listing objects in the repository based on type. 
+`, + DisableAutoGenTag: true, + RunE: func(cmd *cobra.Command, args []string) error { + return runList(cmd, globalOptions, args) + }, +} + +func init() { + cmdRoot.AddCommand(cmdList) +} + +func runList(cmd *cobra.Command, opts GlobalOptions, args []string) error { + if len(args) != 1 { + return errors.Fatal("type not specified, usage: " + cmd.Use) + } + + repo, err := OpenRepository(opts) + if err != nil { + return err + } + + if !opts.NoLock { + lock, err := lockRepo(repo) + defer unlockRepo(lock) + if err != nil { + return err + } + } + + var t restic.FileType + switch args[0] { + case "packs": + t = restic.DataFile + case "index": + t = restic.IndexFile + case "snapshots": + t = restic.SnapshotFile + case "keys": + t = restic.KeyFile + case "locks": + t = restic.LockFile + case "blobs": + idx, err := index.Load(opts.ctx, repo, nil) + if err != nil { + return err + } + + for _, pack := range idx.Packs { + for _, entry := range pack.Entries { + fmt.Printf("%v %v\n", entry.Type, entry.ID) + } + } + + return nil + default: + return errors.Fatal("invalid type") + } + + return repo.List(opts.ctx, t, func(id restic.ID, size int64) error { + Printf("%s\n", id) + return nil + }) +} diff --git a/cmd/restic/cmd_ls.go b/cmd/restic/cmd_ls.go new file mode 100644 index 000000000..db26467cd --- /dev/null +++ b/cmd/restic/cmd_ls.go @@ -0,0 +1,231 @@ +package main + +import ( + "context" + "encoding/json" + "os" + "strings" + "time" + + "github.com/spf13/cobra" + + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/fs" + "github.com/restic/restic/internal/restic" + "github.com/restic/restic/internal/walker" +) + +var cmdLs = &cobra.Command{ + Use: "ls [flags] [snapshotID] [dir...]", + Short: "List files in a snapshot", + Long: ` +The "ls" command lists files and directories in a snapshot. + +The special snapshot ID "latest" can be used to list files and +directories of the latest snapshot in the repository. The +--host flag can be used in conjunction to select the latest +snapshot originating from a certain host only. + +File listings can optionally be filtered by directories. Any +positional arguments after the snapshot ID are interpreted as +absolute directory paths, and only files inside those directories +will be listed. If the --recursive flag is used, then the filter +will allow traversing into matching directories' subfolders. +Any directory paths specified must be absolute (starting with +a path separator); paths use the forward slash '/' as separator. +`, + DisableAutoGenTag: true, + RunE: func(cmd *cobra.Command, args []string) error { + return runLs(lsOptions, globalOptions, args) + }, +} + +// LsOptions collects all options for the ls command. 
+type LsOptions struct { + ListLong bool + Host string + Tags restic.TagLists + Paths []string + Recursive bool +} + +var lsOptions LsOptions + +func init() { + cmdRoot.AddCommand(cmdLs) + + flags := cmdLs.Flags() + flags.BoolVarP(&lsOptions.ListLong, "long", "l", false, "use a long listing format showing size and mode") + flags.StringVarP(&lsOptions.Host, "host", "H", "", "only consider snapshots for this `host`, when no snapshot ID is given") + flags.Var(&lsOptions.Tags, "tag", "only consider snapshots which include this `taglist`, when no snapshot ID is given") + flags.StringArrayVar(&lsOptions.Paths, "path", nil, "only consider snapshots which include this (absolute) `path`, when no snapshot ID is given") + flags.BoolVar(&lsOptions.Recursive, "recursive", false, "include files in subfolders of the listed directories") +} + +type lsSnapshot struct { + *restic.Snapshot + ID *restic.ID `json:"id"` + ShortID string `json:"short_id"` + StructType string `json:"struct_type"` // "snapshot" +} + +type lsNode struct { + Name string `json:"name"` + Type string `json:"type"` + Path string `json:"path"` + UID uint32 `json:"uid"` + GID uint32 `json:"gid"` + Size uint64 `json:"size,omitempty"` + Mode os.FileMode `json:"mode,omitempty"` + ModTime time.Time `json:"mtime,omitempty"` + AccessTime time.Time `json:"atime,omitempty"` + ChangeTime time.Time `json:"ctime,omitempty"` + StructType string `json:"struct_type"` // "node" +} + +func runLs(opts LsOptions, gopts GlobalOptions, args []string) error { + if len(args) == 0 && opts.Host == "" && len(opts.Tags) == 0 && len(opts.Paths) == 0 { + return errors.Fatal("Invalid arguments, either give one or more snapshot IDs or set filters.") + } + + // extract any specific directories to walk + var dirs []string + if len(args) > 1 { + dirs = args[1:] + for _, dir := range dirs { + if !strings.HasPrefix(dir, "/") { + return errors.Fatal("All path filters must be absolute, starting with a forward slash '/'") + } + } + } + + withinDir := func(nodepath string) bool { + if len(dirs) == 0 { + return true + } + + for _, dir := range dirs { + // we're within one of the selected dirs, example: + // nodepath: "/test/foo" + // dir: "/test" + if fs.HasPathPrefix(dir, nodepath) { + return true + } + } + return false + } + + approachingMatchingTree := func(nodepath string) bool { + if len(dirs) == 0 { + return true + } + + for _, dir := range dirs { + // the current node path is a prefix for one of the + // directories, so we're interested in something deeper in the + // tree. 
Example: + // nodepath: "/test" + // dir: "/test/foo" + if fs.HasPathPrefix(nodepath, dir) { + return true + } + } + return false + } + + repo, err := OpenRepository(gopts) + if err != nil { + return err + } + + if err = repo.LoadIndex(gopts.ctx); err != nil { + return err + } + + ctx, cancel := context.WithCancel(gopts.ctx) + defer cancel() + + var ( + printSnapshot func(sn *restic.Snapshot) + printNode func(path string, node *restic.Node) + ) + + if gopts.JSON { + enc := json.NewEncoder(gopts.stdout) + + printSnapshot = func(sn *restic.Snapshot) { + enc.Encode(lsSnapshot{ + Snapshot: sn, + ID: sn.ID(), + ShortID: sn.ID().Str(), + StructType: "snapshot", + }) + } + + printNode = func(path string, node *restic.Node) { + enc.Encode(lsNode{ + Name: node.Name, + Type: node.Type, + Path: path, + UID: node.UID, + GID: node.GID, + Size: node.Size, + Mode: node.Mode, + ModTime: node.ModTime, + AccessTime: node.AccessTime, + ChangeTime: node.ChangeTime, + StructType: "node", + }) + } + } else { + printSnapshot = func(sn *restic.Snapshot) { + Verbosef("snapshot %s of %v filtered by %v at %s):\n", sn.ID().Str(), sn.Paths, dirs, sn.Time) + } + printNode = func(path string, node *restic.Node) { + Printf("%s\n", formatNode(path, node, lsOptions.ListLong)) + } + } + + for sn := range FindFilteredSnapshots(ctx, repo, opts.Host, opts.Tags, opts.Paths, args[:1]) { + printSnapshot(sn) + + err := walker.Walk(ctx, repo, *sn.Tree, nil, func(_ restic.ID, nodepath string, node *restic.Node, err error) (bool, error) { + if err != nil { + return false, err + } + if node == nil { + return false, nil + } + + if withinDir(nodepath) { + // if we're within a dir, print the node + printNode(nodepath, node) + + // if recursive listing is requested, signal the walker that it + // should continue walking recursively + if opts.Recursive { + return false, nil + } + } + + // if there's an upcoming match deeper in the tree (but we're not + // there yet), signal the walker to descend into any subdirs + if approachingMatchingTree(nodepath) { + return false, nil + } + + // otherwise, signal the walker to not walk recursively into any + // subdirs + if node.Type == "dir" { + return false, walker.SkipNode + } + return false, nil + }) + + if err != nil { + return err + } + } + + return nil +} diff --git a/cmd/restic/cmd_migrate.go b/cmd/restic/cmd_migrate.go new file mode 100644 index 000000000..d5738595e --- /dev/null +++ b/cmd/restic/cmd_migrate.go @@ -0,0 +1,108 @@ +package main + +import ( + "github.com/restic/restic/internal/migrations" + "github.com/restic/restic/internal/restic" + + "github.com/spf13/cobra" +) + +var cmdMigrate = &cobra.Command{ + Use: "migrate [name]", + Short: "Apply migrations", + Long: ` +The "migrate" command applies migrations to a repository. When no migration +name is explicitly given, a list of migrations that can be applied is printed. +`, + DisableAutoGenTag: true, + RunE: func(cmd *cobra.Command, args []string) error { + return runMigrate(migrateOptions, globalOptions, args) + }, +} + +// MigrateOptions bundles all options for the 'check' command. 
+type MigrateOptions struct { + Force bool +} + +var migrateOptions MigrateOptions + +func init() { + cmdRoot.AddCommand(cmdMigrate) + f := cmdMigrate.Flags() + f.BoolVarP(&migrateOptions.Force, "force", "f", false, `apply a migration a second time`) +} + +func checkMigrations(opts MigrateOptions, gopts GlobalOptions, repo restic.Repository) error { + ctx := gopts.ctx + Printf("available migrations:\n") + for _, m := range migrations.All { + ok, err := m.Check(ctx, repo) + if err != nil { + return err + } + + if ok { + Printf(" %v: %v\n", m.Name(), m.Desc()) + } + } + + return nil +} + +func applyMigrations(opts MigrateOptions, gopts GlobalOptions, repo restic.Repository, args []string) error { + ctx := gopts.ctx + + var firsterr error + for _, name := range args { + for _, m := range migrations.All { + if m.Name() == name { + ok, err := m.Check(ctx, repo) + if err != nil { + return err + } + + if !ok { + if !opts.Force { + Warnf("migration %v cannot be applied: check failed\nIf you want to apply this migration anyway, re-run with option --force\n", m.Name()) + continue + } + + Warnf("check for migration %v failed, continuing anyway\n", m.Name()) + } + + Printf("applying migration %v...\n", m.Name()) + if err = m.Apply(ctx, repo); err != nil { + Warnf("migration %v failed: %v\n", m.Name(), err) + if firsterr == nil { + firsterr = err + } + continue + } + + Printf("migration %v: success\n", m.Name()) + } + } + } + + return firsterr +} + +func runMigrate(opts MigrateOptions, gopts GlobalOptions, args []string) error { + repo, err := OpenRepository(gopts) + if err != nil { + return err + } + + lock, err := lockRepoExclusive(repo) + defer unlockRepo(lock) + if err != nil { + return err + } + + if len(args) == 0 { + return checkMigrations(opts, gopts, repo) + } + + return applyMigrations(opts, gopts, repo, args) +} diff --git a/cmd/restic/cmd_mount.go b/cmd/restic/cmd_mount.go new file mode 100644 index 000000000..39ff1a144 --- /dev/null +++ b/cmd/restic/cmd_mount.go @@ -0,0 +1,193 @@ +// +build !netbsd +// +build !openbsd +// +build !solaris +// +build !windows + +package main + +import ( + "os" + "strings" + "time" + + "github.com/spf13/cobra" + + "github.com/restic/restic/internal/debug" + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/restic" + + resticfs "github.com/restic/restic/internal/fs" + "github.com/restic/restic/internal/fuse" + + systemFuse "bazil.org/fuse" + "bazil.org/fuse/fs" +) + +var cmdMount = &cobra.Command{ + Use: "mount [flags] mountpoint", + Short: "Mount the repository", + Long: ` +The "mount" command mounts the repository via fuse to a directory. This is a +read-only mount. + +Snapshot Directories +==================== + +If you need a different template for all directories that contain snapshots, +you can pass a template via --snapshot-template. Example without colons: + + --snapshot-template "2006-01-02_15-04-05" + +You need to specify a sample format for exactly the following timestamp: + + Mon Jan 2 15:04:05 -0700 MST 2006 + +For details please see the documentation for time.Format() at: + https://godoc.org/time#Time.Format +`, + DisableAutoGenTag: true, + RunE: func(cmd *cobra.Command, args []string) error { + return runMount(mountOptions, globalOptions, args) + }, +} + +// MountOptions collects all options for the mount command. 
+type MountOptions struct { + OwnerRoot bool + AllowRoot bool + AllowOther bool + NoDefaultPermissions bool + Host string + Tags restic.TagLists + Paths []string + SnapshotTemplate string +} + +var mountOptions MountOptions + +func init() { + cmdRoot.AddCommand(cmdMount) + + mountFlags := cmdMount.Flags() + mountFlags.BoolVar(&mountOptions.OwnerRoot, "owner-root", false, "use 'root' as the owner of files and dirs") + mountFlags.BoolVar(&mountOptions.AllowRoot, "allow-root", false, "allow root user to access the data in the mounted directory") + mountFlags.BoolVar(&mountOptions.AllowOther, "allow-other", false, "allow other users to access the data in the mounted directory") + mountFlags.BoolVar(&mountOptions.NoDefaultPermissions, "no-default-permissions", false, "for 'allow-other', ignore Unix permissions and allow users to read all snapshot files") + + mountFlags.StringVarP(&mountOptions.Host, "host", "H", "", `only consider snapshots for this host`) + mountFlags.Var(&mountOptions.Tags, "tag", "only consider snapshots which include this `taglist`") + mountFlags.StringArrayVar(&mountOptions.Paths, "path", nil, "only consider snapshots which include this (absolute) `path`") + + mountFlags.StringVar(&mountOptions.SnapshotTemplate, "snapshot-template", time.RFC3339, "set `template` to use for snapshot dirs") +} + +func mount(opts MountOptions, gopts GlobalOptions, mountpoint string) error { + debug.Log("start mount") + defer debug.Log("finish mount") + + repo, err := OpenRepository(gopts) + if err != nil { + return err + } + + lock, err := lockRepo(repo) + defer unlockRepo(lock) + if err != nil { + return err + } + + err = repo.LoadIndex(gopts.ctx) + if err != nil { + return err + } + + if _, err := resticfs.Stat(mountpoint); os.IsNotExist(errors.Cause(err)) { + Verbosef("Mountpoint %s doesn't exist, creating it\n", mountpoint) + err = resticfs.Mkdir(mountpoint, os.ModeDir|0700) + if err != nil { + return err + } + } + + mountOptions := []systemFuse.MountOption{ + systemFuse.ReadOnly(), + systemFuse.FSName("restic"), + } + + if opts.AllowRoot { + mountOptions = append(mountOptions, systemFuse.AllowRoot()) + } + + if opts.AllowOther { + mountOptions = append(mountOptions, systemFuse.AllowOther()) + + // let the kernel check permissions unless it is explicitly disabled + if !opts.NoDefaultPermissions { + mountOptions = append(mountOptions, systemFuse.DefaultPermissions()) + } + } + + c, err := systemFuse.Mount(mountpoint, mountOptions...) 
+ if err != nil { + return err + } + + systemFuse.Debug = func(msg interface{}) { + debug.Log("fuse: %v", msg) + } + + cfg := fuse.Config{ + OwnerIsRoot: opts.OwnerRoot, + Host: opts.Host, + Tags: opts.Tags, + Paths: opts.Paths, + SnapshotTemplate: opts.SnapshotTemplate, + } + root, err := fuse.NewRoot(gopts.ctx, repo, cfg) + if err != nil { + return err + } + + Printf("Now serving the repository at %s\n", mountpoint) + Printf("Don't forget to umount after quitting!\n") + + debug.Log("serving mount at %v", mountpoint) + err = fs.Serve(c, root) + if err != nil { + return err + } + + <-c.Ready + return c.MountError +} + +func umount(mountpoint string) error { + return systemFuse.Unmount(mountpoint) +} + +func runMount(opts MountOptions, gopts GlobalOptions, args []string) error { + if opts.SnapshotTemplate == "" { + return errors.Fatal("snapshot template string cannot be empty") + } + + if strings.ContainsAny(opts.SnapshotTemplate, `\/`) { + return errors.Fatal("snapshot template string contains a slash (/) or backslash (\\) character") + } + + if len(args) == 0 { + return errors.Fatal("wrong number of parameters") + } + + mountpoint := args[0] + + AddCleanupHandler(func() error { + debug.Log("running umount cleanup handler for mount at %v", mountpoint) + err := umount(mountpoint) + if err != nil { + Warnf("unable to umount (maybe already umounted?): %v\n", err) + } + return nil + }) + + return mount(opts, gopts, mountpoint) +} diff --git a/cmd/restic/cmd_options.go b/cmd/restic/cmd_options.go new file mode 100644 index 000000000..6edcbebec --- /dev/null +++ b/cmd/restic/cmd_options.go @@ -0,0 +1,29 @@ +package main + +import ( + "fmt" + + "github.com/restic/restic/internal/options" + + "github.com/spf13/cobra" +) + +var optionsCmd = &cobra.Command{ + Use: "options", + Short: "Print list of extended options", + Long: ` +The "options" command prints a list of extended options. +`, + Hidden: true, + DisableAutoGenTag: true, + Run: func(cmd *cobra.Command, args []string) { + fmt.Printf("All Extended Options:\n") + for _, opt := range options.List() { + fmt.Printf(" %-15s %s\n", opt.Namespace+"."+opt.Name, opt.Text) + } + }, +} + +func init() { + cmdRoot.AddCommand(optionsCmd) +} diff --git a/cmd/restic/cmd_prune.go b/cmd/restic/cmd_prune.go new file mode 100644 index 000000000..125b41c7e --- /dev/null +++ b/cmd/restic/cmd_prune.go @@ -0,0 +1,307 @@ +package main + +import ( + "fmt" + "time" + + "github.com/restic/restic/internal/debug" + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/index" + "github.com/restic/restic/internal/repository" + "github.com/restic/restic/internal/restic" + + "github.com/spf13/cobra" +) + +var cmdPrune = &cobra.Command{ + Use: "prune [flags]", + Short: "Remove unneeded data from the repository", + Long: ` +The "prune" command checks the repository and removes data that is not +referenced and therefore not needed any more. +`, + DisableAutoGenTag: true, + RunE: func(cmd *cobra.Command, args []string) error { + return runPrune(globalOptions) + }, +} + +func init() { + cmdRoot.AddCommand(cmdPrune) +} + +func shortenStatus(maxLength int, s string) string { + if len(s) <= maxLength { + return s + } + + if maxLength < 3 { + return s[:maxLength] + } + + return s[:maxLength-3] + "..." +} + +// newProgressMax returns a progress that counts blobs. 
+func newProgressMax(show bool, max uint64, description string) *restic.Progress { + if !show { + return nil + } + + p := restic.NewProgress() + + p.OnUpdate = func(s restic.Stat, d time.Duration, ticker bool) { + status := fmt.Sprintf("[%s] %s %d / %d %s", + formatDuration(d), + formatPercent(s.Blobs, max), + s.Blobs, max, description) + + if w := stdoutTerminalWidth(); w > 0 { + status = shortenStatus(w, status) + } + + PrintProgress("%s", status) + } + + p.OnDone = func(s restic.Stat, d time.Duration, ticker bool) { + fmt.Printf("\n") + } + + return p +} + +func runPrune(gopts GlobalOptions) error { + repo, err := OpenRepository(gopts) + if err != nil { + return err + } + + lock, err := lockRepoExclusive(repo) + defer unlockRepo(lock) + if err != nil { + return err + } + + return pruneRepository(gopts, repo) +} + +func mixedBlobs(list []restic.Blob) bool { + var tree, data bool + + for _, pb := range list { + switch pb.Type { + case restic.TreeBlob: + tree = true + case restic.DataBlob: + data = true + } + + if tree && data { + return true + } + } + + return false +} + +func pruneRepository(gopts GlobalOptions, repo restic.Repository) error { + ctx := gopts.ctx + + err := repo.LoadIndex(ctx) + if err != nil { + return err + } + + var stats struct { + blobs int + packs int + snapshots int + bytes int64 + } + + Verbosef("counting files in repo\n") + err = repo.List(ctx, restic.DataFile, func(restic.ID, int64) error { + stats.packs++ + return nil + }) + if err != nil { + return err + } + + Verbosef("building new index for repo\n") + + bar := newProgressMax(!gopts.Quiet, uint64(stats.packs), "packs") + idx, invalidFiles, err := index.New(ctx, repo, restic.NewIDSet(), bar) + if err != nil { + return err + } + + for _, id := range invalidFiles { + Warnf("incomplete pack file (will be removed): %v\n", id) + } + + blobs := 0 + for _, pack := range idx.Packs { + stats.bytes += pack.Size + blobs += len(pack.Entries) + } + Verbosef("repository contains %v packs (%v blobs) with %v\n", + len(idx.Packs), blobs, formatBytes(uint64(stats.bytes))) + + blobCount := make(map[restic.BlobHandle]int) + var duplicateBlobs uint64 + var duplicateBytes uint64 + + // find duplicate blobs + for _, p := range idx.Packs { + for _, entry := range p.Entries { + stats.blobs++ + h := restic.BlobHandle{ID: entry.ID, Type: entry.Type} + blobCount[h]++ + + if blobCount[h] > 1 { + duplicateBlobs++ + duplicateBytes += uint64(entry.Length) + } + } + } + + Verbosef("processed %d blobs: %d duplicate blobs, %v duplicate\n", + stats.blobs, duplicateBlobs, formatBytes(uint64(duplicateBytes))) + Verbosef("load all snapshots\n") + + // find referenced blobs + snapshots, err := restic.LoadAllSnapshots(ctx, repo) + if err != nil { + return err + } + + stats.snapshots = len(snapshots) + + Verbosef("find data that is still in use for %d snapshots\n", stats.snapshots) + + usedBlobs := restic.NewBlobSet() + seenBlobs := restic.NewBlobSet() + + bar = newProgressMax(!gopts.Quiet, uint64(len(snapshots)), "snapshots") + bar.Start() + for _, sn := range snapshots { + debug.Log("process snapshot %v", sn.ID()) + + err = restic.FindUsedBlobs(ctx, repo, *sn.Tree, usedBlobs, seenBlobs) + if err != nil { + if repo.Backend().IsNotExist(err) { + return errors.Fatal("unable to load a tree from the repo: " + err.Error()) + } + + return err + } + + debug.Log("processed snapshot %v", sn.ID()) + bar.Report(restic.Stat{Blobs: 1}) + } + bar.Done() + + if len(usedBlobs) > stats.blobs { + return errors.Fatalf("number of used blobs is larger than number of 
available blobs!\n" + + "Please report this error (along with the output of the 'prune' run) at\n" + + "https://github.com/restic/restic/issues/new") + } + + Verbosef("found %d of %d data blobs still in use, removing %d blobs\n", + len(usedBlobs), stats.blobs, stats.blobs-len(usedBlobs)) + + // find packs that need a rewrite + rewritePacks := restic.NewIDSet() + for _, pack := range idx.Packs { + if mixedBlobs(pack.Entries) { + rewritePacks.Insert(pack.ID) + continue + } + + for _, blob := range pack.Entries { + h := restic.BlobHandle{ID: blob.ID, Type: blob.Type} + if !usedBlobs.Has(h) { + rewritePacks.Insert(pack.ID) + continue + } + + if blobCount[h] > 1 { + rewritePacks.Insert(pack.ID) + } + } + } + + removeBytes := duplicateBytes + + // find packs that are unneeded + removePacks := restic.NewIDSet() + + Verbosef("will remove %d invalid files\n", len(invalidFiles)) + for _, id := range invalidFiles { + removePacks.Insert(id) + } + + for packID, p := range idx.Packs { + + hasActiveBlob := false + for _, blob := range p.Entries { + h := restic.BlobHandle{ID: blob.ID, Type: blob.Type} + if usedBlobs.Has(h) { + hasActiveBlob = true + continue + } + + removeBytes += uint64(blob.Length) + } + + if hasActiveBlob { + continue + } + + removePacks.Insert(packID) + + if !rewritePacks.Has(packID) { + return errors.Fatalf("pack %v is unneeded, but not contained in rewritePacks", packID.Str()) + } + + rewritePacks.Delete(packID) + } + + Verbosef("will delete %d packs and rewrite %d packs, this frees %s\n", + len(removePacks), len(rewritePacks), formatBytes(uint64(removeBytes))) + + var obsoletePacks restic.IDSet + if len(rewritePacks) != 0 { + bar = newProgressMax(!gopts.Quiet, uint64(len(rewritePacks)), "packs rewritten") + bar.Start() + obsoletePacks, err = repository.Repack(ctx, repo, rewritePacks, usedBlobs, bar) + if err != nil { + return err + } + bar.Done() + } + + removePacks.Merge(obsoletePacks) + + if err = rebuildIndex(ctx, repo, removePacks); err != nil { + return err + } + + if len(removePacks) != 0 { + bar = newProgressMax(!gopts.Quiet, uint64(len(removePacks)), "packs deleted") + bar.Start() + for packID := range removePacks { + h := restic.Handle{Type: restic.DataFile, Name: packID.String()} + err = repo.Backend().Remove(ctx, h) + if err != nil { + Warnf("unable to remove file %v from the repository\n", packID.Str()) + } + bar.Report(restic.Stat{Blobs: 1}) + } + bar.Done() + } + + Verbosef("done\n") + return nil +} diff --git a/cmd/restic/cmd_rebuild_index.go b/cmd/restic/cmd_rebuild_index.go new file mode 100644 index 000000000..ec4287af1 --- /dev/null +++ b/cmd/restic/cmd_rebuild_index.go @@ -0,0 +1,95 @@ +package main + +import ( + "context" + + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/index" + "github.com/restic/restic/internal/restic" + + "github.com/spf13/cobra" +) + +var cmdRebuildIndex = &cobra.Command{ + Use: "rebuild-index [flags]", + Short: "Build a new index file", + Long: ` +The "rebuild-index" command creates a new index based on the pack files in the +repository. 
+`,
+	DisableAutoGenTag: true,
+	RunE: func(cmd *cobra.Command, args []string) error {
+		return runRebuildIndex(globalOptions)
+	},
+}
+
+func init() {
+	cmdRoot.AddCommand(cmdRebuildIndex)
+}
+
+func runRebuildIndex(gopts GlobalOptions) error {
+	repo, err := OpenRepository(gopts)
+	if err != nil {
+		return err
+	}
+
+	lock, err := lockRepoExclusive(repo)
+	defer unlockRepo(lock)
+	if err != nil {
+		return err
+	}
+
+	ctx, cancel := context.WithCancel(gopts.ctx)
+	defer cancel()
+	return rebuildIndex(ctx, repo, restic.NewIDSet())
+}
+
+func rebuildIndex(ctx context.Context, repo restic.Repository, ignorePacks restic.IDSet) error {
+	Verbosef("counting files in repo\n")
+
+	var packs uint64
+	err := repo.List(ctx, restic.DataFile, func(restic.ID, int64) error {
+		packs++
+		return nil
+	})
+	if err != nil {
+		return err
+	}
+
+	bar := newProgressMax(!globalOptions.Quiet, packs-uint64(len(ignorePacks)), "packs")
+	idx, _, err := index.New(ctx, repo, ignorePacks, bar)
+	if err != nil {
+		return err
+	}
+
+	Verbosef("finding old index files\n")
+
+	var supersedes restic.IDs
+	err = repo.List(ctx, restic.IndexFile, func(id restic.ID, size int64) error {
+		supersedes = append(supersedes, id)
+		return nil
+	})
+	if err != nil {
+		return err
+	}
+
+	ids, err := idx.Save(ctx, repo, supersedes)
+	if err != nil {
+		return errors.Fatalf("unable to save index, last error was: %v", err)
+	}
+
+	Verbosef("saved new indexes as %v\n", ids)
+
+	Verbosef("remove %d old index files\n", len(supersedes))
+
+	for _, id := range supersedes {
+		if err := repo.Backend().Remove(ctx, restic.Handle{
+			Type: restic.IndexFile,
+			Name: id.String(),
+		}); err != nil {
+			Warnf("error removing old index %v: %v\n", id.Str(), err)
+		}
+	}
+
+	return nil
+}
diff --git a/cmd/restic/cmd_recover.go b/cmd/restic/cmd_recover.go
new file mode 100644
index 000000000..677442be7
--- /dev/null
+++ b/cmd/restic/cmd_recover.go
@@ -0,0 +1,148 @@
+package main
+
+import (
+	"os"
+	"time"
+
+	"github.com/restic/restic/internal/errors"
+	"github.com/restic/restic/internal/restic"
+	"github.com/spf13/cobra"
+)
+
+var cmdRecover = &cobra.Command{
+	Use:   "recover [flags]",
+	Short: "Recover data from the repository",
+	Long: `
+The "recover" command builds a new snapshot from all directories it can find in
+the raw data of the repository. It can be used if, for example, a snapshot has
+been removed by accident with "forget".
+`,
+	DisableAutoGenTag: true,
+	RunE: func(cmd *cobra.Command, args []string) error {
+		return runRecover(globalOptions)
+	},
+}
+
+func init() {
+	cmdRoot.AddCommand(cmdRecover)
+}
+
+func runRecover(gopts GlobalOptions) error {
+	hostname, err := os.Hostname()
+	if err != nil {
+		return err
+	}
+
+	repo, err := OpenRepository(gopts)
+	if err != nil {
+		return err
+	}
+
+	lock, err := lockRepo(repo)
+	defer unlockRepo(lock)
+	if err != nil {
+		return err
+	}
+
+	Verbosef("load index files\n")
+	if err = repo.LoadIndex(gopts.ctx); err != nil {
+		return err
+	}
+
+	// trees maps a tree ID to whether or not it is referenced by a different
+	// tree. If it is not referenced, we have a root tree.
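+	// First pass below: every tree blob in the index starts out as
+	// unreferenced (false); the loop over the loaded trees then marks each
+	// subtree it encounters as referenced (true), so the IDs still mapped
+	// to false at the end are the roots.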
+ trees := make(map[restic.ID]bool) + + for blob := range repo.Index().Each(gopts.ctx) { + if blob.Blob.Type != restic.TreeBlob { + continue + } + trees[blob.Blob.ID] = false + } + + cur := 0 + max := len(trees) + Verbosef("load %d trees\n\n", len(trees)) + + for id := range trees { + cur++ + Verbosef("\rtree (%v/%v)", cur, max) + + if !trees[id] { + trees[id] = false + } + + tree, err := repo.LoadTree(gopts.ctx, id) + if err != nil { + Warnf("unable to load tree %v: %v\n", id.Str(), err) + continue + } + + for _, node := range tree.Nodes { + if node.Type != "dir" || node.Subtree == nil { + continue + } + + subtree := *node.Subtree + trees[subtree] = true + } + } + Verbosef("\ndone\n") + + roots := restic.NewIDSet() + for id, seen := range trees { + if seen { + continue + } + + roots.Insert(id) + } + + Verbosef("found %d roots\n", len(roots)) + + tree := restic.NewTree() + for id := range roots { + var subtreeID = id + node := restic.Node{ + Type: "dir", + Name: id.Str(), + Mode: 0755, + Subtree: &subtreeID, + AccessTime: time.Now(), + ModTime: time.Now(), + ChangeTime: time.Now(), + } + tree.Insert(&node) + } + + treeID, err := repo.SaveTree(gopts.ctx, tree) + if err != nil { + return errors.Fatalf("unable to save new tree to the repo: %v", err) + } + + err = repo.Flush(gopts.ctx) + if err != nil { + return errors.Fatalf("unable to save blobs to the repo: %v", err) + } + + err = repo.SaveIndex(gopts.ctx) + if err != nil { + return errors.Fatalf("unable to save new index to the repo: %v", err) + } + + sn, err := restic.NewSnapshot([]string{"/recover"}, []string{}, hostname, time.Now()) + if err != nil { + return errors.Fatalf("unable to save snapshot: %v", err) + } + + sn.Tree = &treeID + + id, err := repo.SaveJSONUnpacked(gopts.ctx, restic.SnapshotFile, sn) + if err != nil { + return errors.Fatalf("unable to save snapshot: %v", err) + } + + Printf("saved new snapshot %v\n", id.Str()) + + return nil +} diff --git a/cmd/restic/cmd_restore.go b/cmd/restic/cmd_restore.go new file mode 100644 index 000000000..477192eab --- /dev/null +++ b/cmd/restic/cmd_restore.go @@ -0,0 +1,169 @@ +package main + +import ( + "github.com/restic/restic/internal/debug" + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/filter" + "github.com/restic/restic/internal/restic" + "github.com/restic/restic/internal/restorer" + + "github.com/spf13/cobra" +) + +var cmdRestore = &cobra.Command{ + Use: "restore [flags] snapshotID", + Short: "Extract the data from a snapshot", + Long: ` +The "restore" command extracts the data from a snapshot from the repository to +a directory. + +The special snapshot "latest" can be used to restore the latest snapshot in the +repository. +`, + DisableAutoGenTag: true, + RunE: func(cmd *cobra.Command, args []string) error { + return runRestore(restoreOptions, globalOptions, args) + }, +} + +// RestoreOptions collects all options for the restore command. 
+type RestoreOptions struct { + Exclude []string + Include []string + Target string + Host string + Paths []string + Tags restic.TagLists + Verify bool +} + +var restoreOptions RestoreOptions + +func init() { + cmdRoot.AddCommand(cmdRestore) + + flags := cmdRestore.Flags() + flags.StringArrayVarP(&restoreOptions.Exclude, "exclude", "e", nil, "exclude a `pattern` (can be specified multiple times)") + flags.StringArrayVarP(&restoreOptions.Include, "include", "i", nil, "include a `pattern`, exclude everything else (can be specified multiple times)") + flags.StringVarP(&restoreOptions.Target, "target", "t", "", "directory to extract data to") + + flags.StringVarP(&restoreOptions.Host, "host", "H", "", `only consider snapshots for this host when the snapshot ID is "latest"`) + flags.Var(&restoreOptions.Tags, "tag", "only consider snapshots which include this `taglist` for snapshot ID \"latest\"") + flags.StringArrayVar(&restoreOptions.Paths, "path", nil, "only consider snapshots which include this (absolute) `path` for snapshot ID \"latest\"") + flags.BoolVar(&restoreOptions.Verify, "verify", false, "verify restored files content") +} + +func runRestore(opts RestoreOptions, gopts GlobalOptions, args []string) error { + ctx := gopts.ctx + + switch { + case len(args) == 0: + return errors.Fatal("no snapshot ID specified") + case len(args) > 1: + return errors.Fatalf("more than one snapshot ID specified: %v", args) + } + + if opts.Target == "" { + return errors.Fatal("please specify a directory to restore to (--target)") + } + + if len(opts.Exclude) > 0 && len(opts.Include) > 0 { + return errors.Fatal("exclude and include patterns are mutually exclusive") + } + + snapshotIDString := args[0] + + debug.Log("restore %v to %v", snapshotIDString, opts.Target) + + repo, err := OpenRepository(gopts) + if err != nil { + return err + } + + if !gopts.NoLock { + lock, err := lockRepo(repo) + defer unlockRepo(lock) + if err != nil { + return err + } + } + + err = repo.LoadIndex(ctx) + if err != nil { + return err + } + + var id restic.ID + + if snapshotIDString == "latest" { + id, err = restic.FindLatestSnapshot(ctx, repo, opts.Paths, opts.Tags, opts.Host) + if err != nil { + Exitf(1, "latest snapshot for criteria not found: %v Paths:%v Host:%v", err, opts.Paths, opts.Host) + } + } else { + id, err = restic.FindSnapshot(repo, snapshotIDString) + if err != nil { + Exitf(1, "invalid id %q: %v", snapshotIDString, err) + } + } + + res, err := restorer.NewRestorer(repo, id) + if err != nil { + Exitf(2, "creating restorer failed: %v\n", err) + } + + totalErrors := 0 + res.Error = func(location string, err error) error { + Warnf("ignoring error for %s: %s\n", location, err) + totalErrors++ + return nil + } + + selectExcludeFilter := func(item string, dstpath string, node *restic.Node) (selectedForRestore bool, childMayBeSelected bool) { + matched, _, err := filter.List(opts.Exclude, item) + if err != nil { + Warnf("error for exclude pattern: %v", err) + } + + // An exclude filter is basically a 'wildcard but foo', + // so even if a childMayMatch, other children of a dir may not, + // therefore childMayMatch does not matter, but we should not go down + // unless the dir is selected for restore + selectedForRestore = !matched + childMayBeSelected = selectedForRestore && node.Type == "dir" + + return selectedForRestore, childMayBeSelected + } + + selectIncludeFilter := func(item string, dstpath string, node *restic.Node) (selectedForRestore bool, childMayBeSelected bool) { + matched, childMayMatch, err := 
filter.List(opts.Include, item) + if err != nil { + Warnf("error for include pattern: %v", err) + } + + selectedForRestore = matched + childMayBeSelected = childMayMatch && node.Type == "dir" + + return selectedForRestore, childMayBeSelected + } + + if len(opts.Exclude) > 0 { + res.SelectFilter = selectExcludeFilter + } else if len(opts.Include) > 0 { + res.SelectFilter = selectIncludeFilter + } + + Verbosef("restoring %s to %s\n", res.Snapshot(), opts.Target) + + err = res.RestoreTo(ctx, opts.Target) + if err == nil && opts.Verify { + Verbosef("verifying files in %s\n", opts.Target) + var count int + count, err = res.VerifyFiles(ctx, opts.Target) + Verbosef("finished verifying %d files in %s\n", count, opts.Target) + } + if totalErrors > 0 { + Printf("There were %d errors\n", totalErrors) + } + return err +} diff --git a/cmd/restic/cmd_self_update.go b/cmd/restic/cmd_self_update.go new file mode 100644 index 000000000..5aed085de --- /dev/null +++ b/cmd/restic/cmd_self_update.go @@ -0,0 +1,73 @@ +// xbuild selfupdate + +package main + +import ( + "os" + + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/selfupdate" + "github.com/spf13/cobra" +) + +var cmdSelfUpdate = &cobra.Command{ + Use: "self-update [flags]", + Short: "Update the restic binary", + Long: ` +The command "self-update" downloads the latest stable release of restic from +GitHub and replaces the currently running binary. After download, the +authenticity of the binary is verified using the GPG signature on the release +files. +`, + DisableAutoGenTag: true, + RunE: func(cmd *cobra.Command, args []string) error { + return runSelfUpdate(selfUpdateOptions, globalOptions, args) + }, +} + +// SelfUpdateOptions collects all options for the update-restic command. +type SelfUpdateOptions struct { + Output string +} + +var selfUpdateOptions SelfUpdateOptions + +func init() { + cmdRoot.AddCommand(cmdSelfUpdate) + + flags := cmdSelfUpdate.Flags() + flags.StringVar(&selfUpdateOptions.Output, "output", "", "Save the downloaded file as `filename` (default: running binary itself)") +} + +func runSelfUpdate(opts SelfUpdateOptions, gopts GlobalOptions, args []string) error { + if opts.Output == "" { + file, err := os.Executable() + if err != nil { + return errors.Wrap(err, "unable to find executable") + } + + opts.Output = file + } + + fi, err := os.Lstat(opts.Output) + if err != nil { + return err + } + + if !fi.Mode().IsRegular() { + return errors.Errorf("output file %v is not a normal file, use --output to specify a different file", opts.Output) + } + + Printf("writing restic to %v\n", opts.Output) + + v, err := selfupdate.DownloadLatestStableRelease(gopts.ctx, opts.Output, version, Verbosef) + if err != nil { + return errors.Fatalf("unable to update restic: %v", err) + } + + if v != version { + Printf("successfully updated restic to version %v\n", v) + } + + return nil +} diff --git a/cmd/restic/cmd_snapshots.go b/cmd/restic/cmd_snapshots.go new file mode 100644 index 000000000..d9623b942 --- /dev/null +++ b/cmd/restic/cmd_snapshots.go @@ -0,0 +1,250 @@ +package main + +import ( + "context" + "encoding/json" + "fmt" + "io" + "sort" + "strings" + + "github.com/restic/restic/internal/restic" + "github.com/restic/restic/internal/ui/table" + "github.com/spf13/cobra" +) + +var cmdSnapshots = &cobra.Command{ + Use: "snapshots [snapshotID ...]", + Short: "List all snapshots", + Long: ` +The "snapshots" command lists all snapshots stored in the repository. 
+`, + DisableAutoGenTag: true, + RunE: func(cmd *cobra.Command, args []string) error { + return runSnapshots(snapshotOptions, globalOptions, args) + }, +} + +// SnapshotOptions bundles all options for the snapshots command. +type SnapshotOptions struct { + Host string + Tags restic.TagLists + Paths []string + Compact bool + Last bool +} + +var snapshotOptions SnapshotOptions + +func init() { + cmdRoot.AddCommand(cmdSnapshots) + + f := cmdSnapshots.Flags() + f.StringVarP(&snapshotOptions.Host, "host", "H", "", "only consider snapshots for this `host`") + f.Var(&snapshotOptions.Tags, "tag", "only consider snapshots which include this `taglist` (can be specified multiple times)") + f.StringArrayVar(&snapshotOptions.Paths, "path", nil, "only consider snapshots for this `path` (can be specified multiple times)") + f.BoolVarP(&snapshotOptions.Compact, "compact", "c", false, "use compact format") + f.BoolVar(&snapshotOptions.Last, "last", false, "only show the last snapshot for each host and path") +} + +func runSnapshots(opts SnapshotOptions, gopts GlobalOptions, args []string) error { + repo, err := OpenRepository(gopts) + if err != nil { + return err + } + + if !gopts.NoLock { + lock, err := lockRepo(repo) + defer unlockRepo(lock) + if err != nil { + return err + } + } + + ctx, cancel := context.WithCancel(gopts.ctx) + defer cancel() + + var list restic.Snapshots + for sn := range FindFilteredSnapshots(ctx, repo, opts.Host, opts.Tags, opts.Paths, args) { + list = append(list, sn) + } + + if opts.Last { + list = FilterLastSnapshots(list) + } + + sort.Sort(sort.Reverse(list)) + + if gopts.JSON { + err := printSnapshotsJSON(gopts.stdout, list) + if err != nil { + Warnf("error printing snapshot: %v\n", err) + } + return nil + } + PrintSnapshots(gopts.stdout, list, nil, opts.Compact) + + return nil +} + +// filterLastSnapshotsKey is used by FilterLastSnapshots. +type filterLastSnapshotsKey struct { + Hostname string + JoinedPaths string +} + +// newFilterLastSnapshotsKey initializes a filterLastSnapshotsKey from a Snapshot +func newFilterLastSnapshotsKey(sn *restic.Snapshot) filterLastSnapshotsKey { + // Shallow slice copy + var paths = make([]string, len(sn.Paths)) + copy(paths, sn.Paths) + sort.Strings(paths) + return filterLastSnapshotsKey{sn.Hostname, strings.Join(paths, "|")} +} + +// FilterLastSnapshots filters a list of snapshots to only return the last +// entry for each hostname and path. If the snapshot contains multiple paths, +// they will be joined and treated as one item. +func FilterLastSnapshots(list restic.Snapshots) restic.Snapshots { + // Sort the snapshots so that the newer ones are listed first + sort.SliceStable(list, func(i, j int) bool { + return list[i].Time.After(list[j].Time) + }) + + var results restic.Snapshots + seen := make(map[filterLastSnapshotsKey]bool) + for _, sn := range list { + key := newFilterLastSnapshotsKey(sn) + if !seen[key] { + seen[key] = true + results = append(results, sn) + } + } + return results +} + +// PrintSnapshots prints a text table of the snapshots in list to stdout. 
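+// A minimal sketch of a call (the writer and the compact flag here are
+// assumptions for illustration):
+//
+//	PrintSnapshots(os.Stdout, list, nil, false)
+//
+// Passing a nil reasons slice simply omits the "Reasons" column below.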
+func PrintSnapshots(stdout io.Writer, list restic.Snapshots, reasons []restic.KeepReason, compact bool) {
+	// keep the reasons a snapshot is being kept in a map, so that it doesn't
+	// get lost when the list of snapshots is sorted
+	keepReasons := make(map[restic.ID]restic.KeepReason, len(reasons))
+	if len(reasons) > 0 {
+		for i, sn := range list {
+			id := sn.ID()
+			keepReasons[*id] = reasons[i]
+		}
+	}
+
+	// always sort the snapshots so that the newer ones are listed last
+	sort.SliceStable(list, func(i, j int) bool {
+		return list[i].Time.Before(list[j].Time)
+	})
+
+	// Determine the max widths for host and tag.
+	maxHost, maxTag := 10, 6
+	for _, sn := range list {
+		if len(sn.Hostname) > maxHost {
+			maxHost = len(sn.Hostname)
+		}
+		for _, tag := range sn.Tags {
+			if len(tag) > maxTag {
+				maxTag = len(tag)
+			}
+		}
+	}
+
+	tab := table.New()
+
+	if compact {
+		tab.AddColumn("ID", "{{ .ID }}")
+		tab.AddColumn("Time", "{{ .Timestamp }}")
+		tab.AddColumn("Host", "{{ .Hostname }}")
+		tab.AddColumn("Tags ", `{{ join .Tags "\n" }}`)
+	} else {
+		tab.AddColumn("ID", "{{ .ID }}")
+		tab.AddColumn("Time", "{{ .Timestamp }}")
+		tab.AddColumn("Host ", "{{ .Hostname }}")
+		tab.AddColumn("Tags ", `{{ join .Tags "," }}`)
+		if len(reasons) > 0 {
+			tab.AddColumn("Reasons", `{{ join .Reasons "\n" }}`)
+		}
+		tab.AddColumn("Paths", `{{ join .Paths "\n" }}`)
+	}
+
+	type snapshot struct {
+		ID        string
+		Timestamp string
+		Hostname  string
+		Tags      []string
+		Reasons   []string
+		Paths     []string
+	}
+
+	var multiline bool
+	for _, sn := range list {
+		data := snapshot{
+			ID:        sn.ID().Str(),
+			Timestamp: sn.Time.Local().Format(TimeFormat),
+			Hostname:  sn.Hostname,
+			Tags:      sn.Tags,
+			Paths:     sn.Paths,
+		}
+
+		if len(reasons) > 0 {
+			id := sn.ID()
+			data.Reasons = keepReasons[*id].Matches
+		}
+
+		if len(sn.Paths) > 1 && !compact {
+			multiline = true
+		}
+
+		tab.AddRow(data)
+	}
+
+	tab.AddFooter(fmt.Sprintf("%d snapshots", len(list)))
+
+	if multiline {
+		// print an additional blank line between snapshots
+
+		var last int
+		tab.PrintData = func(w io.Writer, idx int, s string) error {
+			var err error
+			if idx == last {
+				_, err = fmt.Fprintf(w, "%s\n", s)
+			} else {
+				_, err = fmt.Fprintf(w, "\n%s\n", s)
+			}
+			last = idx
+			return err
+		}
+	}
+
+	tab.Write(stdout)
+}
+
+// Snapshot helps to print Snapshots as JSON with their ID included.
+type Snapshot struct {
+	*restic.Snapshot
+
+	ID      *restic.ID `json:"id"`
+	ShortID string     `json:"short_id"`
+}
+
+// printSnapshotsJSON writes the JSON representation of list to stdout.
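+// Each array element is the embedded restic.Snapshot plus the "id" and
+// "short_id" fields added by the wrapper type above; an abridged element
+// might look like this (values invented for illustration):
+//
+//	{"time":"2019-01-09T15:32:39Z","paths":["/home"],"id":"...","short_id":"1a2b3c4d"}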
+func printSnapshotsJSON(stdout io.Writer, list restic.Snapshots) error { + + var snapshots []Snapshot + + for _, sn := range list { + + k := Snapshot{ + Snapshot: sn, + ID: sn.ID(), + ShortID: sn.ID().Str(), + } + snapshots = append(snapshots, k) + } + + return json.NewEncoder(stdout).Encode(snapshots) +} diff --git a/cmd/restic/cmd_stats.go b/cmd/restic/cmd_stats.go new file mode 100644 index 000000000..590ef5f14 --- /dev/null +++ b/cmd/restic/cmd_stats.go @@ -0,0 +1,324 @@ +package main + +import ( + "context" + "crypto/sha256" + "encoding/json" + "fmt" + "os" + "path/filepath" + + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/restic" + "github.com/restic/restic/internal/walker" + "github.com/spf13/cobra" +) + +var cmdStats = &cobra.Command{ + Use: "stats [flags] [snapshot-ID]", + Short: "Scan the repository and show basic statistics", + Long: ` +The "stats" command walks one or all snapshots in a repository and +accumulates statistics about the data stored therein. It reports on +the number of unique files and their sizes, according to one of +the counting modes as given by the --mode flag. + +If no snapshot is specified, all snapshots will be considered. Some +modes make more sense over just a single snapshot, while others +are useful across all snapshots, depending on what you are trying +to calculate. + +The modes are: + +* restore-size: (default) Counts the size of the restored files. +* files-by-contents: Counts total size of files, where a file is + considered unique if it has unique contents. +* raw-data: Counts the size of blobs in the repository, regardless of + how many files reference them. +* blobs-per-file: A combination of files-by-contents and raw-data. +* Refer to the online manual for more details about each mode. 
+`, + DisableAutoGenTag: true, + RunE: func(cmd *cobra.Command, args []string) error { + return runStats(globalOptions, args) + }, +} + +func init() { + cmdRoot.AddCommand(cmdStats) + f := cmdStats.Flags() + f.StringVar(&countMode, "mode", countModeRestoreSize, "counting mode: restore-size (default), files-by-contents, blobs-per-file, or raw-data") + f.StringVarP(&snapshotByHost, "host", "H", "", "filter latest snapshot by this hostname") +} + +func runStats(gopts GlobalOptions, args []string) error { + err := verifyStatsInput(gopts, args) + if err != nil { + return err + } + + ctx, cancel := context.WithCancel(gopts.ctx) + defer cancel() + + repo, err := OpenRepository(gopts) + if err != nil { + return err + } + + if err = repo.LoadIndex(ctx); err != nil { + return err + } + + if !gopts.NoLock { + lock, err := lockRepo(repo) + defer unlockRepo(lock) + if err != nil { + return err + } + } + + if !gopts.JSON { + Printf("scanning...\n") + } + + // create a container for the stats (and other needed state) + stats := &statsContainer{ + uniqueFiles: make(map[fileID]struct{}), + fileBlobs: make(map[string]restic.IDSet), + blobs: restic.NewBlobSet(), + blobsSeen: restic.NewBlobSet(), + } + + if snapshotIDString != "" { + // scan just a single snapshot + + var sID restic.ID + if snapshotIDString == "latest" { + sID, err = restic.FindLatestSnapshot(ctx, repo, []string{}, []restic.TagList{}, snapshotByHost) + if err != nil { + return errors.Fatalf("latest snapshot for criteria not found: %v", err) + } + } else { + sID, err = restic.FindSnapshot(repo, snapshotIDString) + if err != nil { + return errors.Fatalf("error loading snapshot: %v", err) + } + } + + snapshot, err := restic.LoadSnapshot(ctx, repo, sID) + if err != nil { + return errors.Fatalf("error loading snapshot from repo: %v", err) + } + + err = statsWalkSnapshot(ctx, snapshot, repo, stats) + } else { + // iterate every snapshot in the repo + err = repo.List(ctx, restic.SnapshotFile, func(snapshotID restic.ID, size int64) error { + snapshot, err := restic.LoadSnapshot(ctx, repo, snapshotID) + if err != nil { + return fmt.Errorf("Error loading snapshot %s: %v", snapshotID.Str(), err) + } + return statsWalkSnapshot(ctx, snapshot, repo, stats) + }) + } + if err != nil { + return err + } + + if countMode == countModeRawData { + // the blob handles have been collected, but not yet counted + for blobHandle := range stats.blobs { + blobSize, found := repo.LookupBlobSize(blobHandle.ID, blobHandle.Type) + if !found { + return fmt.Errorf("blob %v not found", blobHandle) + } + stats.TotalSize += uint64(blobSize) + stats.TotalBlobCount++ + } + } + + if gopts.JSON { + err = json.NewEncoder(os.Stdout).Encode(stats) + if err != nil { + return fmt.Errorf("encoding output: %v", err) + } + return nil + } + + // inform the user what was scanned and how it was scanned + snapshotsScanned := snapshotIDString + if snapshotsScanned == "latest" { + snapshotsScanned = "the latest snapshot" + } else if snapshotsScanned == "" { + snapshotsScanned = "all snapshots" + } + Printf("Stats for %s in %s mode:\n", snapshotsScanned, countMode) + + if stats.TotalBlobCount > 0 { + Printf(" Total Blob Count: %d\n", stats.TotalBlobCount) + } + if stats.TotalFileCount > 0 { + Printf(" Total File Count: %d\n", stats.TotalFileCount) + } + Printf(" Total Size: %-5s\n", formatBytes(stats.TotalSize)) + + return nil +} + +func statsWalkSnapshot(ctx context.Context, snapshot *restic.Snapshot, repo restic.Repository, stats *statsContainer) error { + if snapshot.Tree == nil { + return 
fmt.Errorf("snapshot %s has nil tree", snapshot.ID().Str()) + } + + if countMode == countModeRawData { + // count just the sizes of unique blobs; we don't need to walk the tree + // ourselves in this case, since a nifty function does it for us + return restic.FindUsedBlobs(ctx, repo, *snapshot.Tree, stats.blobs, stats.blobsSeen) + } + + err := walker.Walk(ctx, repo, *snapshot.Tree, restic.NewIDSet(), statsWalkTree(repo, stats)) + if err != nil { + return fmt.Errorf("walking tree %s: %v", *snapshot.Tree, err) + } + return nil +} + +func statsWalkTree(repo restic.Repository, stats *statsContainer) walker.WalkFunc { + return func(_ restic.ID, npath string, node *restic.Node, nodeErr error) (bool, error) { + if nodeErr != nil { + return true, nodeErr + } + if node == nil { + return true, nil + } + + if countMode == countModeUniqueFilesByContents || countMode == countModeBlobsPerFile { + // only count this file if we haven't visited it before + fid := makeFileIDByContents(node) + if _, ok := stats.uniqueFiles[fid]; !ok { + // mark the file as visited + stats.uniqueFiles[fid] = struct{}{} + + if countMode == countModeUniqueFilesByContents { + // simply count the size of each unique file (unique by contents only) + stats.TotalSize += node.Size + stats.TotalFileCount++ + } + if countMode == countModeBlobsPerFile { + // count the size of each unique blob reference, which is + // by unique file (unique by contents and file path) + for _, blobID := range node.Content { + // ensure we have this file (by path) in our map; in this + // mode, a file is unique by both contents and path + nodePath := filepath.Join(npath, node.Name) + if _, ok := stats.fileBlobs[nodePath]; !ok { + stats.fileBlobs[nodePath] = restic.NewIDSet() + stats.TotalFileCount++ + } + if _, ok := stats.fileBlobs[nodePath][blobID]; !ok { + // is always a data blob since we're accessing it via a file's Content array + blobSize, found := repo.LookupBlobSize(blobID, restic.DataBlob) + if !found { + return true, fmt.Errorf("blob %s not found for tree %s", blobID, *node.Subtree) + } + + // count the blob's size, then add this blob by this + // file (path) so we don't double-count it + stats.TotalSize += uint64(blobSize) + stats.fileBlobs[nodePath].Insert(blobID) + // this mode also counts total unique blob _references_ per file + stats.TotalBlobCount++ + } + } + } + } + } + + if countMode == countModeRestoreSize { + // as this is a file in the snapshot, we can simply count its + // size without worrying about uniqueness, since duplicate files + // will still be restored + stats.TotalSize += node.Size + stats.TotalFileCount++ + } + + return true, nil + } +} + +// makeFileIDByContents returns a hash of the blob IDs of the +// node's Content in sequence. +func makeFileIDByContents(node *restic.Node) fileID { + var bb []byte + for _, c := range node.Content { + bb = append(bb, []byte(c[:])...) 
+	}
+	return sha256.Sum256(bb)
+}
+
+func verifyStatsInput(gopts GlobalOptions, args []string) error {
+	// require a recognized counting mode
+	switch countMode {
+	case countModeRestoreSize:
+	case countModeUniqueFilesByContents:
+	case countModeBlobsPerFile:
+	case countModeRawData:
+	default:
+		return fmt.Errorf("unknown counting mode: %s (use the -h flag to get a list of supported modes)", countMode)
+	}
+
+	// ensure at most one snapshot was specified
+	if len(args) > 1 {
+		return fmt.Errorf("only one snapshot may be specified")
+	}
+
+	// if a snapshot was specified, mark it as the one to scan
+	if len(args) == 1 {
+		snapshotIDString = args[0]
+	}
+
+	return nil
+}
+
+// statsContainer holds information during a walk of a repository
+// to collect information about it, as well as state needed
+// for a successful and efficient walk.
+type statsContainer struct {
+	TotalSize      uint64 `json:"total_size"`
+	TotalFileCount uint64 `json:"total_file_count"`
+	TotalBlobCount uint64 `json:"total_blob_count,omitempty"`
+
+	// uniqueFiles marks visited files according to their
+	// contents (hashed sequence of content blob IDs)
+	uniqueFiles map[fileID]struct{}
+
+	// fileBlobs maps a file name (path) to the set of
+	// blobs that have been seen as a part of the file
+	fileBlobs map[string]restic.IDSet
+
+	// blobs and blobsSeen are used to count individual
+	// unique blobs, independent of references to files
+	blobs, blobsSeen restic.BlobSet
+}
+
+// fileID is a 256-bit hash that distinguishes unique files.
+type fileID [32]byte
+
+var (
+	// the mode of counting to perform
+	countMode string
+
+	// the snapshot to scan, as given by the user
+	snapshotIDString string
+
+	// snapshotByHost is the host to filter latest
+	// snapshot by, if given by user
+	snapshotByHost string
+)
+
+const (
+	countModeRestoreSize           = "restore-size"
+	countModeUniqueFilesByContents = "files-by-contents"
+	countModeBlobsPerFile          = "blobs-per-file"
+	countModeRawData               = "raw-data"
+)
diff --git a/cmd/restic/cmd_tag.go b/cmd/restic/cmd_tag.go
new file mode 100644
index 000000000..f120f9cf4
--- /dev/null
+++ b/cmd/restic/cmd_tag.go
@@ -0,0 +1,143 @@
+package main
+
+import (
+	"context"
+
+	"github.com/spf13/cobra"
+
+	"github.com/restic/restic/internal/debug"
+	"github.com/restic/restic/internal/errors"
+	"github.com/restic/restic/internal/repository"
+	"github.com/restic/restic/internal/restic"
+)
+
+var cmdTag = &cobra.Command{
+	Use:   "tag [flags] [snapshot-ID ...]",
+	Short: "Modify tags on snapshots",
+	Long: `
+The "tag" command allows you to modify tags on existing snapshots.
+
+You can either set/replace the entire set of tags on a snapshot, or
+add tags to/remove tags from the existing set.
+
+When no snapshot-ID is given, all snapshots matching the host, tag and path filter criteria are modified.
+`,
+	DisableAutoGenTag: true,
+	RunE: func(cmd *cobra.Command, args []string) error {
+		return runTag(tagOptions, globalOptions, args)
+	},
+}
+
+// TagOptions bundles all options for the 'tag' command.
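+// As a rough sketch (tag names and snapshot ID invented for illustration),
+// a run like
+//
+//	restic tag --add nas --remove old 79766175
+//
+// fills AddTags and RemoveTags while leaving SetTags empty; runTag below
+// rejects combining --set with either of the other two.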
+type TagOptions struct { + Host string + Paths []string + Tags restic.TagLists + SetTags []string + AddTags []string + RemoveTags []string +} + +var tagOptions TagOptions + +func init() { + cmdRoot.AddCommand(cmdTag) + + tagFlags := cmdTag.Flags() + tagFlags.StringSliceVar(&tagOptions.SetTags, "set", nil, "`tag` which will replace the existing tags (can be given multiple times)") + tagFlags.StringSliceVar(&tagOptions.AddTags, "add", nil, "`tag` which will be added to the existing tags (can be given multiple times)") + tagFlags.StringSliceVar(&tagOptions.RemoveTags, "remove", nil, "`tag` which will be removed from the existing tags (can be given multiple times)") + + tagFlags.StringVarP(&tagOptions.Host, "host", "H", "", "only consider snapshots for this `host`, when no snapshot ID is given") + tagFlags.Var(&tagOptions.Tags, "tag", "only consider snapshots which include this `taglist`, when no snapshot-ID is given") + tagFlags.StringArrayVar(&tagOptions.Paths, "path", nil, "only consider snapshots which include this (absolute) `path`, when no snapshot-ID is given") +} + +func changeTags(ctx context.Context, repo *repository.Repository, sn *restic.Snapshot, setTags, addTags, removeTags []string) (bool, error) { + var changed bool + + if len(setTags) != 0 { + // Setting the tag to an empty string really means no tags. + if len(setTags) == 1 && setTags[0] == "" { + setTags = nil + } + sn.Tags = setTags + changed = true + } else { + changed = sn.AddTags(addTags) + if sn.RemoveTags(removeTags) { + changed = true + } + } + + if changed { + // Retain the original snapshot id over all tag changes. + if sn.Original == nil { + sn.Original = sn.ID() + } + + // Save the new snapshot. + id, err := repo.SaveJSONUnpacked(ctx, restic.SnapshotFile, sn) + if err != nil { + return false, err + } + + debug.Log("new snapshot saved as %v", id) + + if err = repo.Flush(ctx); err != nil { + return false, err + } + + // Remove the old snapshot. 
+ h := restic.Handle{Type: restic.SnapshotFile, Name: sn.ID().String()} + if err = repo.Backend().Remove(ctx, h); err != nil { + return false, err + } + + debug.Log("old snapshot %v removed", sn.ID()) + } + return changed, nil +} + +func runTag(opts TagOptions, gopts GlobalOptions, args []string) error { + if len(opts.SetTags) == 0 && len(opts.AddTags) == 0 && len(opts.RemoveTags) == 0 { + return errors.Fatal("nothing to do!") + } + if len(opts.SetTags) != 0 && (len(opts.AddTags) != 0 || len(opts.RemoveTags) != 0) { + return errors.Fatal("--set and --add/--remove cannot be given at the same time") + } + + repo, err := OpenRepository(gopts) + if err != nil { + return err + } + + if !gopts.NoLock { + Verbosef("create exclusive lock for repository\n") + lock, err := lockRepoExclusive(repo) + defer unlockRepo(lock) + if err != nil { + return err + } + } + + changeCnt := 0 + ctx, cancel := context.WithCancel(gopts.ctx) + defer cancel() + for sn := range FindFilteredSnapshots(ctx, repo, opts.Host, opts.Tags, opts.Paths, args) { + changed, err := changeTags(ctx, repo, sn, opts.SetTags, opts.AddTags, opts.RemoveTags) + if err != nil { + Warnf("unable to modify the tags for snapshot ID %q, ignoring: %v\n", sn.ID(), err) + continue + } + if changed { + changeCnt++ + } + } + if changeCnt == 0 { + Verbosef("no snapshots were modified\n") + } else { + Verbosef("modified tags on %v snapshots\n", changeCnt) + } + return nil +} diff --git a/cmd/restic/cmd_unlock.go b/cmd/restic/cmd_unlock.go new file mode 100644 index 000000000..4b216905d --- /dev/null +++ b/cmd/restic/cmd_unlock.go @@ -0,0 +1,51 @@ +package main + +import ( + "github.com/restic/restic/internal/restic" + "github.com/spf13/cobra" +) + +var unlockCmd = &cobra.Command{ + Use: "unlock", + Short: "Remove locks other processes created", + Long: ` +The "unlock" command removes stale locks that have been created by other restic processes. +`, + DisableAutoGenTag: true, + RunE: func(cmd *cobra.Command, args []string) error { + return runUnlock(unlockOptions, globalOptions) + }, +} + +// UnlockOptions collects all options for the unlock command. +type UnlockOptions struct { + RemoveAll bool +} + +var unlockOptions UnlockOptions + +func init() { + cmdRoot.AddCommand(unlockCmd) + + unlockCmd.Flags().BoolVar(&unlockOptions.RemoveAll, "remove-all", false, "remove all locks, even non-stale ones") +} + +func runUnlock(opts UnlockOptions, gopts GlobalOptions) error { + repo, err := OpenRepository(gopts) + if err != nil { + return err + } + + fn := restic.RemoveStaleLocks + if opts.RemoveAll { + fn = restic.RemoveAllLocks + } + + err = fn(gopts.ctx, repo) + if err != nil { + return err + } + + Verbosef("successfully removed locks\n") + return nil +} diff --git a/cmd/restic/cmd_version.go b/cmd/restic/cmd_version.go new file mode 100644 index 000000000..677079a50 --- /dev/null +++ b/cmd/restic/cmd_version.go @@ -0,0 +1,26 @@ +package main + +import ( + "fmt" + "runtime" + + "github.com/spf13/cobra" +) + +var versionCmd = &cobra.Command{ + Use: "version", + Short: "Print version information", + Long: ` +The "version" command prints detailed information about the build environment +and the version of this software. 
+`, + DisableAutoGenTag: true, + Run: func(cmd *cobra.Command, args []string) { + fmt.Printf("restic %s compiled with %v on %v/%v\n", + version, runtime.Version(), runtime.GOOS, runtime.GOARCH) + }, +} + +func init() { + cmdRoot.AddCommand(versionCmd) +} diff --git a/cmd/restic/doc.go b/cmd/restic/doc.go new file mode 100644 index 000000000..19b609b8d --- /dev/null +++ b/cmd/restic/doc.go @@ -0,0 +1,2 @@ +// This package contains the code for the restic executable. +package main diff --git a/cmd/restic/exclude.go b/cmd/restic/exclude.go new file mode 100644 index 000000000..479f8a308 --- /dev/null +++ b/cmd/restic/exclude.go @@ -0,0 +1,282 @@ +package main + +import ( + "bytes" + "fmt" + "io" + "os" + "path/filepath" + "strings" + "sync" + + "github.com/restic/restic/internal/debug" + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/filter" + "github.com/restic/restic/internal/fs" + "github.com/restic/restic/internal/repository" +) + +type rejectionCache struct { + m map[string]bool + mtx sync.Mutex +} + +// Lock locks the mutex in rc. +func (rc *rejectionCache) Lock() { + if rc != nil { + rc.mtx.Lock() + } +} + +// Unlock unlocks the mutex in rc. +func (rc *rejectionCache) Unlock() { + if rc != nil { + rc.mtx.Unlock() + } +} + +// Get returns the last stored value for dir and a second boolean that +// indicates whether that value was actually written to the cache. It is the +// callers responsibility to call rc.Lock and rc.Unlock before using this +// method, otherwise data races may occur. +func (rc *rejectionCache) Get(dir string) (bool, bool) { + if rc == nil || rc.m == nil { + return false, false + } + v, ok := rc.m[dir] + return v, ok +} + +// Store stores a new value for dir. It is the callers responsibility to call +// rc.Lock and rc.Unlock before using this method, otherwise data races may +// occur. +func (rc *rejectionCache) Store(dir string, rejected bool) { + if rc == nil { + return + } + if rc.m == nil { + rc.m = make(map[string]bool) + } + rc.m[dir] = rejected +} + +// RejectByNameFunc is a function that takes a filename of a +// file that would be included in the backup. The function returns true if it +// should be excluded (rejected) from the backup. +type RejectByNameFunc func(path string) bool + +// RejectFunc is a function that takes a filename and os.FileInfo of a +// file that would be included in the backup. The function returns true if it +// should be excluded (rejected) from the backup. +type RejectFunc func(path string, fi os.FileInfo) bool + +// rejectByPattern returns a RejectByNameFunc which rejects files that match +// one of the patterns. +func rejectByPattern(patterns []string) RejectByNameFunc { + return func(item string) bool { + matched, _, err := filter.List(patterns, item) + if err != nil { + Warnf("error for exclude pattern: %v", err) + } + + if matched { + debug.Log("path %q excluded by an exclude pattern", item) + return true + } + + return false + } +} + +// rejectIfPresent returns a RejectByNameFunc which itself returns whether a path +// should be excluded. The RejectByNameFunc considers a file to be excluded when +// it resides in a directory with an exclusion file, that is specified by +// excludeFileSpec in the form "filename[:content]". The returned error is +// non-nil if the filename component of excludeFileSpec is empty. If rc is +// non-nil, it is going to be used in the RejectByNameFunc to expedite the evaluation +// of a directory based on previous visits. 
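+// Two illustrative specs (the names are examples, not defaults):
+// "NOBACKUP" excludes any directory that merely contains a file of that
+// name, while
+//
+//	CACHEDIR.TAG:Signature: 8a477f597d28d172789f06886806bc55
+//
+// additionally requires the tagfile to start with that header, as
+// exercised by the tests in exclude_test.go.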
+func rejectIfPresent(excludeFileSpec string) (RejectByNameFunc, error) {
+	if excludeFileSpec == "" {
+		return nil, errors.New("name for exclusion tagfile is empty")
+	}
+	colon := strings.Index(excludeFileSpec, ":")
+	if colon == 0 {
+		return nil, fmt.Errorf("no name for exclusion tagfile provided")
+	}
+	tf, tc := "", ""
+	if colon > 0 {
+		tf = excludeFileSpec[:colon]
+		tc = excludeFileSpec[colon+1:]
+	} else {
+		tf = excludeFileSpec
+	}
+	debug.Log("using %q as exclusion tagfile", tf)
+	rc := &rejectionCache{}
+	fn := func(filename string) bool {
+		return isExcludedByFile(filename, tf, tc, rc)
+	}
+	return fn, nil
+}
+
+// isExcludedByFile interprets filename as a path and returns true if that file
+// is in an excluded directory. A directory is identified as excluded if it contains a
+// tagfile which bears the name specified in tagFilename and starts with
+// header. If rc is non-nil, it is used to expedite the evaluation of a
+// directory based on previous visits.
+func isExcludedByFile(filename, tagFilename, header string, rc *rejectionCache) bool {
+	if tagFilename == "" {
+		return false
+	}
+	dir, base := filepath.Split(filename)
+	if base == tagFilename {
+		return false // do not exclude the tagfile itself
+	}
+	rc.Lock()
+	defer rc.Unlock()
+
+	rejected, visited := rc.Get(dir)
+	if visited {
+		return rejected
+	}
+	rejected = isDirExcludedByFile(dir, tagFilename, header)
+	rc.Store(dir, rejected)
+	return rejected
+}
+
+func isDirExcludedByFile(dir, tagFilename, header string) bool {
+	tf := filepath.Join(dir, tagFilename)
+	_, err := fs.Lstat(tf)
+	if os.IsNotExist(err) {
+		return false
+	}
+	if err != nil {
+		Warnf("could not access exclusion tagfile: %v", err)
+		return false
+	}
+	// when no signature is given, the mere presence of tf is enough reason
+	// to exclude filename
+	if len(header) == 0 {
+		return true
+	}
+	// From this stage, errors mean tagFilename exists but it is malformed.
+	// Warnings will be generated so that the user is informed that the
+	// intended ignore-action is not performed.
+	f, err := os.Open(tf)
+	if err != nil {
+		Warnf("could not open exclusion tagfile: %v", err)
+		return false
+	}
+	defer f.Close()
+	buf := make([]byte, len(header))
+	_, err = io.ReadFull(f, buf)
+	// EOF is handled with a dedicated message, otherwise the warning would be too cryptic
+	if err == io.EOF {
+		Warnf("invalid (too short) signature in exclusion tagfile %q\n", tf)
+		return false
+	}
+	if err != nil {
+		Warnf("could not read signature from exclusion tagfile %q: %v\n", tf, err)
+		return false
+	}
+	if bytes.Compare(buf, []byte(header)) != 0 {
+		Warnf("invalid signature in exclusion tagfile %q\n", tf)
+		return false
+	}
+	return true
+}
+
+// gatherDevices returns the set of unique device ids of the files and/or
+// directory paths listed in "items".
+func gatherDevices(items []string) (deviceMap map[string]uint64, err error) {
+	deviceMap = make(map[string]uint64)
+	for _, item := range items {
+		item, err = filepath.Abs(filepath.Clean(item))
+		if err != nil {
+			return nil, err
+		}
+
+		fi, err := fs.Lstat(item)
+		if err != nil {
+			return nil, err
+		}
+		id, err := fs.DeviceID(fi)
+		if err != nil {
+			return nil, err
+		}
+		deviceMap[item] = id
+	}
+	if len(deviceMap) == 0 {
+		return nil, errors.New("zero allowed devices")
+	}
+	return deviceMap, nil
+}
+
+// rejectByDevice returns a RejectFunc that rejects files which are on a
+// different file system than the files/dirs in samples.
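+// A minimal sketch of the intended use (the paths and fi are assumptions
+// for illustration):
+//
+//	reject, err := rejectByDevice([]string{"/home"})
+//	if err == nil && reject("/home/nfs/file", fi) {
+//		// file lives on another device and is skipped
+//	}
+//
+// where fi is the os.FileInfo obtained for the candidate file.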
+func rejectByDevice(samples []string) (RejectFunc, error) { + allowed, err := gatherDevices(samples) + if err != nil { + return nil, err + } + debug.Log("allowed devices: %v\n", allowed) + + return func(item string, fi os.FileInfo) bool { + if fi == nil { + return false + } + + item = filepath.Clean(item) + + id, err := fs.DeviceID(fi) + if err != nil { + // This should never happen because gatherDevices() would have + // errored out earlier. If it still does that's a reason to panic. + panic(err) + } + + for dir := item; ; dir = filepath.Dir(dir) { + debug.Log("item %v, test dir %v", item, dir) + + allowedID, ok := allowed[dir] + if !ok { + if dir == filepath.Dir(dir) { + break + } + continue + } + + if allowedID != id { + debug.Log("path %q on disallowed device %d", item, id) + return true + } + + return false + } + + panic(fmt.Sprintf("item %v, device id %v not found, allowedDevs: %v", item, id, allowed)) + }, nil +} + +// rejectResticCache returns a RejectByNameFunc that rejects the restic cache +// directory (if set). +func rejectResticCache(repo *repository.Repository) (RejectByNameFunc, error) { + if repo.Cache == nil { + return func(string) bool { + return false + }, nil + } + cacheBase := repo.Cache.BaseDir() + + if cacheBase == "" { + return nil, errors.New("cacheBase is empty string") + } + + return func(item string) bool { + if fs.HasPathPrefix(cacheBase, item) { + debug.Log("rejecting restic cache directory %v", item) + return true + } + + return false + }, nil +} diff --git a/cmd/restic/exclude_test.go b/cmd/restic/exclude_test.go new file mode 100644 index 000000000..741dbdb64 --- /dev/null +++ b/cmd/restic/exclude_test.go @@ -0,0 +1,164 @@ +package main + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" + + "github.com/restic/restic/internal/test" +) + +func TestRejectByPattern(t *testing.T) { + var tests = []struct { + filename string + reject bool + }{ + {filename: "/home/user/foo.go", reject: true}, + {filename: "/home/user/foo.c", reject: false}, + {filename: "/home/user/foobar", reject: false}, + {filename: "/home/user/foobar/x", reject: true}, + {filename: "/home/user/README", reject: false}, + {filename: "/home/user/README.md", reject: true}, + } + + patterns := []string{"*.go", "README.md", "/home/user/foobar/*"} + + for _, tc := range tests { + t.Run("", func(t *testing.T) { + reject := rejectByPattern(patterns) + res := reject(tc.filename) + if res != tc.reject { + t.Fatalf("wrong result for filename %v: want %v, got %v", + tc.filename, tc.reject, res) + } + }) + } +} + +func TestIsExcludedByFile(t *testing.T) { + const ( + tagFilename = "CACHEDIR.TAG" + header = "Signature: 8a477f597d28d172789f06886806bc55" + ) + tests := []struct { + name string + tagFile string + content string + want bool + }{ + {"NoTagfile", "", "", false}, + {"EmptyTagfile", tagFilename, "", true}, + {"UnnamedTagFile", "", header, false}, + {"WrongTagFile", "notatagfile", header, false}, + {"IncorrectSig", tagFilename, header[1:], false}, + {"ValidSig", tagFilename, header, true}, + {"ValidPlusStuff", tagFilename, header + "foo", true}, + {"ValidPlusNewlineAndStuff", tagFilename, header + "\nbar", true}, + } + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + tempDir, cleanup := test.TempDir(t) + defer cleanup() + + foo := filepath.Join(tempDir, "foo") + err := ioutil.WriteFile(foo, []byte("foo"), 0666) + if err != nil { + t.Fatalf("could not write file: %v", err) + } + if tc.tagFile != "" { + tagFile := filepath.Join(tempDir, tc.tagFile) + err = 
ioutil.WriteFile(tagFile, []byte(tc.content), 0666) + if err != nil { + t.Fatalf("could not write tagfile: %v", err) + } + } + h := header + if tc.content == "" { + h = "" + } + if got := isExcludedByFile(foo, tagFilename, h, nil); tc.want != got { + t.Fatalf("expected %v, got %v", tc.want, got) + } + }) + } +} + +// TestMultipleIsExcludedByFile is for testing that multiple instances of +// the --exclude-if-present parameter (or the shortcut --exclude-caches do not +// cancel each other out. It was initially written to demonstrate a bug in +// rejectIfPresent. +func TestMultipleIsExcludedByFile(t *testing.T) { + tempDir, cleanup := test.TempDir(t) + defer cleanup() + + // Create some files in a temporary directory. + // Files in UPPERCASE will be used as exclusion triggers later on. + // We will test the inclusion later, so we add the expected value as + // a bool. + files := []struct { + path string + incl bool + }{ + {"42", true}, + + // everything in foodir except the NOFOO tagfile + // should not be included. + {"foodir/NOFOO", true}, + {"foodir/foo", false}, + {"foodir/foosub/underfoo", false}, + + // everything in bardir except the NOBAR tagfile + // should not be included. + {"bardir/NOBAR", true}, + {"bardir/bar", false}, + {"bardir/barsub/underbar", false}, + + // everything in bazdir should be included. + {"bazdir/baz", true}, + {"bazdir/bazsub/underbaz", true}, + } + var errs []error + for _, f := range files { + // create directories first, then the file + p := filepath.Join(tempDir, filepath.FromSlash(f.path)) + errs = append(errs, os.MkdirAll(filepath.Dir(p), 0700)) + errs = append(errs, ioutil.WriteFile(p, []byte(f.path), 0600)) + } + test.OKs(t, errs) // see if anything went wrong during the creation + + // create two rejection functions, one that tests for the NOFOO file + // and one for the NOBAR file + fooExclude, _ := rejectIfPresent("NOFOO") + barExclude, _ := rejectIfPresent("NOBAR") + + // To mock the archiver scanning walk, we create filepath.WalkFn + // that tests against the two rejection functions and stores + // the result in a map against we can test later. + m := make(map[string]bool) + walk := func(p string, fi os.FileInfo, err error) error { + if err != nil { + return err + } + excludedByFoo := fooExclude(p) + excludedByBar := barExclude(p) + excluded := excludedByFoo || excludedByBar + // the log message helps debugging in case the test fails + t.Logf("%q: %v || %v = %v", p, excludedByFoo, excludedByBar, excluded) + m[p] = !excluded + if excluded { + return filepath.SkipDir + } + return nil + } + // walk through the temporary file and check the error + test.OK(t, filepath.Walk(tempDir, walk)) + + // compare whether the walk gave the expected values for the test cases + for _, f := range files { + p := filepath.Join(tempDir, filepath.FromSlash(f.path)) + if m[p] != f.incl { + t.Errorf("inclusion status of %s is wrong: want %v, got %v", f.path, f.incl, m[p]) + } + } +} diff --git a/cmd/restic/find.go b/cmd/restic/find.go new file mode 100644 index 000000000..e48a6ab55 --- /dev/null +++ b/cmd/restic/find.go @@ -0,0 +1,76 @@ +package main + +import ( + "context" + + "github.com/restic/restic/internal/repository" + "github.com/restic/restic/internal/restic" +) + +// FindFilteredSnapshots yields Snapshots, either given explicitly by `snapshotIDs` or filtered from the list of all snapshots. 
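+// Callers range over the returned channel, as the snapshots and tag
+// commands in this patch do; shown here with empty filters as an
+// illustrative default:
+//
+//	for sn := range FindFilteredSnapshots(ctx, repo, "", nil, nil, args) {
+//		// process *restic.Snapshot
+//	}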
+func FindFilteredSnapshots(ctx context.Context, repo *repository.Repository, host string, tags []restic.TagList, paths []string, snapshotIDs []string) <-chan *restic.Snapshot {
+	out := make(chan *restic.Snapshot)
+	go func() {
+		defer close(out)
+		if len(snapshotIDs) != 0 {
+			var (
+				id         restic.ID
+				usedFilter bool
+				err        error
+			)
+			ids := make(restic.IDs, 0, len(snapshotIDs))
+			// Process all snapshot IDs given as arguments.
+			for _, s := range snapshotIDs {
+				if s == "latest" {
+					id, err = restic.FindLatestSnapshot(ctx, repo, paths, tags, host)
+					if err != nil {
+						Warnf("Ignoring %q, no snapshot matched given filter (Paths:%v Tags:%v Host:%v)\n", s, paths, tags, host)
+						usedFilter = true
+						continue
+					}
+				} else {
+					id, err = restic.FindSnapshot(repo, s)
+					if err != nil {
+						Warnf("Ignoring %q, it is not a snapshot id\n", s)
+						continue
+					}
+				}
+				ids = append(ids, id)
+			}
+
+			// Give the user some indication their filters are not used.
+			if !usedFilter && (host != "" || len(tags) != 0 || len(paths) != 0) {
+				Warnf("Ignoring filters as there are explicit snapshot ids given\n")
+			}
+
+			for _, id := range ids.Uniq() {
+				sn, err := restic.LoadSnapshot(ctx, repo, id)
+				if err != nil {
+					Warnf("Ignoring %q, could not load snapshot: %v\n", id, err)
+					continue
+				}
+				select {
+				case <-ctx.Done():
+					return
+				case out <- sn:
+				}
+			}
+			return
+		}
+
+		snapshots, err := restic.FindFilteredSnapshots(ctx, repo, host, tags, paths)
+		if err != nil {
+			Warnf("could not load snapshots: %v\n", err)
+			return
+		}
+
+		for _, sn := range snapshots {
+			select {
+			case <-ctx.Done():
+				return
+			case out <- sn:
+			}
+		}
+	}()
+	return out
+}
diff --git a/cmd/restic/flags_test.go b/cmd/restic/flags_test.go
new file mode 100644
index 000000000..b7f88e906
--- /dev/null
+++ b/cmd/restic/flags_test.go
@@ -0,0 +1,24 @@
+package main
+
+import (
+	"io/ioutil"
+	"testing"
+)
+
+// TestFlags checks for double-defined flags; the commands will panic on
+// ParseFlags() when a shorthand flag is defined twice.
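+// The loop below asks every subcommand to parse "--help"; pflag reports
+// that request as the error "pflag: help requested", which the test
+// treats as success, so only genuine flag-definition problems fail.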
+func TestFlags(t *testing.T) { + for _, cmd := range cmdRoot.Commands() { + t.Run(cmd.Name(), func(t *testing.T) { + cmd.Flags().SetOutput(ioutil.Discard) + err := cmd.ParseFlags([]string{"--help"}) + if err.Error() == "pflag: help requested" { + err = nil + } + + if err != nil { + t.Fatal(err) + } + }) + } +} diff --git a/cmd/restic/format.go b/cmd/restic/format.go new file mode 100644 index 000000000..d623371b8 --- /dev/null +++ b/cmd/restic/format.go @@ -0,0 +1,95 @@ +package main + +import ( + "fmt" + "os" + "time" + + "github.com/restic/restic/internal/restic" +) + +func formatBytes(c uint64) string { + b := float64(c) + + switch { + case c > 1<<40: + return fmt.Sprintf("%.3f TiB", b/(1<<40)) + case c > 1<<30: + return fmt.Sprintf("%.3f GiB", b/(1<<30)) + case c > 1<<20: + return fmt.Sprintf("%.3f MiB", b/(1<<20)) + case c > 1<<10: + return fmt.Sprintf("%.3f KiB", b/(1<<10)) + default: + return fmt.Sprintf("%d B", c) + } +} + +func formatSeconds(sec uint64) string { + hours := sec / 3600 + sec -= hours * 3600 + min := sec / 60 + sec -= min * 60 + if hours > 0 { + return fmt.Sprintf("%d:%02d:%02d", hours, min, sec) + } + + return fmt.Sprintf("%d:%02d", min, sec) +} + +func formatPercent(numerator uint64, denominator uint64) string { + if denominator == 0 { + return "" + } + + percent := 100.0 * float64(numerator) / float64(denominator) + + if percent > 100 { + percent = 100 + } + + return fmt.Sprintf("%3.2f%%", percent) +} + +func formatRate(bytes uint64, duration time.Duration) string { + sec := float64(duration) / float64(time.Second) + rate := float64(bytes) / sec / (1 << 20) + return fmt.Sprintf("%.2fMiB/s", rate) +} + +func formatDuration(d time.Duration) string { + sec := uint64(d / time.Second) + return formatSeconds(sec) +} + +func formatNode(path string, n *restic.Node, long bool) string { + if !long { + return path + } + + var mode os.FileMode + var target string + + switch n.Type { + case "file": + mode = 0 + case "dir": + mode = os.ModeDir + case "symlink": + mode = os.ModeSymlink + target = fmt.Sprintf(" -> %v", n.LinkTarget) + case "dev": + mode = os.ModeDevice + case "chardev": + mode = os.ModeDevice | os.ModeCharDevice + case "fifo": + mode = os.ModeNamedPipe + case "socket": + mode = os.ModeSocket + } + + return fmt.Sprintf("%s %5d %5d %6d %s %s%s", + mode|n.Mode, n.UID, n.GID, n.Size, + n.ModTime.Local().Format(TimeFormat), path, + target) +} diff --git a/cmd/restic/global.go b/cmd/restic/global.go new file mode 100644 index 000000000..acff7ada6 --- /dev/null +++ b/cmd/restic/global.go @@ -0,0 +1,686 @@ +package main + +import ( + "context" + "fmt" + "io" + "os" + "path/filepath" + "runtime" + "strings" + "syscall" + "time" + + "github.com/restic/restic/internal/backend" + "github.com/restic/restic/internal/backend/azure" + "github.com/restic/restic/internal/backend/b2" + "github.com/restic/restic/internal/backend/gs" + "github.com/restic/restic/internal/backend/local" + "github.com/restic/restic/internal/backend/location" + "github.com/restic/restic/internal/backend/rclone" + "github.com/restic/restic/internal/backend/rest" + "github.com/restic/restic/internal/backend/s3" + "github.com/restic/restic/internal/backend/sftp" + "github.com/restic/restic/internal/backend/swift" + "github.com/restic/restic/internal/cache" + "github.com/restic/restic/internal/debug" + "github.com/restic/restic/internal/fs" + "github.com/restic/restic/internal/limiter" + "github.com/restic/restic/internal/options" + "github.com/restic/restic/internal/repository" + 
"github.com/restic/restic/internal/restic" + "github.com/restic/restic/internal/textfile" + + "github.com/restic/restic/internal/errors" + + "golang.org/x/crypto/ssh/terminal" + "os/exec" +) + +var version = "0.9.4" + +// TimeFormat is the format used for all timestamps printed by restic. +const TimeFormat = "2006-01-02 15:04:05" + +// GlobalOptions hold all global options for restic. +type GlobalOptions struct { + Repo string + PasswordFile string + PasswordCommand string + KeyHint string + Quiet bool + Verbose int + NoLock bool + JSON bool + CacheDir string + NoCache bool + CACerts []string + TLSClientCert string + CleanupCache bool + + LimitUploadKb int + LimitDownloadKb int + + ctx context.Context + password string + stdout io.Writer + stderr io.Writer + + // verbosity is set as follows: + // 0 means: don't print any messages except errors, this is used when --quiet is specified + // 1 is the default: print essential messages + // 2 means: print more messages, report minor things, this is used when --verbose is specified + // 3 means: print very detailed debug messages, this is used when --verbose 2 is specified + verbosity uint + + Options []string + + extended options.Options +} + +var globalOptions = GlobalOptions{ + stdout: os.Stdout, + stderr: os.Stderr, +} + +func init() { + var cancel context.CancelFunc + globalOptions.ctx, cancel = context.WithCancel(context.Background()) + AddCleanupHandler(func() error { + cancel() + return nil + }) + + f := cmdRoot.PersistentFlags() + f.StringVarP(&globalOptions.Repo, "repo", "r", os.Getenv("RESTIC_REPOSITORY"), "repository to backup to or restore from (default: $RESTIC_REPOSITORY)") + f.StringVarP(&globalOptions.PasswordFile, "password-file", "p", os.Getenv("RESTIC_PASSWORD_FILE"), "read the repository password from a file (default: $RESTIC_PASSWORD_FILE)") + f.StringVarP(&globalOptions.KeyHint, "key-hint", "", os.Getenv("RESTIC_KEY_HINT"), "key ID of key to try decrypting first (default: $RESTIC_KEY_HINT)") + f.StringVarP(&globalOptions.PasswordCommand, "password-command", "", os.Getenv("RESTIC_PASSWORD_COMMAND"), "specify a shell command to obtain a password (default: $RESTIC_PASSWORD_COMMAND)") + f.BoolVarP(&globalOptions.Quiet, "quiet", "q", false, "do not output comprehensive progress report") + f.CountVarP(&globalOptions.Verbose, "verbose", "v", "be verbose (specify --verbose multiple times or level `n`)") + f.BoolVar(&globalOptions.NoLock, "no-lock", false, "do not lock the repo, this allows some operations on read-only repos") + f.BoolVarP(&globalOptions.JSON, "json", "", false, "set output mode to JSON for commands that support it") + f.StringVar(&globalOptions.CacheDir, "cache-dir", "", "set the cache directory. (default: use system default cache directory)") + f.BoolVar(&globalOptions.NoCache, "no-cache", false, "do not use a local cache") + f.StringSliceVar(&globalOptions.CACerts, "cacert", nil, "`file` to load root certificates from (default: use system certificates)") + f.StringVar(&globalOptions.TLSClientCert, "tls-client-cert", "", "path to a file containing PEM encoded TLS client certificate and private key") + f.BoolVar(&globalOptions.CleanupCache, "cleanup-cache", false, "auto remove old cache directories") + f.IntVar(&globalOptions.LimitUploadKb, "limit-upload", 0, "limits uploads to a maximum rate in KiB/s. (default: unlimited)") + f.IntVar(&globalOptions.LimitDownloadKb, "limit-download", 0, "limits downloads to a maximum rate in KiB/s. 
(default: unlimited)") + f.StringSliceVarP(&globalOptions.Options, "option", "o", []string{}, "set extended option (`key=value`, can be specified multiple times)") + + restoreTerminal() +} + +// checkErrno returns nil when err is set to syscall.Errno(0), since this is no +// error condition. +func checkErrno(err error) error { + e, ok := err.(syscall.Errno) + if !ok { + return err + } + + if e == 0 { + return nil + } + + return err +} + +func stdinIsTerminal() bool { + return terminal.IsTerminal(int(os.Stdin.Fd())) +} + +func stdoutIsTerminal() bool { + return terminal.IsTerminal(int(os.Stdout.Fd())) +} + +func stdoutTerminalWidth() int { + w, _, err := terminal.GetSize(int(os.Stdout.Fd())) + if err != nil { + return 0 + } + return w +} + +// restoreTerminal installs a cleanup handler that restores the previous +// terminal state on exit. +func restoreTerminal() { + if !stdoutIsTerminal() { + return + } + + fd := int(os.Stdout.Fd()) + state, err := terminal.GetState(fd) + if err != nil { + fmt.Fprintf(os.Stderr, "unable to get terminal state: %v\n", err) + return + } + + AddCleanupHandler(func() error { + err := checkErrno(terminal.Restore(fd, state)) + if err != nil { + fmt.Fprintf(os.Stderr, "unable to get restore terminal state: %#+v\n", err) + } + return err + }) +} + +// ClearLine creates a platform dependent string to clear the current +// line, so it can be overwritten. ANSI sequences are not supported on +// current windows cmd shell. +func ClearLine() string { + if runtime.GOOS == "windows" { + if w := stdoutTerminalWidth(); w > 0 { + return strings.Repeat(" ", w-1) + "\r" + } + return "" + } + return "\x1b[2K" +} + +// Printf writes the message to the configured stdout stream. +func Printf(format string, args ...interface{}) { + _, err := fmt.Fprintf(globalOptions.stdout, format, args...) + if err != nil { + fmt.Fprintf(os.Stderr, "unable to write to stdout: %v\n", err) + } +} + +// Verbosef calls Printf to write the message when the verbose flag is set. +func Verbosef(format string, args ...interface{}) { + if globalOptions.verbosity >= 1 { + Printf(format, args...) + } +} + +// PrintProgress wraps fmt.Printf to handle the difference in writing progress +// information to terminals and non-terminal stdout +func PrintProgress(format string, args ...interface{}) { + var ( + message string + carriageControl string + ) + message = fmt.Sprintf(format, args...) + + if !(strings.HasSuffix(message, "\r") || strings.HasSuffix(message, "\n")) { + if stdoutIsTerminal() { + carriageControl = "\r" + } else { + carriageControl = "\n" + } + message = fmt.Sprintf("%s%s", message, carriageControl) + } + + if stdoutIsTerminal() { + message = fmt.Sprintf("%s%s", ClearLine(), message) + } + + fmt.Print(message) +} + +// Warnf writes the message to the configured stderr stream. +func Warnf(format string, args ...interface{}) { + _, err := fmt.Fprintf(globalOptions.stderr, format, args...) + if err != nil { + fmt.Fprintf(os.Stderr, "unable to write to stderr: %v\n", err) + } +} + +// Exitf uses Warnf to write the message and then terminates the process with +// the given exit code. +func Exitf(exitcode int, format string, args ...interface{}) { + if format[len(format)-1] != '\n' { + format += "\n" + } + + Warnf(format, args...) + Exit(exitcode) +} + +// resolvePassword determines the password to be used for opening the repository. 
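+// The resolution order implemented below: the --password-command output
+// first, then the --password-file contents, then $RESTIC_PASSWORD, and
+// finally an empty string, which later makes ReadPassword prompt
+// interactively.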
+func resolvePassword(opts GlobalOptions) (string, error) { + if opts.PasswordFile != "" && opts.PasswordCommand != "" { + return "", errors.Fatalf("Password file and command are mutually exclusive options") + } + if opts.PasswordCommand != "" { + args, err := backend.SplitShellStrings(opts.PasswordCommand) + if err != nil { + return "", err + } + cmd := exec.Command(args[0], args[1:]...) + cmd.Stderr = os.Stderr + output, err := cmd.Output() + if err != nil { + return "", err + } + return (strings.TrimSpace(string(output))), nil + } + if opts.PasswordFile != "" { + s, err := textfile.Read(opts.PasswordFile) + if os.IsNotExist(errors.Cause(err)) { + return "", errors.Fatalf("%s does not exist", opts.PasswordFile) + } + return strings.TrimSpace(string(s)), errors.Wrap(err, "Readfile") + } + + if pwd := os.Getenv("RESTIC_PASSWORD"); pwd != "" { + return pwd, nil + } + + return "", nil +} + +// readPassword reads the password from the given reader directly. +func readPassword(in io.Reader) (password string, err error) { + buf := make([]byte, 1000) + n, err := io.ReadFull(in, buf) + buf = buf[:n] + + if err != nil && errors.Cause(err) != io.ErrUnexpectedEOF { + return "", errors.Wrap(err, "ReadFull") + } + + return strings.TrimRight(string(buf), "\r\n"), nil +} + +// readPasswordTerminal reads the password from the given reader which must be a +// tty. Prompt is printed on the writer out before attempting to read the +// password. +func readPasswordTerminal(in *os.File, out io.Writer, prompt string) (password string, err error) { + fmt.Fprint(out, prompt) + buf, err := terminal.ReadPassword(int(in.Fd())) + fmt.Fprintln(out) + if err != nil { + return "", errors.Wrap(err, "ReadPassword") + } + + password = string(buf) + return password, nil +} + +// ReadPassword reads the password from a password file, the environment +// variable RESTIC_PASSWORD or prompts the user. +func ReadPassword(opts GlobalOptions, prompt string) (string, error) { + if opts.password != "" { + return opts.password, nil + } + + var ( + password string + err error + ) + + if stdinIsTerminal() { + password, err = readPasswordTerminal(os.Stdin, os.Stderr, prompt) + } else { + password, err = readPassword(os.Stdin) + Verbosef("read password from stdin\n") + } + + if err != nil { + return "", errors.Wrap(err, "unable to read password") + } + + if len(password) == 0 { + return "", errors.Fatal("an empty password is not a password") + } + + return password, nil +} + +// ReadPasswordTwice calls ReadPassword two times and returns an error when the +// passwords don't match. +func ReadPasswordTwice(gopts GlobalOptions, prompt1, prompt2 string) (string, error) { + pw1, err := ReadPassword(gopts, prompt1) + if err != nil { + return "", err + } + pw2, err := ReadPassword(gopts, prompt2) + if err != nil { + return "", err + } + + if pw1 != pw2 { + return "", errors.Fatal("passwords do not match") + } + + return pw1, nil +} + +const maxKeys = 20 + +// OpenRepository reads the password and opens the repository. 
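+// A minimal sketch of the usual call sequence (gopts comes from the
+// parsed global flags):
+//
+//	repo, err := OpenRepository(gopts)
+//	if err == nil {
+//		err = repo.LoadIndex(gopts.ctx)
+//	}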
+func OpenRepository(opts GlobalOptions) (*repository.Repository, error) { + if opts.Repo == "" { + return nil, errors.Fatal("Please specify repository location (-r)") + } + + be, err := open(opts.Repo, opts, opts.extended) + if err != nil { + return nil, err + } + + be = backend.NewRetryBackend(be, 10, func(msg string, err error, d time.Duration) { + Warnf("%v returned error, retrying after %v: %v\n", msg, d, err) + }) + + s := repository.New(be) + + opts.password, err = ReadPassword(opts, "enter password for repository: ") + if err != nil { + return nil, err + } + + err = s.SearchKey(opts.ctx, opts.password, maxKeys, opts.KeyHint) + if err != nil { + return nil, err + } + + if stdoutIsTerminal() { + id := s.Config().ID + if len(id) > 8 { + id = id[:8] + } + if !opts.JSON { + Verbosef("repository %v opened successfully, password is correct\n", id) + } + } + + if opts.NoCache { + return s, nil + } + + c, err := cache.New(s.Config().ID, opts.CacheDir) + if err != nil { + Warnf("unable to open cache: %v\n", err) + return s, nil + } + + if c.Created && !opts.JSON { + Verbosef("created new cache in %v\n", c.Base) + } + + // start using the cache + s.UseCache(c) + + oldCacheDirs, err := cache.Old(c.Base) + if err != nil { + Warnf("unable to find old cache directories: %v", err) + } + + // nothing more to do if no old cache dirs could be found + if len(oldCacheDirs) == 0 { + return s, nil + } + + // cleanup old cache dirs if instructed to do so + if opts.CleanupCache { + Printf("removing %d old cache dirs from %v\n", len(oldCacheDirs), c.Base) + + for _, item := range oldCacheDirs { + dir := filepath.Join(c.Base, item.Name()) + err = fs.RemoveAll(dir) + if err != nil { + Warnf("unable to remove %v: %v\n", dir, err) + } + } + } else { + if stdoutIsTerminal() { + Verbosef("found %d old cache directories in %v, pass --cleanup-cache to remove them\n", + len(oldCacheDirs), c.Base) + } + } + + return s, nil +} + +func parseConfig(loc location.Location, opts options.Options) (interface{}, error) { + // only apply options for a particular backend here + opts = opts.Extract(loc.Scheme) + + switch loc.Scheme { + case "local": + cfg := loc.Config.(local.Config) + if err := opts.Apply(loc.Scheme, &cfg); err != nil { + return nil, err + } + + debug.Log("opening local repository at %#v", cfg) + return cfg, nil + + case "sftp": + cfg := loc.Config.(sftp.Config) + if err := opts.Apply(loc.Scheme, &cfg); err != nil { + return nil, err + } + + debug.Log("opening sftp repository at %#v", cfg) + return cfg, nil + + case "s3": + cfg := loc.Config.(s3.Config) + if cfg.KeyID == "" { + cfg.KeyID = os.Getenv("AWS_ACCESS_KEY_ID") + } + + if cfg.Secret == "" { + cfg.Secret = os.Getenv("AWS_SECRET_ACCESS_KEY") + } + + if err := opts.Apply(loc.Scheme, &cfg); err != nil { + return nil, err + } + + debug.Log("opening s3 repository at %#v", cfg) + return cfg, nil + + case "gs": + cfg := loc.Config.(gs.Config) + if cfg.ProjectID == "" { + cfg.ProjectID = os.Getenv("GOOGLE_PROJECT_ID") + } + + if err := opts.Apply(loc.Scheme, &cfg); err != nil { + return nil, err + } + + debug.Log("opening gs repository at %#v", cfg) + return cfg, nil + + case "azure": + cfg := loc.Config.(azure.Config) + if cfg.AccountName == "" { + cfg.AccountName = os.Getenv("AZURE_ACCOUNT_NAME") + } + + if cfg.AccountKey == "" { + cfg.AccountKey = os.Getenv("AZURE_ACCOUNT_KEY") + } + + if err := opts.Apply(loc.Scheme, &cfg); err != nil { + return nil, err + } + + debug.Log("opening gs repository at %#v", cfg) + return cfg, nil + + case "swift": + cfg := 
+
+		if err := swift.ApplyEnvironment("", &cfg); err != nil {
+			return nil, err
+		}
+
+		if err := opts.Apply(loc.Scheme, &cfg); err != nil {
+			return nil, err
+		}
+
+		debug.Log("opening swift repository at %#v", cfg)
+		return cfg, nil
+
+	case "b2":
+		cfg := loc.Config.(b2.Config)
+
+		if cfg.AccountID == "" {
+			cfg.AccountID = os.Getenv("B2_ACCOUNT_ID")
+		}
+
+		if cfg.AccountID == "" {
+			return nil, errors.Fatalf("unable to open B2 backend: Account ID ($B2_ACCOUNT_ID) is empty")
+		}
+
+		if cfg.Key == "" {
+			cfg.Key = os.Getenv("B2_ACCOUNT_KEY")
+		}
+
+		if cfg.Key == "" {
+			return nil, errors.Fatalf("unable to open B2 backend: Key ($B2_ACCOUNT_KEY) is empty")
+		}
+
+		if err := opts.Apply(loc.Scheme, &cfg); err != nil {
+			return nil, err
+		}
+
+		debug.Log("opening b2 repository at %#v", cfg)
+		return cfg, nil
+	case "rest":
+		cfg := loc.Config.(rest.Config)
+		if err := opts.Apply(loc.Scheme, &cfg); err != nil {
+			return nil, err
+		}
+
+		debug.Log("opening rest repository at %#v", cfg)
+		return cfg, nil
+	case "rclone":
+		cfg := loc.Config.(rclone.Config)
+		if err := opts.Apply(loc.Scheme, &cfg); err != nil {
+			return nil, err
+		}
+
+		debug.Log("opening rclone repository at %#v", cfg)
+		return cfg, nil
+	}
+
+	return nil, errors.Fatalf("invalid backend: %q", loc.Scheme)
+}
+
+// Open the backend specified by a location config.
+func open(s string, gopts GlobalOptions, opts options.Options) (restic.Backend, error) {
+	debug.Log("parsing location %v", s)
+	loc, err := location.Parse(s)
+	if err != nil {
+		return nil, errors.Fatalf("parsing repository location failed: %v", err)
+	}
+
+	var be restic.Backend
+
+	cfg, err := parseConfig(loc, opts)
+	if err != nil {
+		return nil, err
+	}
+
+	tropts := backend.TransportOptions{
+		RootCertFilenames:        globalOptions.CACerts,
+		TLSClientCertKeyFilename: globalOptions.TLSClientCert,
+	}
+	rt, err := backend.Transport(tropts)
+	if err != nil {
+		return nil, err
+	}
+
+	// wrap the transport so that the throughput via HTTP is limited
+	lim := limiter.NewStaticLimiter(gopts.LimitUploadKb, gopts.LimitDownloadKb)
+	rt = lim.Transport(rt)
+
+	switch loc.Scheme {
+	case "local":
+		be, err = local.Open(cfg.(local.Config))
+		// wrap the backend in a LimitBackend so that the throughput is limited
+		be = limiter.LimitBackend(be, lim)
+	case "sftp":
+		be, err = sftp.Open(cfg.(sftp.Config))
+		// wrap the backend in a LimitBackend so that the throughput is limited
+		be = limiter.LimitBackend(be, lim)
+	case "s3":
+		be, err = s3.Open(cfg.(s3.Config), rt)
+	case "gs":
+		be, err = gs.Open(cfg.(gs.Config), rt)
+	case "azure":
+		be, err = azure.Open(cfg.(azure.Config), rt)
+	case "swift":
+		be, err = swift.Open(cfg.(swift.Config), rt)
+	case "b2":
+		be, err = b2.Open(globalOptions.ctx, cfg.(b2.Config), rt)
+	case "rest":
+		be, err = rest.Open(cfg.(rest.Config), rt)
+	case "rclone":
+		be, err = rclone.Open(cfg.(rclone.Config), lim)
+
+	default:
+		return nil, errors.Fatalf("invalid backend: %q", loc.Scheme)
+	}
+
+	if err != nil {
+		return nil, errors.Fatalf("unable to open repo at %v: %v", s, err)
+	}
+
+	// check if config is there
+	fi, err := be.Stat(globalOptions.ctx, restic.Handle{Type: restic.ConfigFile})
+	if err != nil {
+		return nil, errors.Fatalf("unable to open config file: %v\nIs there a repository at the following location?\n%v", err, s)
+	}
+
+	if fi.Size == 0 {
+		return nil, errors.New("config file has zero size, invalid repository?")
+	}
+
+	return be, nil
+}
+
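Worth noting how open composes behavior out of small wrappers: remote backends
get a rate-limited HTTP transport, local and sftp get a LimitBackend wrapper,
and OpenRepository above stacks backend.NewRetryBackend on top of whatever
open returns. A stripped-down sketch of that decorator pattern, with
simplified stand-in types (not restic's actual interfaces):

package main

import "fmt"

// Backend is a stand-in for restic.Backend.
type Backend interface {
	Save(name string) error
}

type baseBackend struct{}

func (baseBackend) Save(name string) error {
	fmt.Println("save", name)
	return nil
}

// retryBackend mirrors the idea of backend.NewRetryBackend: retry a failing
// operation a fixed number of times before giving up.
type retryBackend struct {
	Backend
	tries int
}

func (r retryBackend) Save(name string) error {
	var err error
	for i := 0; i < r.tries; i++ {
		if err = r.Backend.Save(name); err == nil {
			return nil
		}
	}
	return err
}

func main() {
	var be Backend = baseBackend{}
	be = retryBackend{Backend: be, tries: 10} // OpenRepository uses 10 retries
	_ = be.Save("config")
}

+// Create the backend specified by URI.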
+func create(s string, opts options.Options) (restic.Backend, error) { + debug.Log("parsing location %v", s) + loc, err := location.Parse(s) + if err != nil { + return nil, err + } + + cfg, err := parseConfig(loc, opts) + if err != nil { + return nil, err + } + + tropts := backend.TransportOptions{ + RootCertFilenames: globalOptions.CACerts, + TLSClientCertKeyFilename: globalOptions.TLSClientCert, + } + rt, err := backend.Transport(tropts) + if err != nil { + return nil, err + } + + switch loc.Scheme { + case "local": + return local.Create(cfg.(local.Config)) + case "sftp": + return sftp.Create(cfg.(sftp.Config)) + case "s3": + return s3.Create(cfg.(s3.Config), rt) + case "gs": + return gs.Create(cfg.(gs.Config), rt) + case "azure": + return azure.Create(cfg.(azure.Config), rt) + case "swift": + return swift.Open(cfg.(swift.Config), rt) + case "b2": + return b2.Create(globalOptions.ctx, cfg.(b2.Config), rt) + case "rest": + return rest.Create(cfg.(rest.Config), rt) + case "rclone": + return rclone.Open(cfg.(rclone.Config), nil) + } + + debug.Log("invalid repository scheme: %v", s) + return nil, errors.Fatalf("invalid scheme %q", loc.Scheme) +} diff --git a/cmd/restic/global_debug.go b/cmd/restic/global_debug.go new file mode 100644 index 000000000..6f04d047b --- /dev/null +++ b/cmd/restic/global_debug.go @@ -0,0 +1,97 @@ +// +build debug profile + +package main + +import ( + "fmt" + "net/http" + _ "net/http/pprof" + "os" + + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/repository" + + "github.com/pkg/profile" +) + +var ( + listenProfile string + memProfilePath string + cpuProfilePath string + traceProfilePath string + blockProfilePath string + insecure bool +) + +func init() { + f := cmdRoot.PersistentFlags() + f.StringVar(&listenProfile, "listen-profile", "", "listen on this `address:port` for memory profiling") + f.StringVar(&memProfilePath, "mem-profile", "", "write memory profile to `dir`") + f.StringVar(&cpuProfilePath, "cpu-profile", "", "write cpu profile to `dir`") + f.StringVar(&traceProfilePath, "trace-profile", "", "write trace to `dir`") + f.StringVar(&blockProfilePath, "block-profile", "", "write block profile to `dir`") + f.BoolVar(&insecure, "insecure-kdf", false, "use insecure KDF settings") +} + +type fakeTestingTB struct{} + +func (fakeTestingTB) Logf(msg string, args ...interface{}) { + fmt.Fprintf(os.Stderr, msg, args...) 
+} + +func runDebug() error { + if listenProfile != "" { + fmt.Fprintf(os.Stderr, "running profile HTTP server on %v\n", listenProfile) + go func() { + err := http.ListenAndServe(listenProfile, nil) + if err != nil { + fmt.Fprintf(os.Stderr, "profile HTTP server listen failed: %v\n", err) + } + }() + } + + profilesEnabled := 0 + if memProfilePath != "" { + profilesEnabled++ + } + if cpuProfilePath != "" { + profilesEnabled++ + } + if traceProfilePath != "" { + profilesEnabled++ + } + if blockProfilePath != "" { + profilesEnabled++ + } + + if profilesEnabled > 1 { + return errors.Fatal("only one profile (memory, CPU, trace, or block) may be activated at the same time") + } + + var prof interface { + Stop() + } + + if memProfilePath != "" { + prof = profile.Start(profile.Quiet, profile.NoShutdownHook, profile.MemProfile, profile.ProfilePath(memProfilePath)) + } else if cpuProfilePath != "" { + prof = profile.Start(profile.Quiet, profile.NoShutdownHook, profile.CPUProfile, profile.ProfilePath(cpuProfilePath)) + } else if traceProfilePath != "" { + prof = profile.Start(profile.Quiet, profile.NoShutdownHook, profile.TraceProfile, profile.ProfilePath(traceProfilePath)) + } else if blockProfilePath != "" { + prof = profile.Start(profile.Quiet, profile.NoShutdownHook, profile.BlockProfile, profile.ProfilePath(blockProfilePath)) + } + + if prof != nil { + AddCleanupHandler(func() error { + prof.Stop() + return nil + }) + } + + if insecure { + repository.TestUseLowSecurityKDFParameters(fakeTestingTB{}) + } + + return nil +} diff --git a/cmd/restic/global_release.go b/cmd/restic/global_release.go new file mode 100644 index 000000000..f17d99639 --- /dev/null +++ b/cmd/restic/global_release.go @@ -0,0 +1,6 @@ +// +build !debug,!profile + +package main + +// runDebug is a noop without the debug tag. +func runDebug() error { return nil } diff --git a/cmd/restic/integration_fuse_test.go b/cmd/restic/integration_fuse_test.go new file mode 100644 index 000000000..57a57ae97 --- /dev/null +++ b/cmd/restic/integration_fuse_test.go @@ -0,0 +1,220 @@ +// +build !netbsd +// +build !openbsd +// +build !solaris +// +build !windows + +package main + +import ( + "context" + "fmt" + "os" + "path/filepath" + "testing" + "time" + + "github.com/restic/restic/internal/repository" + "github.com/restic/restic/internal/restic" + rtest "github.com/restic/restic/internal/test" +) + +const ( + mountWait = 20 + mountSleep = 100 * time.Millisecond + mountTestSubdir = "snapshots" +) + +func snapshotsDirExists(t testing.TB, dir string) bool { + f, err := os.Open(filepath.Join(dir, mountTestSubdir)) + if err != nil && os.IsNotExist(err) { + return false + } + + if err != nil { + t.Error(err) + } + + if err := f.Close(); err != nil { + t.Error(err) + } + + return true +} + +// waitForMount blocks (max mountWait * mountSleep) until the subdir +// "snapshots" appears in the dir. 
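With mountWait = 20 and mountSleep = 100 ms as defined above, waitForMount
below polls at most 20 times, i.e. a mount gets roughly 20 × 100 ms = 2 s to
appear before the test fails; testRunUmount further down reuses the same bound
when unmounting.
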
+func waitForMount(t testing.TB, dir string) { + for i := 0; i < mountWait; i++ { + if snapshotsDirExists(t, dir) { + t.Log("mounted directory is ready") + return + } + + time.Sleep(mountSleep) + } + + t.Errorf("subdir %q of dir %s never appeared", mountTestSubdir, dir) +} + +func testRunMount(t testing.TB, gopts GlobalOptions, dir string) { + opts := MountOptions{ + SnapshotTemplate: time.RFC3339, + } + rtest.OK(t, runMount(opts, gopts, []string{dir})) +} + +func testRunUmount(t testing.TB, gopts GlobalOptions, dir string) { + var err error + for i := 0; i < mountWait; i++ { + if err = umount(dir); err == nil { + t.Logf("directory %v umounted", dir) + return + } + + time.Sleep(mountSleep) + } + + t.Errorf("unable to umount dir %v, last error was: %v", dir, err) +} + +func listSnapshots(t testing.TB, dir string) []string { + snapshotsDir, err := os.Open(filepath.Join(dir, "snapshots")) + rtest.OK(t, err) + names, err := snapshotsDir.Readdirnames(-1) + rtest.OK(t, err) + rtest.OK(t, snapshotsDir.Close()) + return names +} + +func checkSnapshots(t testing.TB, global GlobalOptions, repo *repository.Repository, mountpoint, repodir string, snapshotIDs restic.IDs, expectedSnapshotsInFuseDir int) { + t.Logf("checking for %d snapshots: %v", len(snapshotIDs), snapshotIDs) + + go testRunMount(t, global, mountpoint) + waitForMount(t, mountpoint) + defer testRunUmount(t, global, mountpoint) + + if !snapshotsDirExists(t, mountpoint) { + t.Fatal(`virtual directory "snapshots" doesn't exist`) + } + + ids := listSnapshots(t, repodir) + t.Logf("found %v snapshots in repo: %v", len(ids), ids) + + namesInSnapshots := listSnapshots(t, mountpoint) + t.Logf("found %v snapshots in fuse mount: %v", len(namesInSnapshots), namesInSnapshots) + rtest.Assert(t, + expectedSnapshotsInFuseDir == len(namesInSnapshots), + "Invalid number of snapshots: expected %d, got %d", expectedSnapshotsInFuseDir, len(namesInSnapshots)) + + namesMap := make(map[string]bool) + for _, name := range namesInSnapshots { + namesMap[name] = false + } + + // Is "latest" present? 
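For the checks that follow: the virtual snapshots/ directory is expected to
hold one entry per snapshot, named by its RFC3339 timestamp (the
SnapshotTemplate passed in testRunMount), plus a "latest" symlink; when
several snapshots share a timestamp, the extra entries get -1, -2, ...
suffixes, which is what the suffix-probing loop further below walks through.
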
+ if len(namesMap) != 0 { + _, ok := namesMap["latest"] + if !ok { + t.Errorf("Symlink latest isn't present in fuse dir") + } else { + namesMap["latest"] = true + } + } + + for _, id := range snapshotIDs { + snapshot, err := restic.LoadSnapshot(context.TODO(), repo, id) + rtest.OK(t, err) + + ts := snapshot.Time.Format(time.RFC3339) + present, ok := namesMap[ts] + if !ok { + t.Errorf("Snapshot %v (%q) isn't present in fuse dir", id.Str(), ts) + } + + for i := 1; present; i++ { + ts = fmt.Sprintf("%s-%d", snapshot.Time.Format(time.RFC3339), i) + present, ok = namesMap[ts] + if !ok { + t.Errorf("Snapshot %v (%q) isn't present in fuse dir", id.Str(), ts) + } + + if !present { + break + } + } + + namesMap[ts] = true + } + + for name, present := range namesMap { + rtest.Assert(t, present, "Directory %s is present in fuse dir but is not a snapshot", name) + } +} + +func TestMount(t *testing.T) { + if !rtest.RunFuseTest { + t.Skip("Skipping fuse tests") + } + + env, cleanup := withTestEnvironment(t) + defer cleanup() + + testRunInit(t, env.gopts) + + repo, err := OpenRepository(env.gopts) + rtest.OK(t, err) + + // We remove the mountpoint now to check that cmdMount creates it + rtest.RemoveAll(t, env.mountpoint) + + checkSnapshots(t, env.gopts, repo, env.mountpoint, env.repo, []restic.ID{}, 0) + + rtest.SetupTarTestFixture(t, env.testdata, filepath.Join("testdata", "backup-data.tar.gz")) + + // first backup + testRunBackup(t, "", []string{env.testdata}, BackupOptions{}, env.gopts) + snapshotIDs := testRunList(t, "snapshots", env.gopts) + rtest.Assert(t, len(snapshotIDs) == 1, + "expected one snapshot, got %v", snapshotIDs) + + checkSnapshots(t, env.gopts, repo, env.mountpoint, env.repo, snapshotIDs, 2) + + // second backup, implicit incremental + testRunBackup(t, "", []string{env.testdata}, BackupOptions{}, env.gopts) + snapshotIDs = testRunList(t, "snapshots", env.gopts) + rtest.Assert(t, len(snapshotIDs) == 2, + "expected two snapshots, got %v", snapshotIDs) + + checkSnapshots(t, env.gopts, repo, env.mountpoint, env.repo, snapshotIDs, 3) + + // third backup, explicit incremental + bopts := BackupOptions{Parent: snapshotIDs[0].String()} + testRunBackup(t, "", []string{env.testdata}, bopts, env.gopts) + snapshotIDs = testRunList(t, "snapshots", env.gopts) + rtest.Assert(t, len(snapshotIDs) == 3, + "expected three snapshots, got %v", snapshotIDs) + + checkSnapshots(t, env.gopts, repo, env.mountpoint, env.repo, snapshotIDs, 4) +} + +func TestMountSameTimestamps(t *testing.T) { + if !rtest.RunFuseTest { + t.Skip("Skipping fuse tests") + } + + env, cleanup := withTestEnvironment(t) + defer cleanup() + + rtest.SetupTarTestFixture(t, env.base, filepath.Join("testdata", "repo-same-timestamps.tar.gz")) + + repo, err := OpenRepository(env.gopts) + rtest.OK(t, err) + + ids := []restic.ID{ + restic.TestParseID("280303689e5027328889a06d718b729e96a1ce6ae9ef8290bff550459ae611ee"), + restic.TestParseID("75ad6cdc0868e082f2596d5ab8705e9f7d87316f5bf5690385eeff8dbe49d9f5"), + restic.TestParseID("5fd0d8b2ef0fa5d23e58f1e460188abb0f525c0f0c4af8365a1280c807a80a1b"), + } + + checkSnapshots(t, env.gopts, repo, env.mountpoint, env.repo, ids, 4) +} diff --git a/cmd/restic/integration_helpers_test.go b/cmd/restic/integration_helpers_test.go new file mode 100644 index 000000000..d277184f2 --- /dev/null +++ b/cmd/restic/integration_helpers_test.go @@ -0,0 +1,234 @@ +package main + +import ( + "context" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "testing" + + "github.com/restic/restic/internal/options" + 
"github.com/restic/restic/internal/repository" + "github.com/restic/restic/internal/restic" + rtest "github.com/restic/restic/internal/test" +) + +type dirEntry struct { + path string + fi os.FileInfo + link uint64 +} + +func walkDir(dir string) <-chan *dirEntry { + ch := make(chan *dirEntry, 100) + + go func() { + err := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error { + if err != nil { + fmt.Fprintf(os.Stderr, "error: %v\n", err) + return nil + } + + name, err := filepath.Rel(dir, path) + if err != nil { + fmt.Fprintf(os.Stderr, "error: %v\n", err) + return nil + } + + ch <- &dirEntry{ + path: name, + fi: info, + link: nlink(info), + } + + return nil + }) + + if err != nil { + fmt.Fprintf(os.Stderr, "Walk() error: %v\n", err) + } + + close(ch) + }() + + // first element is root + _ = <-ch + + return ch +} + +func isSymlink(fi os.FileInfo) bool { + mode := fi.Mode() & (os.ModeType | os.ModeCharDevice) + return mode == os.ModeSymlink +} + +func sameModTime(fi1, fi2 os.FileInfo) bool { + switch runtime.GOOS { + case "darwin", "freebsd", "openbsd", "netbsd": + if isSymlink(fi1) && isSymlink(fi2) { + return true + } + } + + same := fi1.ModTime().Equal(fi2.ModTime()) + if !same && (runtime.GOOS == "darwin" || runtime.GOOS == "openbsd") { + // Allow up to 1μs difference, because macOS <10.13 cannot restore + // with nanosecond precision and the current version of Go (1.9.2) + // does not yet support the new syscall. (#1087) + mt1 := fi1.ModTime() + mt2 := fi2.ModTime() + usecDiff := (mt1.Nanosecond()-mt2.Nanosecond())/1000 + (mt1.Second()-mt2.Second())*1000000 + same = usecDiff <= 1 && usecDiff >= -1 + } + return same +} + +// directoriesEqualContents checks if both directories contain exactly the same +// contents. +func directoriesEqualContents(dir1, dir2 string) bool { + ch1 := walkDir(dir1) + ch2 := walkDir(dir2) + + changes := false + + var a, b *dirEntry + for { + var ok bool + + if ch1 != nil && a == nil { + a, ok = <-ch1 + if !ok { + ch1 = nil + } + } + + if ch2 != nil && b == nil { + b, ok = <-ch2 + if !ok { + ch2 = nil + } + } + + if ch1 == nil && ch2 == nil { + break + } + + if ch1 == nil { + fmt.Printf("+%v\n", b.path) + changes = true + } else if ch2 == nil { + fmt.Printf("-%v\n", a.path) + changes = true + } else if !a.equals(b) { + if a.path < b.path { + fmt.Printf("-%v\n", a.path) + changes = true + a = nil + continue + } else if a.path > b.path { + fmt.Printf("+%v\n", b.path) + changes = true + b = nil + continue + } else { + fmt.Printf("%%%v\n", a.path) + changes = true + } + } + + a, b = nil, nil + } + + if changes { + return false + } + + return true +} + +type dirStat struct { + files, dirs, other uint + size uint64 +} + +func isFile(fi os.FileInfo) bool { + return fi.Mode()&(os.ModeType|os.ModeCharDevice) == 0 +} + +// dirStats walks dir and collects stats. +func dirStats(dir string) (stat dirStat) { + for entry := range walkDir(dir) { + if isFile(entry.fi) { + stat.files++ + stat.size += uint64(entry.fi.Size()) + continue + } + + if entry.fi.IsDir() { + stat.dirs++ + continue + } + + stat.other++ + } + + return stat +} + +type testEnvironment struct { + base, cache, repo, mountpoint, testdata string + gopts GlobalOptions +} + +// withTestEnvironment creates a test environment and returns a cleanup +// function which removes it. 
+func withTestEnvironment(t testing.TB) (env *testEnvironment, cleanup func()) { + if !rtest.RunIntegrationTest { + t.Skip("integration tests disabled") + } + + repository.TestUseLowSecurityKDFParameters(t) + restic.TestDisableCheckPolynomial(t) + + tempdir, err := ioutil.TempDir(rtest.TestTempDir, "restic-test-") + rtest.OK(t, err) + + env = &testEnvironment{ + base: tempdir, + cache: filepath.Join(tempdir, "cache"), + repo: filepath.Join(tempdir, "repo"), + testdata: filepath.Join(tempdir, "testdata"), + mountpoint: filepath.Join(tempdir, "mount"), + } + + rtest.OK(t, os.MkdirAll(env.mountpoint, 0700)) + rtest.OK(t, os.MkdirAll(env.testdata, 0700)) + rtest.OK(t, os.MkdirAll(env.cache, 0700)) + rtest.OK(t, os.MkdirAll(env.repo, 0700)) + + env.gopts = GlobalOptions{ + Repo: env.repo, + Quiet: true, + CacheDir: env.cache, + ctx: context.Background(), + password: rtest.TestPassword, + stdout: os.Stdout, + stderr: os.Stderr, + extended: make(options.Options), + } + + // always overwrite global options + globalOptions = env.gopts + + cleanup = func() { + if !rtest.TestCleanupTempDirs { + t.Logf("leaving temporary directory %v used for test", tempdir) + return + } + rtest.RemoveAll(t, tempdir) + } + + return env, cleanup +} diff --git a/cmd/restic/integration_helpers_unix_test.go b/cmd/restic/integration_helpers_unix_test.go new file mode 100644 index 000000000..2a06db63d --- /dev/null +++ b/cmd/restic/integration_helpers_unix_test.go @@ -0,0 +1,70 @@ +//+build !windows + +package main + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "syscall" +) + +func (e *dirEntry) equals(other *dirEntry) bool { + if e.path != other.path { + fmt.Fprintf(os.Stderr, "%v: path does not match (%v != %v)\n", e.path, e.path, other.path) + return false + } + + if e.fi.Mode() != other.fi.Mode() { + fmt.Fprintf(os.Stderr, "%v: mode does not match (%v != %v)\n", e.path, e.fi.Mode(), other.fi.Mode()) + return false + } + + if !sameModTime(e.fi, other.fi) { + fmt.Fprintf(os.Stderr, "%v: ModTime does not match (%v != %v)\n", e.path, e.fi.ModTime(), other.fi.ModTime()) + return false + } + + stat, _ := e.fi.Sys().(*syscall.Stat_t) + stat2, _ := other.fi.Sys().(*syscall.Stat_t) + + if stat.Uid != stat2.Uid { + fmt.Fprintf(os.Stderr, "%v: UID does not match (%v != %v)\n", e.path, stat.Uid, stat2.Uid) + return false + } + + if stat.Gid != stat2.Gid { + fmt.Fprintf(os.Stderr, "%v: GID does not match (%v != %v)\n", e.path, stat.Gid, stat2.Gid) + return false + } + + if stat.Nlink != stat2.Nlink { + fmt.Fprintf(os.Stderr, "%v: Number of links do not match (%v != %v)\n", e.path, stat.Nlink, stat2.Nlink) + return false + } + + return true +} + +func nlink(info os.FileInfo) uint64 { + stat, _ := info.Sys().(*syscall.Stat_t) + return uint64(stat.Nlink) +} + +func createFileSetPerHardlink(dir string) map[uint64][]string { + var stat syscall.Stat_t + linkTests := make(map[uint64][]string) + files, err := ioutil.ReadDir(dir) + if err != nil { + return nil + } + for _, f := range files { + + if err := syscall.Stat(filepath.Join(dir, f.Name()), &stat); err != nil { + return nil + } + linkTests[uint64(stat.Ino)] = append(linkTests[uint64(stat.Ino)], f.Name()) + } + return linkTests +} diff --git a/cmd/restic/integration_helpers_windows_test.go b/cmd/restic/integration_helpers_windows_test.go new file mode 100644 index 000000000..9e3fbac9b --- /dev/null +++ b/cmd/restic/integration_helpers_windows_test.go @@ -0,0 +1,49 @@ +//+build windows + +package main + +import ( + "fmt" + "io/ioutil" + "os" +) + +func (e *dirEntry) 
equals(other *dirEntry) bool { + if e.path != other.path { + fmt.Fprintf(os.Stderr, "%v: path does not match (%v != %v)\n", e.path, e.path, other.path) + return false + } + + if e.fi.Mode() != other.fi.Mode() { + fmt.Fprintf(os.Stderr, "%v: mode does not match (%v != %v)\n", e.path, e.fi.Mode(), other.fi.Mode()) + return false + } + + if !sameModTime(e.fi, other.fi) { + fmt.Fprintf(os.Stderr, "%v: ModTime does not match (%v != %v)\n", e.path, e.fi.ModTime(), other.fi.ModTime()) + return false + } + + return true +} + +func nlink(info os.FileInfo) uint64 { + return 1 +} + +func inode(info os.FileInfo) uint64 { + return uint64(0) +} + +func createFileSetPerHardlink(dir string) map[uint64][]string { + linkTests := make(map[uint64][]string) + files, err := ioutil.ReadDir(dir) + if err != nil { + return nil + } + for i, f := range files { + linkTests[uint64(i)] = append(linkTests[uint64(i)], f.Name()) + i++ + } + return linkTests +} diff --git a/cmd/restic/integration_test.go b/cmd/restic/integration_test.go new file mode 100644 index 000000000..e47000d34 --- /dev/null +++ b/cmd/restic/integration_test.go @@ -0,0 +1,1190 @@ +package main + +import ( + "bufio" + "bytes" + "context" + "crypto/rand" + "encoding/json" + "fmt" + "io" + "io/ioutil" + mrand "math/rand" + "os" + "path/filepath" + "regexp" + "strings" + "syscall" + "testing" + "time" + + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/filter" + "github.com/restic/restic/internal/fs" + "github.com/restic/restic/internal/repository" + "github.com/restic/restic/internal/restic" + rtest "github.com/restic/restic/internal/test" + "github.com/restic/restic/internal/ui/termstatus" + "golang.org/x/sync/errgroup" +) + +func parseIDsFromReader(t testing.TB, rd io.Reader) restic.IDs { + IDs := restic.IDs{} + sc := bufio.NewScanner(rd) + + for sc.Scan() { + id, err := restic.ParseID(sc.Text()) + if err != nil { + t.Logf("parse id %v: %v", sc.Text(), err) + continue + } + + IDs = append(IDs, id) + } + + return IDs +} + +func testRunInit(t testing.TB, opts GlobalOptions) { + repository.TestUseLowSecurityKDFParameters(t) + restic.TestDisableCheckPolynomial(t) + restic.TestSetLockTimeout(t, 0) + + rtest.OK(t, runInit(opts, nil)) + t.Logf("repository initialized at %v", opts.Repo) +} + +func testRunBackup(t testing.TB, dir string, target []string, opts BackupOptions, gopts GlobalOptions) { + ctx, cancel := context.WithCancel(gopts.ctx) + defer cancel() + + var wg errgroup.Group + term := termstatus.New(gopts.stdout, gopts.stderr, gopts.Quiet) + wg.Go(func() error { term.Run(ctx); return nil }) + + gopts.stdout = ioutil.Discard + t.Logf("backing up %v in %v", target, dir) + if dir != "" { + cleanup := fs.TestChdir(t, dir) + defer cleanup() + } + + rtest.OK(t, runBackup(opts, gopts, term, target)) + + cancel() + + err := wg.Wait() + if err != nil { + t.Fatal(err) + } +} + +func testRunList(t testing.TB, tpe string, opts GlobalOptions) restic.IDs { + buf := bytes.NewBuffer(nil) + globalOptions.stdout = buf + defer func() { + globalOptions.stdout = os.Stdout + }() + + rtest.OK(t, runList(cmdList, opts, []string{tpe})) + return parseIDsFromReader(t, buf) +} + +func testRunRestore(t testing.TB, opts GlobalOptions, dir string, snapshotID restic.ID) { + testRunRestoreExcludes(t, opts, dir, snapshotID, nil) +} + +func testRunRestoreLatest(t testing.TB, gopts GlobalOptions, dir string, paths []string, host string) { + opts := RestoreOptions{ + Target: dir, + Host: host, + Paths: paths, + } + + rtest.OK(t, runRestore(opts, gopts, 
[]string{"latest"})) +} + +func testRunRestoreExcludes(t testing.TB, gopts GlobalOptions, dir string, snapshotID restic.ID, excludes []string) { + opts := RestoreOptions{ + Target: dir, + Exclude: excludes, + } + + rtest.OK(t, runRestore(opts, gopts, []string{snapshotID.String()})) +} + +func testRunRestoreIncludes(t testing.TB, gopts GlobalOptions, dir string, snapshotID restic.ID, includes []string) { + opts := RestoreOptions{ + Target: dir, + Include: includes, + } + + rtest.OK(t, runRestore(opts, gopts, []string{snapshotID.String()})) +} + +func testRunCheck(t testing.TB, gopts GlobalOptions) { + opts := CheckOptions{ + ReadData: true, + CheckUnused: true, + } + rtest.OK(t, runCheck(opts, gopts, nil)) +} + +func testRunCheckOutput(gopts GlobalOptions) (string, error) { + buf := bytes.NewBuffer(nil) + + globalOptions.stdout = buf + defer func() { + globalOptions.stdout = os.Stdout + }() + + opts := CheckOptions{ + ReadData: true, + } + + err := runCheck(opts, gopts, nil) + return string(buf.Bytes()), err +} + +func testRunRebuildIndex(t testing.TB, gopts GlobalOptions) { + globalOptions.stdout = ioutil.Discard + defer func() { + globalOptions.stdout = os.Stdout + }() + + rtest.OK(t, runRebuildIndex(gopts)) +} + +func testRunLs(t testing.TB, gopts GlobalOptions, snapshotID string) []string { + buf := bytes.NewBuffer(nil) + globalOptions.stdout = buf + quiet := globalOptions.Quiet + globalOptions.Quiet = true + defer func() { + globalOptions.stdout = os.Stdout + globalOptions.Quiet = quiet + }() + + opts := LsOptions{} + + rtest.OK(t, runLs(opts, gopts, []string{snapshotID})) + + return strings.Split(string(buf.Bytes()), "\n") +} + +func testRunFind(t testing.TB, wantJSON bool, gopts GlobalOptions, pattern string) []byte { + buf := bytes.NewBuffer(nil) + globalOptions.stdout = buf + globalOptions.JSON = wantJSON + defer func() { + globalOptions.stdout = os.Stdout + globalOptions.JSON = false + }() + + opts := FindOptions{} + + rtest.OK(t, runFind(opts, gopts, []string{pattern})) + + return buf.Bytes() +} + +func testRunSnapshots(t testing.TB, gopts GlobalOptions) (newest *Snapshot, snapmap map[restic.ID]Snapshot) { + buf := bytes.NewBuffer(nil) + globalOptions.stdout = buf + globalOptions.JSON = true + defer func() { + globalOptions.stdout = os.Stdout + globalOptions.JSON = gopts.JSON + }() + + opts := SnapshotOptions{} + + rtest.OK(t, runSnapshots(opts, globalOptions, []string{})) + + snapshots := []Snapshot{} + rtest.OK(t, json.Unmarshal(buf.Bytes(), &snapshots)) + + snapmap = make(map[restic.ID]Snapshot, len(snapshots)) + for _, sn := range snapshots { + snapmap[*sn.ID] = sn + if newest == nil || sn.Time.After(newest.Time) { + newest = &sn + } + } + return +} + +func testRunForget(t testing.TB, gopts GlobalOptions, args ...string) { + opts := ForgetOptions{} + rtest.OK(t, runForget(opts, gopts, args)) +} + +func testRunPrune(t testing.TB, gopts GlobalOptions) { + rtest.OK(t, runPrune(gopts)) +} + +func TestBackup(t *testing.T) { + env, cleanup := withTestEnvironment(t) + defer cleanup() + + datafile := filepath.Join("testdata", "backup-data.tar.gz") + fd, err := os.Open(datafile) + if os.IsNotExist(errors.Cause(err)) { + t.Skipf("unable to find data file %q, skipping", datafile) + return + } + rtest.OK(t, err) + rtest.OK(t, fd.Close()) + + testRunInit(t, env.gopts) + + rtest.SetupTarTestFixture(t, env.testdata, datafile) + opts := BackupOptions{} + + // first backup + testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts) + snapshotIDs := testRunList(t, 
"snapshots", env.gopts) + rtest.Assert(t, len(snapshotIDs) == 1, + "expected one snapshot, got %v", snapshotIDs) + + testRunCheck(t, env.gopts) + stat1 := dirStats(env.repo) + + // second backup, implicit incremental + testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts) + snapshotIDs = testRunList(t, "snapshots", env.gopts) + rtest.Assert(t, len(snapshotIDs) == 2, + "expected two snapshots, got %v", snapshotIDs) + + stat2 := dirStats(env.repo) + if stat2.size > stat1.size+stat1.size/10 { + t.Error("repository size has grown by more than 10 percent") + } + t.Logf("repository grown by %d bytes", stat2.size-stat1.size) + + testRunCheck(t, env.gopts) + // third backup, explicit incremental + opts.Parent = snapshotIDs[0].String() + testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts) + snapshotIDs = testRunList(t, "snapshots", env.gopts) + rtest.Assert(t, len(snapshotIDs) == 3, + "expected three snapshots, got %v", snapshotIDs) + + stat3 := dirStats(env.repo) + if stat3.size > stat1.size+stat1.size/10 { + t.Error("repository size has grown by more than 10 percent") + } + t.Logf("repository grown by %d bytes", stat3.size-stat2.size) + + // restore all backups and compare + for i, snapshotID := range snapshotIDs { + restoredir := filepath.Join(env.base, fmt.Sprintf("restore%d", i)) + t.Logf("restoring snapshot %v to %v", snapshotID.Str(), restoredir) + testRunRestore(t, env.gopts, restoredir, snapshotIDs[0]) + rtest.Assert(t, directoriesEqualContents(env.testdata, filepath.Join(restoredir, "testdata")), + "directories are not equal") + } + + testRunCheck(t, env.gopts) +} + +func TestBackupNonExistingFile(t *testing.T) { + env, cleanup := withTestEnvironment(t) + defer cleanup() + + datafile := filepath.Join("testdata", "backup-data.tar.gz") + fd, err := os.Open(datafile) + if os.IsNotExist(errors.Cause(err)) { + t.Skipf("unable to find data file %q, skipping", datafile) + return + } + rtest.OK(t, err) + rtest.OK(t, fd.Close()) + + rtest.SetupTarTestFixture(t, env.testdata, datafile) + + testRunInit(t, env.gopts) + globalOptions.stderr = ioutil.Discard + defer func() { + globalOptions.stderr = os.Stderr + }() + + p := filepath.Join(env.testdata, "0", "0", "9") + dirs := []string{ + filepath.Join(p, "0"), + filepath.Join(p, "1"), + filepath.Join(p, "nonexisting"), + filepath.Join(p, "5"), + } + + opts := BackupOptions{} + + testRunBackup(t, "", dirs, opts, env.gopts) +} + +func includes(haystack []string, needle string) bool { + for _, s := range haystack { + if s == needle { + return true + } + } + + return false +} + +func loadSnapshotMap(t testing.TB, gopts GlobalOptions) map[string]struct{} { + snapshotIDs := testRunList(t, "snapshots", gopts) + + m := make(map[string]struct{}) + for _, id := range snapshotIDs { + m[id.String()] = struct{}{} + } + + return m +} + +func lastSnapshot(old, new map[string]struct{}) (map[string]struct{}, string) { + for k := range new { + if _, ok := old[k]; !ok { + old[k] = struct{}{} + return old, k + } + } + + return old, "" +} + +var backupExcludeFilenames = []string{ + "testfile1", + "foo.tar.gz", + "private/secret/passwords.txt", + "work/source/test.c", +} + +func TestBackupExclude(t *testing.T) { + env, cleanup := withTestEnvironment(t) + defer cleanup() + + testRunInit(t, env.gopts) + + datadir := filepath.Join(env.base, "testdata") + + for _, filename := range backupExcludeFilenames { + fp := filepath.Join(datadir, filename) + rtest.OK(t, os.MkdirAll(filepath.Dir(fp), 0755)) + + f, err := 
os.Create(fp) + rtest.OK(t, err) + + fmt.Fprintf(f, filename) + rtest.OK(t, f.Close()) + } + + snapshots := make(map[string]struct{}) + + opts := BackupOptions{} + + testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts) + snapshots, snapshotID := lastSnapshot(snapshots, loadSnapshotMap(t, env.gopts)) + files := testRunLs(t, env.gopts, snapshotID) + rtest.Assert(t, includes(files, "/testdata/foo.tar.gz"), + "expected file %q in first snapshot, but it's not included", "foo.tar.gz") + + opts.Excludes = []string{"*.tar.gz"} + testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts) + snapshots, snapshotID = lastSnapshot(snapshots, loadSnapshotMap(t, env.gopts)) + files = testRunLs(t, env.gopts, snapshotID) + rtest.Assert(t, !includes(files, "/testdata/foo.tar.gz"), + "expected file %q not in first snapshot, but it's included", "foo.tar.gz") + + opts.Excludes = []string{"*.tar.gz", "private/secret"} + testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts) + _, snapshotID = lastSnapshot(snapshots, loadSnapshotMap(t, env.gopts)) + files = testRunLs(t, env.gopts, snapshotID) + rtest.Assert(t, !includes(files, "/testdata/foo.tar.gz"), + "expected file %q not in first snapshot, but it's included", "foo.tar.gz") + rtest.Assert(t, !includes(files, "/testdata/private/secret/passwords.txt"), + "expected file %q not in first snapshot, but it's included", "passwords.txt") +} + +const ( + incrementalFirstWrite = 10 * 1042 * 1024 + incrementalSecondWrite = 1 * 1042 * 1024 + incrementalThirdWrite = 1 * 1042 * 1024 +) + +func appendRandomData(filename string, bytes uint) error { + f, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE, 0666) + if err != nil { + fmt.Fprint(os.Stderr, err) + return err + } + + _, err = f.Seek(0, 2) + if err != nil { + fmt.Fprint(os.Stderr, err) + return err + } + + _, err = io.Copy(f, io.LimitReader(rand.Reader, int64(bytes))) + if err != nil { + fmt.Fprint(os.Stderr, err) + return err + } + + return f.Close() +} + +func TestIncrementalBackup(t *testing.T) { + env, cleanup := withTestEnvironment(t) + defer cleanup() + + testRunInit(t, env.gopts) + + datadir := filepath.Join(env.base, "testdata") + testfile := filepath.Join(datadir, "testfile") + + rtest.OK(t, appendRandomData(testfile, incrementalFirstWrite)) + + opts := BackupOptions{} + + testRunBackup(t, "", []string{datadir}, opts, env.gopts) + testRunCheck(t, env.gopts) + stat1 := dirStats(env.repo) + + rtest.OK(t, appendRandomData(testfile, incrementalSecondWrite)) + + testRunBackup(t, "", []string{datadir}, opts, env.gopts) + testRunCheck(t, env.gopts) + stat2 := dirStats(env.repo) + if stat2.size-stat1.size > incrementalFirstWrite { + t.Errorf("repository size has grown by more than %d bytes", incrementalFirstWrite) + } + t.Logf("repository grown by %d bytes", stat2.size-stat1.size) + + rtest.OK(t, appendRandomData(testfile, incrementalThirdWrite)) + + testRunBackup(t, "", []string{datadir}, opts, env.gopts) + testRunCheck(t, env.gopts) + stat3 := dirStats(env.repo) + if stat3.size-stat2.size > incrementalFirstWrite { + t.Errorf("repository size has grown by more than %d bytes", incrementalFirstWrite) + } + t.Logf("repository grown by %d bytes", stat3.size-stat2.size) +} + +func TestBackupTags(t *testing.T) { + env, cleanup := withTestEnvironment(t) + defer cleanup() + + datafile := filepath.Join("testdata", "backup-data.tar.gz") + testRunInit(t, env.gopts) + rtest.SetupTarTestFixture(t, env.testdata, datafile) + + opts := 
BackupOptions{} + + testRunBackup(t, "", []string{env.testdata}, opts, env.gopts) + testRunCheck(t, env.gopts) + newest, _ := testRunSnapshots(t, env.gopts) + rtest.Assert(t, newest != nil, "expected a new backup, got nil") + rtest.Assert(t, len(newest.Tags) == 0, + "expected no tags, got %v", newest.Tags) + parent := newest + + opts.Tags = []string{"NL"} + testRunBackup(t, "", []string{env.testdata}, opts, env.gopts) + testRunCheck(t, env.gopts) + newest, _ = testRunSnapshots(t, env.gopts) + rtest.Assert(t, newest != nil, "expected a new backup, got nil") + rtest.Assert(t, len(newest.Tags) == 1 && newest.Tags[0] == "NL", + "expected one NL tag, got %v", newest.Tags) + // Tagged backup should have untagged backup as parent. + rtest.Assert(t, parent.ID.Equal(*newest.Parent), + "expected parent to be %v, got %v", parent.ID, newest.Parent) +} + +func testRunTag(t testing.TB, opts TagOptions, gopts GlobalOptions) { + rtest.OK(t, runTag(opts, gopts, []string{})) +} + +func TestTag(t *testing.T) { + env, cleanup := withTestEnvironment(t) + defer cleanup() + + datafile := filepath.Join("testdata", "backup-data.tar.gz") + testRunInit(t, env.gopts) + rtest.SetupTarTestFixture(t, env.testdata, datafile) + + testRunBackup(t, "", []string{env.testdata}, BackupOptions{}, env.gopts) + testRunCheck(t, env.gopts) + newest, _ := testRunSnapshots(t, env.gopts) + rtest.Assert(t, newest != nil, "expected a new backup, got nil") + rtest.Assert(t, len(newest.Tags) == 0, + "expected no tags, got %v", newest.Tags) + rtest.Assert(t, newest.Original == nil, + "expected original ID to be nil, got %v", newest.Original) + originalID := *newest.ID + + testRunTag(t, TagOptions{SetTags: []string{"NL"}}, env.gopts) + testRunCheck(t, env.gopts) + newest, _ = testRunSnapshots(t, env.gopts) + rtest.Assert(t, newest != nil, "expected a new backup, got nil") + rtest.Assert(t, len(newest.Tags) == 1 && newest.Tags[0] == "NL", + "set failed, expected one NL tag, got %v", newest.Tags) + rtest.Assert(t, newest.Original != nil, "expected original snapshot id, got nil") + rtest.Assert(t, *newest.Original == originalID, + "expected original ID to be set to the first snapshot id") + + testRunTag(t, TagOptions{AddTags: []string{"CH"}}, env.gopts) + testRunCheck(t, env.gopts) + newest, _ = testRunSnapshots(t, env.gopts) + rtest.Assert(t, newest != nil, "expected a new backup, got nil") + rtest.Assert(t, len(newest.Tags) == 2 && newest.Tags[0] == "NL" && newest.Tags[1] == "CH", + "add failed, expected CH,NL tags, got %v", newest.Tags) + rtest.Assert(t, newest.Original != nil, "expected original snapshot id, got nil") + rtest.Assert(t, *newest.Original == originalID, + "expected original ID to be set to the first snapshot id") + + testRunTag(t, TagOptions{RemoveTags: []string{"NL"}}, env.gopts) + testRunCheck(t, env.gopts) + newest, _ = testRunSnapshots(t, env.gopts) + rtest.Assert(t, newest != nil, "expected a new backup, got nil") + rtest.Assert(t, len(newest.Tags) == 1 && newest.Tags[0] == "CH", + "remove failed, expected one CH tag, got %v", newest.Tags) + rtest.Assert(t, newest.Original != nil, "expected original snapshot id, got nil") + rtest.Assert(t, *newest.Original == originalID, + "expected original ID to be set to the first snapshot id") + + testRunTag(t, TagOptions{AddTags: []string{"US", "RU"}}, env.gopts) + testRunTag(t, TagOptions{RemoveTags: []string{"CH", "US", "RU"}}, env.gopts) + testRunCheck(t, env.gopts) + newest, _ = testRunSnapshots(t, env.gopts) + rtest.Assert(t, newest != nil, "expected a new backup, got nil") + 
rtest.Assert(t, len(newest.Tags) == 0, + "expected no tags, got %v", newest.Tags) + rtest.Assert(t, newest.Original != nil, "expected original snapshot id, got nil") + rtest.Assert(t, *newest.Original == originalID, + "expected original ID to be set to the first snapshot id") + + // Check special case of removing all tags. + testRunTag(t, TagOptions{SetTags: []string{""}}, env.gopts) + testRunCheck(t, env.gopts) + newest, _ = testRunSnapshots(t, env.gopts) + rtest.Assert(t, newest != nil, "expected a new backup, got nil") + rtest.Assert(t, len(newest.Tags) == 0, + "expected no tags, got %v", newest.Tags) + rtest.Assert(t, newest.Original != nil, "expected original snapshot id, got nil") + rtest.Assert(t, *newest.Original == originalID, + "expected original ID to be set to the first snapshot id") +} + +func testRunKeyListOtherIDs(t testing.TB, gopts GlobalOptions) []string { + buf := bytes.NewBuffer(nil) + + globalOptions.stdout = buf + defer func() { + globalOptions.stdout = os.Stdout + }() + + rtest.OK(t, runKey(gopts, []string{"list"})) + + scanner := bufio.NewScanner(buf) + exp := regexp.MustCompile(`^ ([a-f0-9]+) `) + + IDs := []string{} + for scanner.Scan() { + if id := exp.FindStringSubmatch(scanner.Text()); id != nil { + IDs = append(IDs, id[1]) + } + } + + return IDs +} + +func testRunKeyAddNewKey(t testing.TB, newPassword string, gopts GlobalOptions) { + testKeyNewPassword = newPassword + defer func() { + testKeyNewPassword = "" + }() + + rtest.OK(t, runKey(gopts, []string{"add"})) +} + +func testRunKeyPasswd(t testing.TB, newPassword string, gopts GlobalOptions) { + testKeyNewPassword = newPassword + defer func() { + testKeyNewPassword = "" + }() + + rtest.OK(t, runKey(gopts, []string{"passwd"})) +} + +func testRunKeyRemove(t testing.TB, gopts GlobalOptions, IDs []string) { + t.Logf("remove %d keys: %q\n", len(IDs), IDs) + for _, id := range IDs { + rtest.OK(t, runKey(gopts, []string{"remove", id})) + } +} + +func TestKeyAddRemove(t *testing.T) { + passwordList := []string{ + "OnnyiasyatvodsEvVodyawit", + "raicneirvOjEfEigonOmLasOd", + } + + env, cleanup := withTestEnvironment(t) + defer cleanup() + + testRunInit(t, env.gopts) + + testRunKeyPasswd(t, "geheim2", env.gopts) + env.gopts.password = "geheim2" + t.Logf("changed password to %q", env.gopts.password) + + for _, newPassword := range passwordList { + testRunKeyAddNewKey(t, newPassword, env.gopts) + t.Logf("added new password %q", newPassword) + env.gopts.password = newPassword + testRunKeyRemove(t, env.gopts, testRunKeyListOtherIDs(t, env.gopts)) + } + + env.gopts.password = passwordList[len(passwordList)-1] + t.Logf("testing access with last password %q\n", env.gopts.password) + rtest.OK(t, runKey(env.gopts, []string{"list"})) + testRunCheck(t, env.gopts) +} + +func testFileSize(filename string, size int64) error { + fi, err := os.Stat(filename) + if err != nil { + return err + } + + if fi.Size() != size { + return errors.Fatalf("wrong file size for %v: expected %v, got %v", filename, size, fi.Size()) + } + + return nil +} + +func TestRestoreFilter(t *testing.T) { + testfiles := []struct { + name string + size uint + }{ + {"testfile1.c", 100}, + {"testfile2.exe", 101}, + {"subdir1/subdir2/testfile3.docx", 102}, + {"subdir1/subdir2/testfile4.c", 102}, + } + + env, cleanup := withTestEnvironment(t) + defer cleanup() + + testRunInit(t, env.gopts) + + for _, testFile := range testfiles { + p := filepath.Join(env.testdata, testFile.name) + rtest.OK(t, os.MkdirAll(filepath.Dir(p), 0755)) + rtest.OK(t, appendRandomData(p, 
testFile.size)) + } + + opts := BackupOptions{} + + testRunBackup(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts) + testRunCheck(t, env.gopts) + + snapshotID := testRunList(t, "snapshots", env.gopts)[0] + + // no restore filter should restore all files + testRunRestore(t, env.gopts, filepath.Join(env.base, "restore0"), snapshotID) + for _, testFile := range testfiles { + rtest.OK(t, testFileSize(filepath.Join(env.base, "restore0", "testdata", testFile.name), int64(testFile.size))) + } + + for i, pat := range []string{"*.c", "*.exe", "*", "*file3*"} { + base := filepath.Join(env.base, fmt.Sprintf("restore%d", i+1)) + testRunRestoreExcludes(t, env.gopts, base, snapshotID, []string{pat}) + for _, testFile := range testfiles { + err := testFileSize(filepath.Join(base, "testdata", testFile.name), int64(testFile.size)) + if ok, _ := filter.Match(pat, filepath.Base(testFile.name)); !ok { + rtest.OK(t, err) + } else { + rtest.Assert(t, os.IsNotExist(errors.Cause(err)), + "expected %v to not exist in restore step %v, but it exists, err %v", testFile.name, i+1, err) + } + } + } +} + +func TestRestore(t *testing.T) { + env, cleanup := withTestEnvironment(t) + defer cleanup() + + testRunInit(t, env.gopts) + + for i := 0; i < 10; i++ { + p := filepath.Join(env.testdata, fmt.Sprintf("foo/bar/testfile%v", i)) + rtest.OK(t, os.MkdirAll(filepath.Dir(p), 0755)) + rtest.OK(t, appendRandomData(p, uint(mrand.Intn(2<<21)))) + } + + opts := BackupOptions{} + + testRunBackup(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts) + testRunCheck(t, env.gopts) + + // Restore latest without any filters + restoredir := filepath.Join(env.base, "restore") + testRunRestoreLatest(t, env.gopts, restoredir, nil, "") + + rtest.Assert(t, directoriesEqualContents(env.testdata, filepath.Join(restoredir, filepath.Base(env.testdata))), + "directories are not equal") +} + +func TestRestoreLatest(t *testing.T) { + env, cleanup := withTestEnvironment(t) + defer cleanup() + + testRunInit(t, env.gopts) + + p := filepath.Join(env.testdata, "testfile.c") + rtest.OK(t, os.MkdirAll(filepath.Dir(p), 0755)) + rtest.OK(t, appendRandomData(p, 100)) + + opts := BackupOptions{} + + // chdir manually here so we can get the current directory. This is not the + // same as the temp dir returned by ioutil.TempDir() on darwin. 
+ back := fs.TestChdir(t, filepath.Dir(env.testdata)) + defer back() + + curdir, err := os.Getwd() + if err != nil { + t.Fatal(err) + } + + testRunBackup(t, "", []string{filepath.Base(env.testdata)}, opts, env.gopts) + testRunCheck(t, env.gopts) + + os.Remove(p) + rtest.OK(t, appendRandomData(p, 101)) + testRunBackup(t, "", []string{filepath.Base(env.testdata)}, opts, env.gopts) + testRunCheck(t, env.gopts) + + // Restore latest without any filters + testRunRestoreLatest(t, env.gopts, filepath.Join(env.base, "restore0"), nil, "") + rtest.OK(t, testFileSize(filepath.Join(env.base, "restore0", "testdata", "testfile.c"), int64(101))) + + // Setup test files in different directories backed up in different snapshots + p1 := filepath.Join(curdir, filepath.FromSlash("p1/testfile.c")) + + rtest.OK(t, os.MkdirAll(filepath.Dir(p1), 0755)) + rtest.OK(t, appendRandomData(p1, 102)) + testRunBackup(t, "", []string{"p1"}, opts, env.gopts) + testRunCheck(t, env.gopts) + + p2 := filepath.Join(curdir, filepath.FromSlash("p2/testfile.c")) + + rtest.OK(t, os.MkdirAll(filepath.Dir(p2), 0755)) + rtest.OK(t, appendRandomData(p2, 103)) + testRunBackup(t, "", []string{"p2"}, opts, env.gopts) + testRunCheck(t, env.gopts) + + p1rAbs := filepath.Join(env.base, "restore1", "p1/testfile.c") + p2rAbs := filepath.Join(env.base, "restore2", "p2/testfile.c") + + testRunRestoreLatest(t, env.gopts, filepath.Join(env.base, "restore1"), []string{filepath.Dir(p1)}, "") + rtest.OK(t, testFileSize(p1rAbs, int64(102))) + if _, err := os.Stat(p2rAbs); os.IsNotExist(errors.Cause(err)) { + rtest.Assert(t, os.IsNotExist(errors.Cause(err)), + "expected %v to not exist in restore, but it exists, err %v", p2rAbs, err) + } + + testRunRestoreLatest(t, env.gopts, filepath.Join(env.base, "restore2"), []string{filepath.Dir(p2)}, "") + rtest.OK(t, testFileSize(p2rAbs, int64(103))) + if _, err := os.Stat(p1rAbs); os.IsNotExist(errors.Cause(err)) { + rtest.Assert(t, os.IsNotExist(errors.Cause(err)), + "expected %v to not exist in restore, but it exists, err %v", p1rAbs, err) + } +} + +func TestRestoreWithPermissionFailure(t *testing.T) { + env, cleanup := withTestEnvironment(t) + defer cleanup() + + datafile := filepath.Join("testdata", "repo-restore-permissions-test.tar.gz") + rtest.SetupTarTestFixture(t, env.base, datafile) + + snapshots := testRunList(t, "snapshots", env.gopts) + rtest.Assert(t, len(snapshots) > 0, + "no snapshots found in repo (%v)", datafile) + + globalOptions.stderr = ioutil.Discard + defer func() { + globalOptions.stderr = os.Stderr + }() + + testRunRestore(t, env.gopts, filepath.Join(env.base, "restore"), snapshots[0]) + + // make sure that all files have been restored, regardless of any + // permission errors + files := testRunLs(t, env.gopts, snapshots[0].String()) + for _, filename := range files { + fi, err := os.Lstat(filepath.Join(env.base, "restore", filename)) + rtest.OK(t, err) + + rtest.Assert(t, !isFile(fi) || fi.Size() > 0, + "file %v restored, but filesize is 0", filename) + } +} + +func setZeroModTime(filename string) error { + var utimes = []syscall.Timespec{ + syscall.NsecToTimespec(0), + syscall.NsecToTimespec(0), + } + + return syscall.UtimesNano(filename, utimes) +} + +func TestRestoreNoMetadataOnIgnoredIntermediateDirs(t *testing.T) { + env, cleanup := withTestEnvironment(t) + defer cleanup() + + testRunInit(t, env.gopts) + + p := filepath.Join(env.testdata, "subdir1", "subdir2", "subdir3", "file.ext") + rtest.OK(t, os.MkdirAll(filepath.Dir(p), 0755)) + rtest.OK(t, appendRandomData(p, 200)) + 
rtest.OK(t, setZeroModTime(filepath.Join(env.testdata, "subdir1", "subdir2"))) + + opts := BackupOptions{} + + testRunBackup(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts) + testRunCheck(t, env.gopts) + + snapshotID := testRunList(t, "snapshots", env.gopts)[0] + + // restore with filter "*.ext", this should restore "file.ext", but + // since the directories are ignored and only created because of + // "file.ext", no meta data should be restored for them. + testRunRestoreIncludes(t, env.gopts, filepath.Join(env.base, "restore0"), snapshotID, []string{"*.ext"}) + + f1 := filepath.Join(env.base, "restore0", "testdata", "subdir1", "subdir2") + fi, err := os.Stat(f1) + rtest.OK(t, err) + + // restore with filter "*", this should restore meta data on everything. + testRunRestoreIncludes(t, env.gopts, filepath.Join(env.base, "restore1"), snapshotID, []string{"*"}) + + f2 := filepath.Join(env.base, "restore1", "testdata", "subdir1", "subdir2") + fi, err = os.Stat(f2) + rtest.OK(t, err) + + rtest.Assert(t, fi.ModTime() == time.Unix(0, 0), + "meta data of intermediate directory hasn't been restore") +} + +func TestFind(t *testing.T) { + env, cleanup := withTestEnvironment(t) + defer cleanup() + + datafile := filepath.Join("testdata", "backup-data.tar.gz") + testRunInit(t, env.gopts) + rtest.SetupTarTestFixture(t, env.testdata, datafile) + + opts := BackupOptions{} + + testRunBackup(t, "", []string{env.testdata}, opts, env.gopts) + testRunCheck(t, env.gopts) + + results := testRunFind(t, false, env.gopts, "unexistingfile") + rtest.Assert(t, len(results) == 0, "unexisting file found in repo (%v)", datafile) + + results = testRunFind(t, false, env.gopts, "testfile") + lines := strings.Split(string(results), "\n") + rtest.Assert(t, len(lines) == 2, "expected one file found in repo (%v)", datafile) + + results = testRunFind(t, false, env.gopts, "testfile*") + lines = strings.Split(string(results), "\n") + rtest.Assert(t, len(lines) == 4, "expected three files found in repo (%v)", datafile) +} + +type testMatch struct { + Path string `json:"path,omitempty"` + Permissions string `json:"permissions,omitempty"` + Size uint64 `json:"size,omitempty"` + Date time.Time `json:"date,omitempty"` + UID uint32 `json:"uid,omitempty"` + GID uint32 `json:"gid,omitempty"` +} + +type testMatches struct { + Hits int `json:"hits,omitempty"` + SnapshotID string `json:"snapshot,omitempty"` + Matches []testMatch `json:"matches,omitempty"` +} + +func TestFindJSON(t *testing.T) { + env, cleanup := withTestEnvironment(t) + defer cleanup() + + datafile := filepath.Join("testdata", "backup-data.tar.gz") + testRunInit(t, env.gopts) + rtest.SetupTarTestFixture(t, env.testdata, datafile) + + opts := BackupOptions{} + + testRunBackup(t, "", []string{env.testdata}, opts, env.gopts) + testRunCheck(t, env.gopts) + + results := testRunFind(t, true, env.gopts, "unexistingfile") + matches := []testMatches{} + rtest.OK(t, json.Unmarshal(results, &matches)) + rtest.Assert(t, len(matches) == 0, "expected no match in repo (%v)", datafile) + + results = testRunFind(t, true, env.gopts, "testfile") + rtest.OK(t, json.Unmarshal(results, &matches)) + rtest.Assert(t, len(matches) == 1, "expected a single snapshot in repo (%v)", datafile) + rtest.Assert(t, len(matches[0].Matches) == 1, "expected a single file to match (%v)", datafile) + rtest.Assert(t, matches[0].Hits == 1, "expected hits to show 1 match (%v)", datafile) + + results = testRunFind(t, true, env.gopts, "testfile*") + rtest.OK(t, 
json.Unmarshal(results, &matches)) + rtest.Assert(t, len(matches) == 1, "expected a single snapshot in repo (%v)", datafile) + rtest.Assert(t, len(matches[0].Matches) == 3, "expected 3 files to match (%v)", datafile) + rtest.Assert(t, matches[0].Hits == 3, "expected hits to show 3 matches (%v)", datafile) +} + +func TestRebuildIndex(t *testing.T) { + env, cleanup := withTestEnvironment(t) + defer cleanup() + + datafile := filepath.Join("..", "..", "internal", "checker", "testdata", "duplicate-packs-in-index-test-repo.tar.gz") + rtest.SetupTarTestFixture(t, env.base, datafile) + + out, err := testRunCheckOutput(env.gopts) + if !strings.Contains(out, "contained in several indexes") { + t.Fatalf("did not find checker hint for packs in several indexes") + } + + if err != nil { + t.Fatalf("expected no error from checker for test repository, got %v", err) + } + + if !strings.Contains(out, "restic rebuild-index") { + t.Fatalf("did not find hint for rebuild-index command") + } + + testRunRebuildIndex(t, env.gopts) + + out, err = testRunCheckOutput(env.gopts) + if len(out) != 0 { + t.Fatalf("expected no output from the checker, got: %v", out) + } + + if err != nil { + t.Fatalf("expected no error from checker after rebuild-index, got: %v", err) + } +} + +func TestRebuildIndexAlwaysFull(t *testing.T) { + repository.IndexFull = func(*repository.Index) bool { return true } + TestRebuildIndex(t) +} + +func TestCheckRestoreNoLock(t *testing.T) { + env, cleanup := withTestEnvironment(t) + defer cleanup() + + datafile := filepath.Join("testdata", "small-repo.tar.gz") + rtest.SetupTarTestFixture(t, env.base, datafile) + + err := filepath.Walk(env.repo, func(p string, fi os.FileInfo, e error) error { + if e != nil { + return e + } + return os.Chmod(p, fi.Mode() & ^(os.FileMode(0222))) + }) + rtest.OK(t, err) + + env.gopts.NoLock = true + + testRunCheck(t, env.gopts) + + snapshotIDs := testRunList(t, "snapshots", env.gopts) + if len(snapshotIDs) == 0 { + t.Fatalf("found no snapshots") + } + + testRunRestore(t, env.gopts, filepath.Join(env.base, "restore"), snapshotIDs[0]) +} + +func TestPrune(t *testing.T) { + env, cleanup := withTestEnvironment(t) + defer cleanup() + + datafile := filepath.Join("testdata", "backup-data.tar.gz") + fd, err := os.Open(datafile) + if os.IsNotExist(errors.Cause(err)) { + t.Skipf("unable to find data file %q, skipping", datafile) + return + } + rtest.OK(t, err) + rtest.OK(t, fd.Close()) + + testRunInit(t, env.gopts) + + rtest.SetupTarTestFixture(t, env.testdata, datafile) + opts := BackupOptions{} + + testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9")}, opts, env.gopts) + firstSnapshot := testRunList(t, "snapshots", env.gopts) + rtest.Assert(t, len(firstSnapshot) == 1, + "expected one snapshot, got %v", firstSnapshot) + + testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "2")}, opts, env.gopts) + testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "3")}, opts, env.gopts) + + snapshotIDs := testRunList(t, "snapshots", env.gopts) + rtest.Assert(t, len(snapshotIDs) == 3, + "expected 3 snapshot, got %v", snapshotIDs) + + testRunForget(t, env.gopts, firstSnapshot[0].String()) + testRunPrune(t, env.gopts) + testRunCheck(t, env.gopts) +} + +func TestHardLink(t *testing.T) { + // this test assumes a test set with a single directory containing hard linked files + env, cleanup := withTestEnvironment(t) + defer cleanup() + + datafile := filepath.Join("testdata", "test.hl.tar.gz") + fd, err := os.Open(datafile) + if 
os.IsNotExist(errors.Cause(err)) { + t.Skipf("unable to find data file %q, skipping", datafile) + return + } + rtest.OK(t, err) + rtest.OK(t, fd.Close()) + + testRunInit(t, env.gopts) + + rtest.SetupTarTestFixture(t, env.testdata, datafile) + + linkTests := createFileSetPerHardlink(env.testdata) + + opts := BackupOptions{} + + // first backup + testRunBackup(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts) + snapshotIDs := testRunList(t, "snapshots", env.gopts) + rtest.Assert(t, len(snapshotIDs) == 1, + "expected one snapshot, got %v", snapshotIDs) + + testRunCheck(t, env.gopts) + + // restore all backups and compare + for i, snapshotID := range snapshotIDs { + restoredir := filepath.Join(env.base, fmt.Sprintf("restore%d", i)) + t.Logf("restoring snapshot %v to %v", snapshotID.Str(), restoredir) + testRunRestore(t, env.gopts, restoredir, snapshotIDs[0]) + rtest.Assert(t, directoriesEqualContents(env.testdata, filepath.Join(restoredir, "testdata")), + "directories are not equal") + + linkResults := createFileSetPerHardlink(filepath.Join(restoredir, "testdata")) + rtest.Assert(t, linksEqual(linkTests, linkResults), + "links are not equal") + } + + testRunCheck(t, env.gopts) +} + +func linksEqual(source, dest map[uint64][]string) bool { + for _, vs := range source { + found := false + for kd, vd := range dest { + if linkEqual(vs, vd) { + delete(dest, kd) + found = true + break + } + } + if !found { + return false + } + } + + if len(dest) != 0 { + return false + } + + return true +} + +func linkEqual(source, dest []string) bool { + // equal if sliced are equal without considering order + if source == nil && dest == nil { + return true + } + + if source == nil || dest == nil { + return false + } + + if len(source) != len(dest) { + return false + } + + for i := range source { + found := false + for j := range dest { + if source[i] == dest[j] { + found = true + break + } + } + if !found { + return false + } + } + + return true +} + +func TestQuietBackup(t *testing.T) { + env, cleanup := withTestEnvironment(t) + defer cleanup() + + datafile := filepath.Join("testdata", "backup-data.tar.gz") + fd, err := os.Open(datafile) + if os.IsNotExist(errors.Cause(err)) { + t.Skipf("unable to find data file %q, skipping", datafile) + return + } + rtest.OK(t, err) + rtest.OK(t, fd.Close()) + + testRunInit(t, env.gopts) + + rtest.SetupTarTestFixture(t, env.testdata, datafile) + opts := BackupOptions{} + + env.gopts.Quiet = false + testRunBackup(t, "", []string{env.testdata}, opts, env.gopts) + snapshotIDs := testRunList(t, "snapshots", env.gopts) + rtest.Assert(t, len(snapshotIDs) == 1, + "expected one snapshot, got %v", snapshotIDs) + + testRunCheck(t, env.gopts) + + env.gopts.Quiet = true + testRunBackup(t, "", []string{env.testdata}, opts, env.gopts) + snapshotIDs = testRunList(t, "snapshots", env.gopts) + rtest.Assert(t, len(snapshotIDs) == 2, + "expected two snapshots, got %v", snapshotIDs) + + testRunCheck(t, env.gopts) +} diff --git a/cmd/restic/local_layout_test.go b/cmd/restic/local_layout_test.go new file mode 100644 index 000000000..c76112e00 --- /dev/null +++ b/cmd/restic/local_layout_test.go @@ -0,0 +1,41 @@ +package main + +import ( + "path/filepath" + "testing" + + rtest "github.com/restic/restic/internal/test" +) + +func TestRestoreLocalLayout(t *testing.T) { + env, cleanup := withTestEnvironment(t) + defer cleanup() + + var tests = []struct { + filename string + layout string + }{ + {"repo-layout-default.tar.gz", ""}, + {"repo-layout-s3legacy.tar.gz", ""}, 
+ {"repo-layout-default.tar.gz", "default"}, + {"repo-layout-s3legacy.tar.gz", "s3legacy"}, + } + + for _, test := range tests { + datafile := filepath.Join("..", "..", "internal", "backend", "testdata", test.filename) + + rtest.SetupTarTestFixture(t, env.base, datafile) + + env.gopts.extended["local.layout"] = test.layout + + // check the repo + testRunCheck(t, env.gopts) + + // restore latest snapshot + target := filepath.Join(env.base, "restore") + testRunRestoreLatest(t, env.gopts, target, nil, "") + + rtest.RemoveAll(t, filepath.Join(env.base, "repo")) + rtest.RemoveAll(t, target) + } +} diff --git a/cmd/restic/lock.go b/cmd/restic/lock.go new file mode 100644 index 000000000..72a818d9a --- /dev/null +++ b/cmd/restic/lock.go @@ -0,0 +1,133 @@ +package main + +import ( + "context" + "fmt" + "os" + "sync" + "time" + + "github.com/restic/restic/internal/debug" + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/repository" + "github.com/restic/restic/internal/restic" +) + +var globalLocks struct { + locks []*restic.Lock + cancelRefresh chan struct{} + refreshWG sync.WaitGroup + sync.Mutex +} + +func lockRepo(repo *repository.Repository) (*restic.Lock, error) { + return lockRepository(repo, false) +} + +func lockRepoExclusive(repo *repository.Repository) (*restic.Lock, error) { + return lockRepository(repo, true) +} + +func lockRepository(repo *repository.Repository, exclusive bool) (*restic.Lock, error) { + lockFn := restic.NewLock + if exclusive { + lockFn = restic.NewExclusiveLock + } + + lock, err := lockFn(context.TODO(), repo) + if err != nil { + return nil, errors.Fatalf("unable to create lock in backend: %v", err) + } + debug.Log("create lock %p (exclusive %v)", lock, exclusive) + + globalLocks.Lock() + if globalLocks.cancelRefresh == nil { + debug.Log("start goroutine for lock refresh") + globalLocks.cancelRefresh = make(chan struct{}) + globalLocks.refreshWG = sync.WaitGroup{} + globalLocks.refreshWG.Add(1) + go refreshLocks(&globalLocks.refreshWG, globalLocks.cancelRefresh) + } + + globalLocks.locks = append(globalLocks.locks, lock) + globalLocks.Unlock() + + return lock, err +} + +var refreshInterval = 5 * time.Minute + +func refreshLocks(wg *sync.WaitGroup, done <-chan struct{}) { + debug.Log("start") + defer func() { + wg.Done() + globalLocks.Lock() + globalLocks.cancelRefresh = nil + globalLocks.Unlock() + }() + + ticker := time.NewTicker(refreshInterval) + + for { + select { + case <-done: + debug.Log("terminate") + return + case <-ticker.C: + debug.Log("refreshing locks") + globalLocks.Lock() + for _, lock := range globalLocks.locks { + err := lock.Refresh(context.TODO()) + if err != nil { + fmt.Fprintf(os.Stderr, "unable to refresh lock: %v\n", err) + } + } + globalLocks.Unlock() + } + } +} + +func unlockRepo(lock *restic.Lock) error { + globalLocks.Lock() + defer globalLocks.Unlock() + + for i := 0; i < len(globalLocks.locks); i++ { + if lock == globalLocks.locks[i] { + // remove the lock from the repo + debug.Log("unlocking repository with lock %v", lock) + if err := lock.Unlock(); err != nil { + debug.Log("error while unlocking: %v", err) + return err + } + + // remove the lock from the list of locks + globalLocks.locks = append(globalLocks.locks[:i], globalLocks.locks[i+1:]...) 
+ return nil + } + } + + debug.Log("unable to find lock %v in the global list of locks, ignoring", lock) + + return nil +} + +func unlockAll() error { + globalLocks.Lock() + defer globalLocks.Unlock() + + debug.Log("unlocking %d locks", len(globalLocks.locks)) + for _, lock := range globalLocks.locks { + if err := lock.Unlock(); err != nil { + debug.Log("error while unlocking: %v", err) + return err + } + debug.Log("successfully removed lock") + } + globalLocks.locks = globalLocks.locks[:0] + + return nil +} + +func init() { + AddCleanupHandler(unlockAll) +} diff --git a/cmd/restic/main.go b/cmd/restic/main.go new file mode 100644 index 000000000..e61547c5f --- /dev/null +++ b/cmd/restic/main.go @@ -0,0 +1,111 @@ +package main + +import ( + "bufio" + "bytes" + "fmt" + "log" + "os" + "runtime" + + "github.com/restic/restic/internal/debug" + "github.com/restic/restic/internal/options" + "github.com/restic/restic/internal/restic" + + "github.com/spf13/cobra" + + "github.com/restic/restic/internal/errors" +) + +// cmdRoot is the base command when no other command has been specified. +var cmdRoot = &cobra.Command{ + Use: "restic", + Short: "Backup and restore files", + Long: ` +restic is a backup program which allows saving multiple revisions of files and +directories in an encrypted repository stored on different backends. +`, + SilenceErrors: true, + SilenceUsage: true, + DisableAutoGenTag: true, + + PersistentPreRunE: func(c *cobra.Command, args []string) error { + // set verbosity, default is one + globalOptions.verbosity = 1 + if globalOptions.Quiet && (globalOptions.Verbose > 1) { + return errors.Fatal("--quiet and --verbose cannot be specified at the same time") + } + + switch { + case globalOptions.Verbose >= 2: + globalOptions.verbosity = 3 + case globalOptions.Verbose > 0: + globalOptions.verbosity = 2 + case globalOptions.Quiet: + globalOptions.verbosity = 0 + } + + // parse extended options + opts, err := options.Parse(globalOptions.Options) + if err != nil { + return err + } + globalOptions.extended = opts + if c.Name() == "version" { + return nil + } + pwd, err := resolvePassword(globalOptions) + if err != nil { + fmt.Fprintf(os.Stderr, "Resolving password failed: %v\n", err) + Exit(1) + } + globalOptions.password = pwd + + // run the debug functions for all subcommands (if build tag "debug" is + // enabled) + if err := runDebug(); err != nil { + return err + } + + return nil + }, +} + +var logBuffer = bytes.NewBuffer(nil) + +func init() { + // install custom global logger into a buffer, if an error occurs + // we can show the logs + log.SetOutput(logBuffer) +} + +func main() { + debug.Log("main %#v", os.Args) + debug.Log("restic %s compiled with %v on %v/%v", + version, runtime.Version(), runtime.GOOS, runtime.GOARCH) + err := cmdRoot.Execute() + + switch { + case restic.IsAlreadyLocked(errors.Cause(err)): + fmt.Fprintf(os.Stderr, "%v\nthe `unlock` command can be used to remove stale locks\n", err) + case errors.IsFatal(errors.Cause(err)): + fmt.Fprintf(os.Stderr, "%v\n", err) + case err != nil: + fmt.Fprintf(os.Stderr, "%+v\n", err) + + if logBuffer.Len() > 0 { + fmt.Fprintf(os.Stderr, "also, the following messages were logged by a library:\n") + sc := bufio.NewScanner(logBuffer) + for sc.Scan() { + fmt.Fprintln(os.Stderr, sc.Text()) + } + } + } + + var exitCode int + if err != nil { + exitCode = 1 + } + + Exit(exitCode) +} diff --git a/cmd/restic/testdata/backup-data.tar.gz b/cmd/restic/testdata/backup-data.tar.gz new file mode 100644 index 000000000..6ba5881ae Binary files 
/dev/null and b/cmd/restic/testdata/backup-data.tar.gz differ diff --git a/cmd/restic/testdata/old-index-repo.tar.gz b/cmd/restic/testdata/old-index-repo.tar.gz new file mode 100644 index 000000000..9cfc38573 Binary files /dev/null and b/cmd/restic/testdata/old-index-repo.tar.gz differ diff --git a/cmd/restic/testdata/repo-restore-permissions-test.tar.gz b/cmd/restic/testdata/repo-restore-permissions-test.tar.gz new file mode 100644 index 000000000..36aa62dbf Binary files /dev/null and b/cmd/restic/testdata/repo-restore-permissions-test.tar.gz differ diff --git a/cmd/restic/testdata/repo-same-timestamps.tar.gz b/cmd/restic/testdata/repo-same-timestamps.tar.gz new file mode 100644 index 000000000..42cf2b2bb Binary files /dev/null and b/cmd/restic/testdata/repo-same-timestamps.tar.gz differ diff --git a/cmd/restic/testdata/small-repo.tar.gz b/cmd/restic/testdata/small-repo.tar.gz new file mode 100644 index 000000000..92ce87962 Binary files /dev/null and b/cmd/restic/testdata/small-repo.tar.gz differ diff --git a/cmd/restic/testdata/test.hl.tar.gz b/cmd/restic/testdata/test.hl.tar.gz new file mode 100644 index 000000000..302578199 Binary files /dev/null and b/cmd/restic/testdata/test.hl.tar.gz differ diff --git a/contrib/restic.spec b/contrib/restic.spec new file mode 100644 index 000000000..ce809f194 --- /dev/null +++ b/contrib/restic.spec @@ -0,0 +1,72 @@ +Name: restic +Version: 0.9.2git.20180812 +Release: 4%{?dist} +Summary: restic is a backup program that is fast, efficient and secure. + +%global debug_package %{nil} + +Group: Applications/Archiving +License: BSD 2-Clause +URL: https://restic.net/ +Source0: %{name}-%{version}.tar.gz + +BuildRequires: golang +Requires: /bin/bash + +%description +restic is a program that does backups right. The design goals are: + +Easy: Doing backups should be a frictionless process, otherwise you are tempted to skip it. Restic should be easy to configure and use, so that in the unlikely event of a data loss you can just restore it. Likewise, restoring data should not be complicated. + +Fast: Backing up your data with restic should only be limited by your network or hard disk bandwidth so that you can backup your files every day. Nobody does backups if it takes too much time. Restoring backups should only transfer data that is needed for the files that are to be restored, so that this process is also fast. + +Verifiable: Much more important than backup is restore, so restic enables you to easily verify that all data can be restored. + +Secure: Restic uses cryptography to guarantee confidentiality and integrity of your data. The location where the backup data is stored is assumed to be an untrusted environment (e.g. a shared space where others like system administrators are able to access your backups). Restic is built to secure your data against such attackers, by encrypting it with AES-256 in counter mode and authenticating it using Poly1305-AES. + +Efficient: With the growth of data, additional snapshots should only take the storage of the actual increment. Even more, duplicate data should be de-duplicated before it is actually written to the storage backend to save precious backup space. + +Versatile storage: Users can provide many different places to store the backups. Local, SFTP, Restics REST-Server, Amazon S3, Minio, Openstack Swift, Backblaze B2, Microsoft Azure Blob Storage, Google Cloud Storage and more by the usage of rclone. + +Free: restic is free software and licensed under the BSD 2-Clause License and actively developed on GitHub. 
+ +%prep +%setup -q + + +%build +make %{?_smp_mflags} + + +%install +mkdir -p %{buildroot}%{_bindir} +mkdir -p %{buildroot}%{_mandir}/man1 +mkdir -p %{buildroot}%{_datarootdir}/zsh/site-functions +mkdir -p %{buildroot}%{_datarootdir}/bash-completion/completions +install -p -m 644 doc/man/* %{buildroot}%{_mandir}/man1/ +install -p -m 644 doc/zsh-completion.zsh %{buildroot}%{_datarootdir}/zsh/site-functions/_restic +install -p -m 644 doc/bash-completion.sh %{buildroot}%{_datarootdir}/bash-completion/completions/restic +install -p -m 755 %{name} %{buildroot}%{_bindir} + +%files +%doc LICENSE +%doc README.rst +%{_bindir}/%{name} +%dir %{_datadir}/zsh/site-functions +%{_datadir}/zsh/site-functions/_restic +%dir %{_datadir}/bash-completion/ +%dir %{_datadir}/bash-completion/completions +%{_datadir}/bash-completion/completions/restic +%{_mandir}/man1/restic*.* + + +%changelog +* Sun Aug 12 2018 Luc De Louw - 0.9.2git.20180812-4 +- %license does not work with RHEL6, using %doc instead +* Sun Aug 12 2018 Luc De Louw - 0.9.2git.20180812-3 +- Better description +* Sun Aug 12 2018 Luc De Louw - 0.9.2git.20180812-2 +- Initial RPM build +* Sun Aug 12 2018 Luc De Louw - 0.9.2git.20180812-1 +- Initial RPM build + diff --git a/doc.go b/doc.go new file mode 100644 index 000000000..ff0a0796f --- /dev/null +++ b/doc.go @@ -0,0 +1,11 @@ +// Package restic gives a (very brief) introduction to the structure of source code. +// +// Overview +// +// The packages are structured so that cmd/ contains the main package for the +// restic binary, and internal/ contains almost all code in library form. We've +// chosen to use the internal/ path so that the packages cannot be imported by +// other programs. This was done on purpose, at the moment restic is a +// command-line program and not a library. This may be revisited at a later +// point in time. +package restic diff --git a/doc/.gitignore b/doc/.gitignore new file mode 100644 index 000000000..849362b48 --- /dev/null +++ b/doc/.gitignore @@ -0,0 +1,2 @@ +_build +.doctrees diff --git a/doc/010_introduction.rst b/doc/010_introduction.rst new file mode 100644 index 000000000..5c213f6cd --- /dev/null +++ b/doc/010_introduction.rst @@ -0,0 +1,19 @@ +.. + Normally, there are no heading levels assigned to certain characters as the structure is + determined from the succession of headings. However, this convention is used in Python’s + Style Guide for documenting which you may follow: + + # with overline, for parts + * for chapters + = for sections + - for subsections + ^ for subsubsections + " for paragraphs + +############ +Introduction +############ + +Restic is a fast and secure backup program. In the following sections, we will +present typical workflows, starting with installing, preparing a new +repository, and making the first backup. diff --git a/doc/020_installation.rst b/doc/020_installation.rst new file mode 100644 index 000000000..e84e848d6 --- /dev/null +++ b/doc/020_installation.rst @@ -0,0 +1,306 @@ +.. + Normally, there are no heading levels assigned to certain characters as the structure is + determined from the succession of headings. 
However, this convention is used in Python’s + Style Guide for documenting which you may follow: + + # with overline, for parts + * for chapters + = for sections + - for subsections + ^ for subsubsections + " for paragraphs + +############ +Installation +############ + +Packages +******** + +Note that if at any point the package you’re trying to use is outdated, you +always have the option to use an official binary from the restic project. + +These are up to date binaries, built in a reproducible and verifiable way, that +you can download and run without having to do additional installation work. + +Please see the :ref:`official_binaries` section below for various downloads. +Official binaries can be updated in place by using the ``restic self-update`` +command. + +Arch Linux +========== + +On `Arch Linux `__, there is a package called ``restic`` +installed from the official community repos, e.g. with ``pacman -S``: + +.. code-block:: console + + $ pacman -S restic + +Debian +====== + +On Debian, there's a package called ``restic`` which can be +installed from the official repos, e.g. with ``apt-get``: + +.. code-block:: console + + $ apt-get install restic + + +.. warning:: Please be aware that, at the time of writing, Debian *stable* + has ``restic`` version 0.3.3 which is very old. The *testing* and *unstable* + branches have recent versions of ``restic``. + +Fedora +====== + +restic can be installed using ``dnf``: + +.. code-block:: console + + $ dnf install restic + +If you used restic from copr previously, remove the copr repo as follows to +avoid any conflicts: + +.. code-block:: console + + $ dnf copr remove copart/restic + +macOS +===== + +If you are using macOS, you can install restic using the +`homebrew `__ package manager: + +.. code-block:: console + + $ brew install restic + +Nix & NixOS +=========== + +If you are using `Nix `__ or `NixOS `__ +there is a package available named ``restic``. +It can be installed using ``nix-env``: + +.. code-block:: console + + $ nix-env --install restic + +OpenBSD +======= + +On OpenBSD 6.3 and greater, you can install restic using ``pkg_add``: + +.. code-block:: console + + # pkg_add restic + +FreeBSD +======= + +On FreeBSD (11 and probably later versions), you can install restic using ``pkg install``: + +.. code-block:: console + + # pkg install restic + +RHEL & CentOS +============= + +restic can be installed via copr repository, for RHEL7/CentOS you can try the following: + +.. code-block:: console + + $ yum install yum-plugin-copr + $ yum copr enable copart/restic + $ yum install restic + +If that doesn't work, you can try adding the repository directly, for CentOS6 use: + +.. code-block:: console + + $ yum-config-manager --add-repo https://copr.fedorainfracloud.org/coprs/copart/restic/repo/epel-6/copart-restic-epel-6.repo + +For CentOS7 use: + +.. code-block:: console + + $ yum-config-manager --add-repo https://copr.fedorainfracloud.org/coprs/copart/restic/repo/epel-7/copart-restic-epel-7.repo + +Solus +===== + +restic can be installed from the official repo of Solus via the ``eopkg`` package manager: + +.. code-block:: console + + $ eopkg install restic + +Windows +======= + +restic can be installed using `Scoop `__: + +.. code-block:: console + + scoop install restic + +Using this installation method, ``restic.exe`` will automatically be available +in the ``PATH``. It can be called from cmd.exe or PowerShell by typing ``restic``. + + +.. 
_official_binaries: + +Official Binaries +***************** + +Stable Releases +=============== + +You can download the latest stable release versions of restic from the `restic +release page `__. These builds +are considered stable and releases are made regularly in a controlled manner. + +There's both pre-compiled binaries for different platforms as well as the source +code available for download. Just download and run the one matching your system. + +The official binaries can be updated in place using the ``restic self-update`` +command: + +.. code-block:: console + + $ restic version + restic 0.9.1 compiled with go1.10.3 on linux/amd64 + + $ restic self-update + find latest release of restic at GitHub + latest version is 0.9.2 + download file SHA256SUMS + download SHA256SUMS + download file SHA256SUMS + download SHA256SUMS.asc + GPG signature verification succeeded + download restic_0.9.2_linux_amd64.bz2 + downloaded restic_0.9.2_linux_amd64.bz2 + saved 12115904 bytes in ./restic + successfully updated restic to version 0.9.2 + + $ restic version + restic 0.9.2 compiled with go1.10.3 on linux/amd64 + +The ``self-update`` command uses the GPG signature on the files uploaded to +GitHub to verify their authenticity. No external programs are necessary. + +.. note:: Please be aware that the user executing the ``restic self-update`` + command must have the permission to replace the restic binary. + If you want to save the downloaded restic binary into a different file, pass + the file name via the option ``--output``. + +Unstable Builds +=============== + +Another option is to use the latest builds for the master branch, available on +the `restic beta download site +`__. These too are pre-compiled +and ready to run, and a new version is built every time a push is made to the +master branch. + +Windows +======= + +On Windows, put the `restic.exe` binary into `%SystemRoot%\\System32` to use restic +in scripts without the need for absolute paths to the binary. This requires +administrator rights. + +Docker Container +**************** + +We're maintaining a bare docker container with just a few files and the restic +binary, you can get it with `docker pull` like this: + +.. code-block:: console + + $ docker pull restic/restic + +.. note:: + | Another docker container which offers more configuration options is + | available as a contribution (Thank you!). You can find it at + | https://github.com/Lobaro/restic-backup-docker + +From Source +*********** + +restic is written in the Go programming language and you need at least +Go version 1.9. Building restic may also work with older versions of Go, +but that's not supported. See the `Getting +started `__ guide of the Go project for +instructions how to install Go. + +In order to build restic from source, execute the following steps: + +.. code-block:: console + + $ git clone https://github.com/restic/restic + [...] + + $ cd restic + + $ go run -mod=vendor build.go + +For Go versions < 1.11, the option ``-mod=vendor`` needs to be removed, like +this: + +.. code-block:: console + + $ go run build.go + +You can easily cross-compile restic for all supported platforms, just +supply the target OS and platform via the command-line options like this +(for Windows and FreeBSD respectively): + +.. 
code-block:: console + + $ go run -mod=vendor build.go --goos windows --goarch amd64 + + $ go run -mod=vendor build.go --goos freebsd --goarch 386 + + $ go run -mod=vendor build.go --goos linux --goarch arm --goarm 6 + +Again, for Go < 1.11 ``-mod=vendor`` needs to be removed. + +The resulting binary is statically linked and does not require any +libraries. + +At the moment, the only tested compiler for restic is the official Go +compiler. Building restic with gccgo may work, but is not supported. + +Autocompletion +************** + +Restic can write out man pages and bash/zsh compatible autocompletion scripts: + +.. code-block:: console + + $ ./restic generate --help + + The "generate" command writes automatically generated files like the man pages + and the auto-completion files for bash and zsh). + + Usage: + restic generate [command] [flags] + + Flags: + --bash-completion file write bash completion file + -h, --help help for generate + --man directory write man pages to directory + --zsh-completion file write zsh completion file + +Example for using sudo to write a bash completion script directly to the system-wide location: + +.. code-block:: console + + $ sudo ./restic generate --bash-completion /etc/bash_completion.d/restic + writing bash completion file to /etc/bash_completion.d/restic diff --git a/doc/030_preparing_a_new_repo.rst b/doc/030_preparing_a_new_repo.rst new file mode 100644 index 000000000..6596ef23a --- /dev/null +++ b/doc/030_preparing_a_new_repo.rst @@ -0,0 +1,531 @@ +.. + Normally, there are no heading levels assigned to certain characters as the structure is + determined from the succession of headings. However, this convention is used in Python’s + Style Guide for documenting which you may follow: + + # with overline, for parts + * for chapters + = for sections + - for subsections + ^ for subsubsections + " for paragraphs + +########################## +Preparing a new repository +########################## + +The place where your backups will be saved is called a "repository". +This chapter explains how to create ("init") such a repository. The repository +can be stored locally, or on some remote server or service. We'll first cover +using a local repository; the remaining sections of this chapter cover all the +other options. You can skip to the next chapter once you've read the relevant +section here. + +For automated backups, restic accepts the repository location in the +environment variable ``RESTIC_REPOSITORY``. For the password, several options +exist: + + * Setting the environment variable ``RESTIC_PASSWORD`` + + * Specifying the path to a file with the password via the option + ``--password-file`` or the environment variable ``RESTIC_PASSWORD_FILE`` + + * Configuring a program to be called when the password is needed via the + option ``--password-command`` or the environment variable + ``RESTIC_PASSWORD_COMMAND`` + +Local +***** + +In order to create a repository at ``/srv/restic-repo``, run the following +command and enter the same password twice: + +.. code-block:: console + + $ restic init --repo /srv/restic-repo + enter password for new backend: + enter password again: + created restic backend 085b3c76b9 at /srv/restic-repo + Please note that knowledge of your password is required to access the repository. + Losing your password means that your data is irrecoverably lost. + +.. warning:: + + Remembering your password is important! If you lose it, you won't be + able to access data stored in the repository. 
+ +SFTP +**** + +In order to backup data via SFTP, you must first set up a server with +SSH and let it know your public key. Passwordless login is really +important since restic fails to connect to the repository if the server +prompts for credentials. + +Once the server is configured, the setup of the SFTP repository can +simply be achieved by changing the URL scheme in the ``init`` command: + +.. code-block:: console + + $ restic -r sftp:user@host:/srv/restic-repo init + enter password for new backend: + enter password again: + created restic backend f1c6108821 at sftp:user@host:/srv/restic-repo + Please note that knowledge of your password is required to access the repository. + Losing your password means that your data is irrecoverably lost. + +You can also specify a relative (read: no slash (``/``) character at the +beginning) directory, in this case the dir is relative to the remote +user's home directory. + +.. note:: Please be aware that sftp servers do not expand the tilde character + (``~``) normally used as an alias for a user's home directory. If you + want to specify a path relative to the user's home directory, pass a + relative path to the sftp backend. + +The backend config string does not allow specifying a port. If you need +to contact an sftp server on a different port, you can create an entry +in the ``ssh`` file, usually located in your user's home directory at +``~/.ssh/config`` or in ``/etc/ssh/ssh_config``: + +:: + + Host foo + User bar + Port 2222 + +Then use the specified host name ``foo`` normally (you don't need to +specify the user name in this case): + +:: + + $ restic -r sftp:foo:/srv/restic-repo init + +You can also add an entry with a special host name which does not exist, +just for use with restic, and use the ``Hostname`` option to set the +real host name: + +:: + + Host restic-backup-host + Hostname foo + User bar + Port 2222 + +Then use it in the backend specification: + +:: + + $ restic -r sftp:restic-backup-host:/srv/restic-repo init + +Last, if you'd like to use an entirely different program to create the +SFTP connection, you can specify the command to be run with the option +``-o sftp.command="foobar"``. + + +REST Server +*********** + +In order to backup data to the remote server via HTTP or HTTPS protocol, +you must first set up a remote `REST +server `__ instance. Once the +server is configured, accessing it is achieved by changing the URL +scheme like this: + +.. code-block:: console + + $ restic -r rest:http://host:8000/ + +Depending on your REST server setup, you can use HTTPS protocol, +password protection, multiple repositories or any combination of +those features. The TCP/IP port is also configurable. Here +are some more examples: + +.. code-block:: console + + $ restic -r rest:https://host:8000/ + $ restic -r rest:https://user:pass@host:8000/ + $ restic -r rest:https://user:pass@host:8000/my_backup_repo/ + +If you use TLS, restic will use the system's CA certificates to verify the +server certificate. When the verification fails, restic refuses to proceed and +exits with an error. If you have your own self-signed certificate, or a custom +CA certificate should be used for verification, you can pass restic the +certificate filename via the ``--cacert`` option. It will then verify that the +server's certificate is contained in the file passed to this option, or signed +by a CA certificate in the file. In this case, the system CA certificates are +not considered at all. 
+ +REST server uses exactly the same directory structure as local backend, +so you should be able to access it both locally and via HTTP, even +simultaneously. + +Amazon S3 +********* + +Restic can backup data to any Amazon S3 bucket. However, in this case, +changing the URL scheme is not enough since Amazon uses special security +credentials to sign HTTP requests. By consequence, you must first setup +the following environment variables with the credentials you obtained +while creating the bucket. + +.. code-block:: console + + $ export AWS_ACCESS_KEY_ID= + $ export AWS_SECRET_ACCESS_KEY= + +You can then easily initialize a repository that uses your Amazon S3 as +a backend. If the bucket does not exist it will be created in the +default location: + +.. code-block:: console + + $ restic -r s3:s3.amazonaws.com/bucket_name init + enter password for new backend: + enter password again: + created restic backend eefee03bbd at s3:s3.amazonaws.com/bucket_name + Please note that knowledge of your password is required to access the repository. + Losing your password means that your data is irrecoverably lost. + +It is not possible at the moment to have restic create a new bucket in a +different location, so you need to create it using a different program. +Afterwards, the S3 server (``s3.amazonaws.com``) will redirect restic to +the correct endpoint. + +Until version 0.8.0, restic used a default prefix of ``restic``, so the files +in the bucket were placed in a directory named ``restic``. If you want to +access a repository created with an older version of restic, specify the path +after the bucket name like this: + +.. code-block:: console + + $ restic -r s3:s3.amazonaws.com/bucket_name/restic [...] + +For an S3-compatible server that is not Amazon (like Minio, see below), +or is only available via HTTP, you can specify the URL to the server +like this: ``s3:http://server:port/bucket_name``. + +Minio Server +************ + +`Minio `__ is an Open Source Object Storage, +written in Go and compatible with AWS S3 API. + +- Download and Install `Minio + Server `__. +- You can also refer to https://docs.minio.io for step by step guidance + on installation and getting started on Minio Client and Minio Server. + +You must first setup the following environment variables with the +credentials of your Minio Server. + +.. code-block:: console + + $ export AWS_ACCESS_KEY_ID= + $ export AWS_SECRET_ACCESS_KEY= + +Now you can easily initialize restic to use Minio server as backend with +this command. + +.. code-block:: console + + $ ./restic -r s3:http://localhost:9000/restic init + enter password for new backend: + enter password again: + created restic backend 6ad29560f5 at s3:http://localhost:9000/restic1 + Please note that knowledge of your password is required to access + the repository. Losing your password means that your data is irrecoverably lost. + +OpenStack Swift +*************** + +Restic can backup data to an OpenStack Swift container. Because Swift supports +various authentication methods, credentials are passed through environment +variables. In order to help integration with existing OpenStack installations, +the naming convention of those variables follows the official Python Swift client: + +.. 
code-block:: console + + # For keystone v1 authentication + $ export ST_AUTH= + $ export ST_USER= + $ export ST_KEY= + + # For keystone v2 authentication (some variables are optional) + $ export OS_AUTH_URL= + $ export OS_REGION_NAME= + $ export OS_USERNAME= + $ export OS_PASSWORD= + $ export OS_TENANT_ID= + $ export OS_TENANT_NAME= + + # For keystone v3 authentication (some variables are optional) + $ export OS_AUTH_URL= + $ export OS_REGION_NAME= + $ export OS_USERNAME= + $ export OS_PASSWORD= + $ export OS_USER_DOMAIN_NAME= + $ export OS_PROJECT_NAME= + $ export OS_PROJECT_DOMAIN_NAME= + + # For authentication based on tokens + $ export OS_STORAGE_URL= + $ export OS_AUTH_TOKEN= + + +Restic should be compatible with an `OpenStack RC file +`__ +in most cases. + +Once environment variables are set up, a new repository can be created. The +name of the Swift container and optional path can be specified. If +the container does not exist, it will be created automatically: + +.. code-block:: console + + $ restic -r swift:container_name:/path init # path is optional + enter password for new backend: + enter password again: + created restic backend eefee03bbd at swift:container_name:/path + Please note that knowledge of your password is required to access the repository. + Losing your password means that your data is irrecoverably lost. + +The policy of the new container created by restic can be changed using environment variable: + +.. code-block:: console + + $ export SWIFT_DEFAULT_CONTAINER_POLICY= + + +Backblaze B2 +************ + +Restic can backup data to any Backblaze B2 bucket. You need to first setup the +following environment variables with the credentials you can find in the +dashboard in on the "Buckets" page when signed into your B2 account: + +.. code-block:: console + + $ export B2_ACCOUNT_ID= + $ export B2_ACCOUNT_KEY= + +.. note:: In case you want to use Backblaze Application Keys replace and with and respectively. + +You can then initialize a repository stored at Backblaze B2. If the +bucket does not exist yet and the credentials you passed to restic have the +privilege to create buckets, it will be created automatically: + +.. code-block:: console + + $ restic -r b2:bucketname:path/to/repo init + enter password for new backend: + enter password again: + created restic backend eefee03bbd at b2:bucketname:path/to/repo + Please note that knowledge of your password is required to access the repository. + Losing your password means that your data is irrecoverably lost. + +Note that the bucket name must be unique across all of B2. + +The number of concurrent connections to the B2 service can be set with the ``-o +b2.connections=10`` switch. By default, at most five parallel connections are +established. + +Microsoft Azure Blob Storage +**************************** + +You can also store backups on Microsoft Azure Blob Storage. Export the Azure +account name and key as follows: + +.. code-block:: console + + $ export AZURE_ACCOUNT_NAME= + $ export AZURE_ACCOUNT_KEY= + +Afterwards you can initialize a repository in a container called ``foo`` in the +root path like this: + +.. code-block:: console + + $ restic -r azure:foo:/ init + enter password for new backend: + enter password again: + + created restic backend a934bac191 at azure:foo:/ + [...] + +The number of concurrent connections to the Azure Blob Storage service can be set with the +``-o azure.connections=10`` switch. By default, at most five parallel connections are +established. 
+ +Google Cloud Storage +******************** + +Restic supports Google Cloud Storage as a backend and connects via a `service account`_. + +For normal restic operation, the service account must have the +``storage.objects.{create,delete,get,list}`` permissions for the bucket. These +are included in the "Storage Object Admin" role. +``restic init`` can create the repository bucket. Doing so requires the +``storage.buckets.create`` permission ("Storage Admin" role). If the bucket +already exists, that permission is unnecessary. + +To use the Google Cloud Storage backend, first `create a service account key`_ +and download the JSON credentials file. +Second, find the Google Project ID that you can see in the Google Cloud +Platform console at the "Storage/Settings" menu. Export the path to the JSON +key file and the project ID as follows: + +.. code-block:: console + + $ export GOOGLE_PROJECT_ID=123123123123 + $ export GOOGLE_APPLICATION_CREDENTIALS=$HOME/.config/gs-secret-restic-key.json + +Restic uses Google's client library to generate `default authentication material`_, +which means if you're running in Google Container Engine or are otherwise +located on an instance with default service accounts then these should work out of +the box. + +Once authenticated, you can use the ``gs:`` backend type to create a new +repository in the bucket ``foo`` at the root path: + +.. code-block:: console + + $ restic -r gs:foo:/ init + enter password for new backend: + enter password again: + + created restic backend bde47d6254 at gs:foo2/ + [...] + +The number of concurrent connections to the GCS service can be set with the +``-o gs.connections=10`` switch. By default, at most five parallel connections are +established. + +.. _service account: https://cloud.google.com/storage/docs/authentication#service_accounts +.. _create a service account key: https://cloud.google.com/storage/docs/authentication#generating-a-private-key +.. _default authentication material: https://developers.google.com/identity/protocols/application-default-credentials + +Other Services via rclone +************************* + +The program `rclone`_ can be used to access many other different services and +store data there. First, you need to install and `configure`_ rclone. The +general backend specification format is ``rclone::``, the +``:`` component will be directly passed to rclone. When you +configure a remote named ``foo``, you can then call restic as follows to +initiate a new repository in the path ``bar`` in the repo: + +.. code-block:: console + + $ restic -r rclone:foo:bar init + +Restic takes care of starting and stopping rclone. + +As a more concrete example, suppose you have configured a remote named +``b2prod`` for Backblaze B2 with rclone, with a bucket called ``yggdrasil``. +You can then use rclone to list files in the bucket like this: + +.. code-block:: console + + $ rclone ls b2prod:yggdrasil + +In order to create a new repository in the root directory of the bucket, call +restic like this: + +.. code-block:: console + + $ restic -r rclone:b2prod:yggdrasil init + +If you want to use the path ``foo/bar/baz`` in the bucket instead, pass this to +restic: + +.. code-block:: console + + $ restic -r rclone:b2prod:yggdrasil/foo/bar/baz init + +Listing the files of an empty repository directly with rclone should return a +listing similar to the following: + +.. 
code-block:: console + + $ rclone ls b2prod:yggdrasil/foo/bar/baz + 155 bar/baz/config + 448 bar/baz/keys/4bf9c78049de689d73a56ed0546f83b8416795295cda12ec7fb9465af3900b44 + +Rclone can be `configured with environment variables`_, so for instance +configuring a bandwidth limit for rclone can be achieved by setting the +``RCLONE_BWLIMIT`` environment variable: + +.. code-block:: console + + $ export RCLONE_BWLIMIT=1M + +For debugging rclone, you can set the environment variable ``RCLONE_VERBOSE=2``. + +The rclone backend has two additional options: + + * ``-o rclone.program`` specifies the path to rclone, the default value is just ``rclone`` + * ``-o rclone.args`` allows setting the arguments passed to rclone, by default this is ``serve restic --stdio --b2-hard-delete --drive-use-trash=false`` + +The reason for the two last parameters (``--b2-hard-delete`` and +``--drive-use-trash=false``) can be found in the corresponding GitHub `issue #1657`_. + +In order to start rclone, restic will build a list of arguments by joining the +following lists (in this order): ``rclone.program``, ``rclone.args`` and as the +last parameter the value that follows the ``rclone:`` prefix of the repository +specification. + +So, calling restic like this + +.. code-block:: console + + $ restic -o rclone.program="/path/to/rclone" \ + -o rclone.args="serve restic --stdio --bwlimit 1M --b2-hard-delete --verbose" \ + -r rclone:b2:foo/bar + +runs rclone as follows: + +.. code-block:: console + + $ /path/to/rclone serve restic --stdio --bwlimit 1M --b2-hard-delete --verbose b2:foo/bar + +Manually setting ``rclone.program`` also allows running a remote instance of +rclone e.g. via SSH on a server, for example: + +.. code-block:: console + + $ restic -o rclone.program="ssh user@host rclone" -r rclone:b2:foo/bar + +The rclone command may also be hard-coded in the SSH configuration or the +user's public key, in this case it may be sufficient to just start the SSH +connection (and it's irrelevant what's passed after ``rclone:`` in the +repository specification): + +.. code-block:: console + + $ restic -o rclone.program="ssh user@host" -r rclone:x + +.. _rclone: https://rclone.org/ +.. _configure: https://rclone.org/docs/ +.. _configured with environment variables: https://rclone.org/docs/#environment-variables +.. _issue #1657: https://github.com/restic/restic/pull/1657#issuecomment-377707486 + +Password prompt on Windows +************************** + +At the moment, restic only supports the default Windows console +interaction. If you use emulation environments like +`MSYS2 `__ or +`Cygwin `__, which use terminals like +``Mintty`` or ``rxvt``, you may get a password error. + +You can workaround this by using a special tool called ``winpty`` (look +`here `__ and +`here `__ for detail information). +On MSYS2, you can install ``winpty`` as follows: + +.. code-block:: console + + $ pacman -S winpty + $ winpty restic -r /srv/restic-repo init + diff --git a/doc/040_backup.rst b/doc/040_backup.rst new file mode 100644 index 000000000..cb51d5c04 --- /dev/null +++ b/doc/040_backup.rst @@ -0,0 +1,380 @@ +.. + Normally, there are no heading levels assigned to certain characters as the structure is + determined from the succession of headings. 
However, this convention is used in Python’s + Style Guide for documenting which you may follow: + + # with overline, for parts + * for chapters + = for sections + - for subsections + ^ for subsubsections + " for paragraphs + +########## +Backing up +########## + +Now we're ready to backup some data. The contents of a directory at a +specific point in time is called a "snapshot" in restic. Run the +following command and enter the repository password you chose above +again: + +.. code-block:: console + + $ restic -r /srv/restic-repo --verbose backup ~/work + open repository + enter password for repository: + password is correct + lock repository + load index files + start scan + start backup + scan finished in 1.837s + processed 1.720 GiB in 0:12 + Files: 5307 new, 0 changed, 0 unmodified + Dirs: 1867 new, 0 changed, 0 unmodified + Added: 1.200 GiB + snapshot 40dc1520 saved + +As you can see, restic created a backup of the directory and was pretty +fast! The specific snapshot just created is identified by a sequence of +hexadecimal characters, ``40dc1520`` in this case. + +You can see that restic tells us it processed 1.720 GiB of data, this is the +size of the files and directories in ``~/work`` on the local file system. It +also tells us that only 1.200 GiB was added to the repository. This means that +some of the data was duplicate and restic was able to efficiently reduce it. + +If you don't pass the ``--verbose`` option, restic will print less data. You'll +still get a nice live status display. Be aware that the live status shows the +processed files and not the transferred data. Transferred volume might be lower +(due to de-duplication) or higher. + +If you run the command again, restic will create another snapshot of +your data, but this time it's even faster and no new data was added to the +repository (since all data is already there). This is de-duplication at work! + +.. code-block:: console + + $ restic -r /srv/restic-repo backup --verbose ~/work + open repository + enter password for repository: + password is correct + lock repository + load index files + using parent snapshot d875ae93 + start scan + start backup + scan finished in 1.881s + processed 1.720 GiB in 0:03 + Files: 0 new, 0 changed, 5307 unmodified + Dirs: 0 new, 0 changed, 1867 unmodified + Added: 0 B + snapshot 79766175 saved + +You can even backup individual files in the same repository (not passing +``--verbose`` means less output): + +.. code-block:: console + + $ restic -r /srv/restic-repo backup ~/work.txt + enter password for repository: + password is correct + snapshot 249d0210 saved + +If you're interested in what restic does, pass ``--verbose`` twice (or +``--verbose 2``) to display detailed information about each file and directory +restic encounters: + +.. 
code-block:: console + + $ echo 'more data foo bar' >> ~/work.txt + + $ restic -r /srv/restic-repo backup --verbose --verbose ~/work.txt + open repository + enter password for repository: + password is correct + lock repository + load index files + using parent snapshot f3f8d56b + start scan + start backup + scan finished in 2.115s + modified /home/user/work.txt, saved in 0.007s (22 B added) + modified /home/user/, saved in 0.008s (0 B added, 378 B metadata) + modified /home/, saved in 0.009s (0 B added, 375 B metadata) + processed 22 B in 0:02 + Files: 0 new, 1 changed, 0 unmodified + Dirs: 0 new, 2 changed, 0 unmodified + Data Blobs: 1 new + Tree Blobs: 3 new + Added: 1.116 KiB + snapshot 8dc503fc saved + +In fact several hosts may use the same repository to backup directories +and files leading to a greater de-duplication. + +Please be aware that when you backup different directories (or the +directories to be saved have a variable name component like a +time/date), restic always needs to read all files and only afterwards +can compute which parts of the files need to be saved. When you backup +the same directory again (maybe with new or changed files) restic will +find the old snapshot in the repo and by default only reads those files +that are new or have been modified since the last snapshot. This is +decided based on the following attributes of the file in the file system: + + * Type (file, symlink, or directory?) + * Modification time + * Size + * Inode number (internal number used to reference a file in a file system) + +Now is a good time to run ``restic check`` to verify that all data +is properly stored in the repository. You should run this command regularly +to make sure the internal structure of the repository is free of errors. + +Including and Excluding Files +***************************** + +You can exclude folders and files by specifying exclude patterns, currently +the exclude options are: + +- ``--exclude`` Specified one or more times to exclude one or more items +- ``--exclude-caches`` Specified once to exclude folders containing a special file +- ``--exclude-file`` Specified one or more times to exclude items listed in a given file +- ``--exclude-if-present`` Specified one or more times to exclude a folders content + if it contains a given file (optionally having a given header) + + Let's say we have a file called ``excludes.txt`` with the following content: + +:: + + # exclude go-files + *.go + # exclude foo/x/y/z/bar foo/x/bar foo/bar + foo/**/bar + +It can be used like this: + +.. code-block:: console + + $ restic -r /srv/restic-repo backup ~/work --exclude="*.c" --exclude-file=excludes.txt + +This instruct restic to exclude files matching the following criteria: + + * All files matching ``*.go`` (second line in ``excludes.txt``) + * All files and sub-directories named ``bar`` which reside somewhere below a directory called ``foo`` (fourth line in ``excludes.txt``) + * All files matching ``*.c`` (parameter ``--exclude``) + +Please see ``restic help backup`` for more specific information about each exclude option. + +Patterns use `filepath.Glob `__ internally, +see `filepath.Match `__ for +syntax. Patterns are tested against the full path of a file/dir to be saved, +even if restic is passed a relative path to save. Environment-variables in +exclude-files are expanded with `os.ExpandEnv `__, +so `/home/$USER/foo` will be expanded to `/home/bob/foo` for the user `bob`. To +get a literal dollar sign, write `$$` to the file. 
+ +Patterns need to match on complete path components. For example, the pattern ``foo``: + + * matches ``/dir1/foo/dir2/file`` and ``/dir/foo`` + * does not match ``/dir/foobar`` or ``barfoo`` + +A trailing ``/`` is ignored, a leading ``/`` anchors the +pattern at the root directory. This means, ``/bin`` matches ``/bin/bash`` but +does not match ``/usr/bin/restic``. + +Regular wildcards cannot be used to match over the +directory separator ``/``. For example: ``b*ash`` matches ``/bin/bash`` but does not match +``/bin/ash``. + +For this, the special wildcard ``**`` can be used to match arbitrary +sub-directories: The pattern ``foo/**/bar`` matches: + + * ``/dir1/foo/dir2/bar/file`` + * ``/foo/bar/file`` + * ``/tmp/foo/bar`` + +By specifying the option ``--one-file-system`` you can instruct restic +to only backup files from the file systems the initially specified files +or directories reside on. For example, calling restic like this won't +backup ``/sys`` or ``/dev`` on a Linux system: + +.. code-block:: console + + $ restic -r /srv/restic-repo backup --one-file-system / + +.. note:: ``--one-file-system`` is currently unsupported on Windows, and will + cause the backup to immediately fail with an error. + +By using the ``--files-from`` option you can read the files you want to +backup from one or more files. This is especially useful if a lot of files have +to be backed up that are not in the same folder or are maybe pre-filtered +by other software. + +For example maybe you want to backup files which have a name that matches a +certain pattern: + +.. code-block:: console + + $ find /tmp/somefiles | grep 'PATTERN' > /tmp/files_to_backup + +You can then use restic to backup the filtered files: + +.. code-block:: console + + $ restic -r /srv/restic-repo backup --files-from /tmp/files_to_backup + +Incidentally you can also combine ``--files-from`` with the normal files +args: + +.. code-block:: console + + $ restic -r /srv/restic-repo backup --files-from /tmp/files_to_backup /tmp/some_additional_file + +Paths in the listing file can be absolute or relative. + +Comparing Snapshots +******************* + +Restic has a `diff` command which shows the difference between two snapshots +and displays a small statistic, just pass the command two snapshot IDs: + +.. code-block:: console + + $ restic -r /srv/restic-repo diff 5845b002 2ab627a6 + password is correct + comparing snapshot ea657ce5 to 2ab627a6: + + C /restic/cmd_diff.go + + /restic/foo + C /restic/restic + + Files: 0 new, 0 removed, 2 changed + Dirs: 1 new, 0 removed + Others: 0 new, 0 removed + Data Blobs: 14 new, 15 removed + Tree Blobs: 2 new, 1 removed + Added: 16.403 MiB + Removed: 16.402 MiB + + +Backing up special items and metadata +************************************* + +**Symlinks** are archived as symlinks, ``restic`` does not follow them. +When you restore, you get the same symlink again, with the same link target +and the same timestamps. + +If there is a **bind-mount** below a directory that is to be saved, restic descends into it. + +**Device files** are saved and restored as device files. This means that e.g. ``/dev/sda`` is +archived as a block device file and restored as such. This also means that the content of the +corresponding disk is not read, at least not from the device file. + +By default, restic does not save the access time (atime) for any files or other +items, since it is not possible to reliably disable updating the access time by +restic itself. 
This means that for each new backup a lot of metadata is +written, and the next backup needs to write new metadata again. If you really +want to save the access time for files and directories, you can pass the +``--with-atime`` option to the ``backup`` command. + +Reading data from stdin +*********************** + +Sometimes it can be nice to directly save the output of a program, e.g. +``mysqldump`` so that the SQL can later be restored. Restic supports +this mode of operation, just supply the option ``--stdin`` to the +``backup`` command like this: + +.. code-block:: console + + $ mysqldump [...] | restic -r /srv/restic-repo backup --stdin + +This creates a new snapshot of the output of ``mysqldump``. You can then +use e.g. the fuse mounting option (see below) to mount the repository +and read the file. + +By default, the file name ``stdin`` is used, a different name can be +specified with ``--stdin-filename``, e.g. like this: + +.. code-block:: console + + $ mysqldump [...] | restic -r /srv/restic-repo backup --stdin --stdin-filename production.sql + +Tags for backup +*************** + +Snapshots can have one or more tags, short strings which add identifying +information. Just specify the tags for a snapshot one by one with ``--tag``: + +.. code-block:: console + + $ restic -r /srv/restic-repo backup --tag projectX --tag foo --tag bar ~/work + [...] + +The tags can later be used to keep (or forget) snapshots with the ``forget`` +command. The command ``tag`` can be used to modify tags on an existing +snapshot. + +Space requirements +****************** + +Restic currently assumes that your backup repository has sufficient space +for the backup operation you are about to perform. This is a realistic +assumption for many cloud providers, but may not be true when backing up +to local disks. + +Should you run out of space during the middle of a backup, there will be +some additional data in the repository, but the snapshot will never be +created as it would only be written at the very (successful) end of +the backup operation. Previous snapshots will still be there and will still +work. + + +Environment Variables +********************* + +In addition to command-line options, restic supports passing various options in +environment variables. The following list of environment variables: + +.. 
code-block:: console + + RESTIC_REPOSITORY Location of repository (replaces -r) + RESTIC_PASSWORD_FILE Location of password file (replaces --password-file) + RESTIC_PASSWORD The actual password for the repository + + AWS_ACCESS_KEY_ID Amazon S3 access key ID + AWS_SECRET_ACCESS_KEY Amazon S3 secret access key + + ST_AUTH Auth URL for keystone v1 authentication + ST_USER Username for keystone v1 authentication + ST_KEY Password for keystone v1 authentication + + OS_AUTH_URL Auth URL for keystone authentication + OS_REGION_NAME Region name for keystone authentication + OS_USERNAME Username for keystone authentication + OS_PASSWORD Password for keystone authentication + OS_TENANT_ID Tenant ID for keystone v2 authentication + OS_TENANT_NAME Tenant name for keystone v2 authentication + + OS_USER_DOMAIN_NAME User domain name for keystone authentication + OS_PROJECT_NAME Project name for keystone authentication + OS_PROJECT_DOMAIN_NAME PRoject domain name for keystone authentication + + OS_STORAGE_URL Storage URL for token authentication + OS_AUTH_TOKEN Auth token for token authentication + + B2_ACCOUNT_ID Account ID or applicationKeyId for Backblaze B2 + B2_ACCOUNT_KEY Account Key or applicationKey for Backblaze B2 + + AZURE_ACCOUNT_NAME Account name for Azure + AZURE_ACCOUNT_KEY Account key for Azure + + GOOGLE_PROJECT_ID Project ID for Google Cloud Storage + GOOGLE_APPLICATION_CREDENTIALS Application Credentials for Google Cloud Storage (e.g. $HOME/.config/gs-secret-restic-key.json) + + RCLONE_BWLIMIT rclone bandwidth limit + + + diff --git a/doc/045_working_with_repos.rst b/doc/045_working_with_repos.rst new file mode 100644 index 000000000..519238a51 --- /dev/null +++ b/doc/045_working_with_repos.rst @@ -0,0 +1,115 @@ +.. + Normally, there are no heading levels assigned to certain characters as the structure is + determined from the succession of headings. However, this convention is used in Python’s + Style Guide for documenting which you may follow: + + # with overline, for parts + * for chapters + = for sections + - for subsections + ^ for subsubsections + " for paragraphs + + +######################### +Working with repositories +######################### + +Listing all snapshots +===================== + +Now, you can list all the snapshots stored in the repository: + +.. code-block:: console + + $ restic -r /srv/restic-repo snapshots + enter password for repository: + ID Date Host Tags Directory + ---------------------------------------------------------------------- + 40dc1520 2015-05-08 21:38:30 kasimir /home/user/work + 79766175 2015-05-08 21:40:19 kasimir /home/user/work + bdbd3439 2015-05-08 21:45:17 luigi /home/art + 590c8fc8 2015-05-08 21:47:38 kazik /srv + 9f0bc19e 2015-05-08 21:46:11 luigi /srv + +You can filter the listing by directory path: + +.. code-block:: console + + $ restic -r /srv/restic-repo snapshots --path="/srv" + enter password for repository: + ID Date Host Tags Directory + ---------------------------------------------------------------------- + 590c8fc8 2015-05-08 21:47:38 kazik /srv + 9f0bc19e 2015-05-08 21:46:11 luigi /srv + +Or filter by host: + +.. code-block:: console + + $ restic -r /srv/restic-repo snapshots --host luigi + enter password for repository: + ID Date Host Tags Directory + ---------------------------------------------------------------------- + bdbd3439 2015-05-08 21:45:17 luigi /home/art + 9f0bc19e 2015-05-08 21:46:11 luigi /srv + +Combining filters is also possible. 
+ + +Checking a repo's integrity and consistency +=========================================== + +Imagine your repository is saved on a server that has a faulty hard +drive, or even worse, attackers get privileged access and modify your +backup with the intention to make you restore malicious data: + +.. code-block:: console + + $ echo "boom" >> backup/index/d795ffa99a8ab8f8e42cec1f814df4e48b8f49129360fb57613df93739faee97 + +In order to detect these things, it is a good idea to regularly use the +``check`` command to test whether everything is alright, your precious +backup data is consistent and the integrity is unharmed: + +.. code-block:: console + + $ restic -r /srv/restic-repo check + Load indexes + ciphertext verification failed + +Trying to restore a snapshot which has been modified as shown above will +yield the same error: + +.. code-block:: console + + $ restic -r /srv/restic-repo restore 79766175 --target /tmp/restore-work + Load indexes + ciphertext verification failed + +By default, ``check`` command does not check that repository data files +are unmodified. Use ``--read-data`` parameter to check all repository +data files: + +.. code-block:: console + + $ restic -r /srv/restic-repo check --read-data + load indexes + check all packs + check snapshots, trees and blobs + read all data + +Use ``--read-data-subset=n/t`` parameter to check subset of repository data +files. The parameter takes two values, ``n`` and ``t``. All repository data +files are logically devided in ``t`` roughly equal groups and only files that +belong to the group number ``n`` are checked. For example, the following +commands check all repository data files over 5 separate invocations: + +.. code-block:: console + + $ restic -r /srv/restic-repo check --read-data-subset=1/5 + $ restic -r /srv/restic-repo check --read-data-subset=2/5 + $ restic -r /srv/restic-repo check --read-data-subset=3/5 + $ restic -r /srv/restic-repo check --read-data-subset=4/5 + $ restic -r /srv/restic-repo check --read-data-subset=5/5 + diff --git a/doc/050_restore.rst b/doc/050_restore.rst new file mode 100644 index 000000000..e602c7e8a --- /dev/null +++ b/doc/050_restore.rst @@ -0,0 +1,122 @@ +.. + Normally, there are no heading levels assigned to certain characters as the structure is + determined from the succession of headings. However, this convention is used in Python’s + Style Guide for documenting which you may follow: + + # with overline, for parts + * for chapters + = for sections + - for subsections + ^ for subsubsections + " for paragraphs + +##################### +Restoring from backup +##################### + +Restoring from a snapshot +========================= + +Restoring a snapshot is as easy as it sounds, just use the following +command to restore the contents of the latest snapshot to +``/tmp/restore-work``: + +.. code-block:: console + + $ restic -r /srv/restic-repo restore 79766175 --target /tmp/restore-work + enter password for repository: + restoring to /tmp/restore-work + +Use the word ``latest`` to restore the last backup. You can also combine +``latest`` with the ``--host`` and ``--path`` filters to choose the last +backup for a specific host, path or both. + +.. code-block:: console + + $ restic -r /srv/restic-repo restore latest --target /tmp/restore-art --path "/home/art" --host luigi + enter password for repository: + restoring to /tmp/restore-art + +Use ``--exclude`` and ``--include`` to restrict the restore to a subset of +files in the snapshot. For example, to restore a single file: + +.. 
code-block:: console + + $ restic -r /srv/restic-repo restore 79766175 --target /tmp/restore-work --include /work/foo + enter password for repository: + restoring to /tmp/restore-work + +This will restore the file ``foo`` to ``/tmp/restore-work/work/foo``. + +You can use the command ``restic ls latest`` or ``restic find foo`` to find the +path to the file within the snapshot. This path you can then pass to +`--include` in verbatim to only restore the single file or directory. + +Restore using mount +=================== + +Browsing your backup as a regular file system is also very easy. First, +create a mount point such as ``/mnt/restic`` and then use the following +command to serve the repository with FUSE: + +.. code-block:: console + + $ mkdir /mnt/restic + $ restic -r /srv/restic-repo mount /mnt/restic + enter password for repository: + Now serving /srv/restic-repo at /mnt/restic + Don't forget to umount after quitting! + +Mounting repositories via FUSE is not possible on OpenBSD, Solaris/illumos +and Windows. For Linux, the ``fuse`` kernel module needs to be loaded. For +FreeBSD, you may need to install FUSE and load the kernel module (``kldload +fuse``). + +Restic supports storage and preservation of hard links. However, since +hard links exist in the scope of a filesystem by definition, restoring +hard links from a fuse mount should be done by a program that preserves +hard links. A program that does so is ``rsync``, used with the option +--hard-links. + +Printing files to stdout +======================== + +Sometimes it's helpful to print files to stdout so that other programs can read +the data directly. This can be achieved by using the `dump` command, like this: + +.. code-block:: console + + $ restic -r /srv/restic-repo dump latest production.sql | mysql + +If you have saved multiple different things into the same repo, the ``latest`` +snapshot may not be the right one. For example, consider the following +snapshots in a repo: + +.. code-block:: console + + $ restic -r /srv/restic-repo snapshots + ID Date Host Tags Directory + ---------------------------------------------------------------------- + 562bfc5e 2018-07-14 20:18:01 mopped /home/user/file1 + bbacb625 2018-07-14 20:18:07 mopped /home/other/work + e922c858 2018-07-14 20:18:10 mopped /home/other/work + 098db9d5 2018-07-14 20:18:13 mopped /production.sql + b62f46ec 2018-07-14 20:18:16 mopped /home/user/file1 + 1541acae 2018-07-14 20:18:18 mopped /home/other/work + ---------------------------------------------------------------------- + +Here, restic would resolve ``latest`` to the snapshot ``1541acae``, which does +not contain the file we'd like to print at all (``production.sql``). In this +case, you can pass restic the snapshot ID of the snapshot you like to restore: + +.. code-block:: console + + $ restic -r /srv/restic-repo dump 098db9d5 production.sql | mysql + +Or you can pass restic a path that should be used for selecting the latest +snapshot. The path must match the patch printed in the "Directory" column, +e.g.: + +.. code-block:: console + + $ restic -r /srv/restic-repo dump --path /production.sql latest production.sql | mysql diff --git a/doc/060_forget.rst b/doc/060_forget.rst new file mode 100644 index 000000000..a1edc55fc --- /dev/null +++ b/doc/060_forget.rst @@ -0,0 +1,230 @@ +.. + Normally, there are no heading levels assigned to certain characters as the structure is + determined from the succession of headings. 
However, this convention is used in Python’s + Style Guide for documenting which you may follow: + + # with overline, for parts + * for chapters + = for sections + - for subsections + ^ for subsubsections + " for paragraphs + +######################### +Removing backup snapshots +######################### + +All backup space is finite, so restic allows removing old snapshots. +This can be done either manually (by specifying a snapshot ID to remove) +or by using a policy that describes which snapshots to forget. For all +remove operations, two commands need to be called in sequence: +``forget`` to remove a snapshot and ``prune`` to actually remove the +data that was referenced by the snapshot from the repository. This can +be automated with the ``--prune`` option of the ``forget`` command, +which runs ``prune`` automatically if snapshots have been removed. + +.. Warning:: + + Pruning snapshots can be a very time-consuming process, taking nearly + as long as backups themselves. During a prune operation, the index is + locked and backups cannot be completed. Performance improvements are + planned for this feature. + +It is advisable to run ``restic check`` after pruning, to make sure +you are alerted, should the internal data structures of the repository +be damaged. + +Remove a single snapshot +************************ + +The command ``snapshots`` can be used to list all snapshots in a +repository like this: + +.. code-block:: console + + $ restic -r /srv/restic-repo snapshots + enter password for repository: + ID Date Host Tags Directory + ---------------------------------------------------------------------- + 40dc1520 2015-05-08 21:38:30 kasimir /home/user/work + 79766175 2015-05-08 21:40:19 kasimir /home/user/work + bdbd3439 2015-05-08 21:45:17 luigi /home/art + 590c8fc8 2015-05-08 21:47:38 kazik /srv + 9f0bc19e 2015-05-08 21:46:11 luigi /srv + +In order to remove the snapshot of ``/home/art``, use the ``forget`` +command and specify the snapshot ID on the command line: + +.. code-block:: console + + $ restic -r /srv/restic-repo forget bdbd3439 + enter password for repository: + removed snapshot d3f01f63 + +Afterwards this snapshot is removed: + +.. code-block:: console + + $ restic -r /srv/restic-repo snapshots + enter password for repository: + ID Date Host Tags Directory + ---------------------------------------------------------------------- + 40dc1520 2015-05-08 21:38:30 kasimir /home/user/work + 79766175 2015-05-08 21:40:19 kasimir /home/user/work + 590c8fc8 2015-05-08 21:47:38 kazik /srv + 9f0bc19e 2015-05-08 21:46:11 luigi /srv + +But the data that was referenced by files in this snapshot is still +stored in the repository. To cleanup unreferenced data, the ``prune`` +command must be run: + +.. code-block:: console + + $ restic -r /srv/restic-repo prune + enter password for repository: + + counting files in repo + building new index for repo + [0:00] 100.00% 22 / 22 files + repository contains 22 packs (8512 blobs) with 100.092 MiB bytes + processed 8512 blobs: 0 duplicate blobs, 0B duplicate + load all snapshots + find data that is still in use for 1 snapshots + [0:00] 100.00% 1 / 1 snapshots + found 8433 of 8512 data blobs still in use + will rewrite 3 packs + creating new index + [0:00] 86.36% 19 / 22 files + saved new index as 544a5084 + done + +Afterwards the repository is smaller. + +You can automate this two-step process by using the ``--prune`` switch +to ``forget``: + +.. 
code-block:: console + + $ restic forget --keep-last 1 --prune + snapshots for host mopped, directories /home/user/work: + + keep 1 snapshots: + ID Date Host Tags Directory + ---------------------------------------------------------------------- + 4bba301e 2017-02-21 10:49:18 mopped /home/user/work + + remove 1 snapshots: + ID Date Host Tags Directory + ---------------------------------------------------------------------- + 8c02b94b 2017-02-21 10:48:33 mopped /home/user/work + + 1 snapshots have been removed, running prune + counting files in repo + building new index for repo + [0:00] 100.00% 37 / 37 packs + repository contains 37 packs (5521 blobs) with 151.012 MiB bytes + processed 5521 blobs: 0 duplicate blobs, 0B duplicate + load all snapshots + find data that is still in use for 1 snapshots + [0:00] 100.00% 1 / 1 snapshots + found 5323 of 5521 data blobs still in use, removing 198 blobs + will delete 0 packs and rewrite 27 packs, this frees 22.106 MiB + creating new index + [0:00] 100.00% 30 / 30 packs + saved new index as b49f3e68 + done + +Removing snapshots according to a policy +**************************************** + +Removing snapshots manually is tedious and error-prone, therefore restic +allows specifying which snapshots should be removed automatically +according to a policy. You can specify how many hourly, daily, weekly, +monthly and yearly snapshots to keep, any other snapshots are removed. +The most important command-line parameter here is ``--dry-run`` which +instructs restic to not remove anything but print which snapshots would +be removed. + +When ``forget`` is run with a policy, restic loads the list of all +snapshots, then groups these by host name and list of directories. The grouping +options can be set with ``--group-by``, to only group snapshots by paths and +tags use ``--group-by paths,tags``. The policy is then applied to each group of +snapshots separately. This is a safety feature. + +The ``forget`` command accepts the following parameters: + +- ``--keep-last n`` never delete the ``n`` last (most recent) snapshots +- ``--keep-hourly n`` for the last ``n`` hours in which a snapshot was + made, keep only the last snapshot for each hour. +- ``--keep-daily n`` for the last ``n`` days which have one or more + snapshots, only keep the last one for that day. +- ``--keep-weekly n`` for the last ``n`` weeks which have one or more + snapshots, only keep the last one for that week. +- ``--keep-monthly n`` for the last ``n`` months which have one or more + snapshots, only keep the last one for that month. +- ``--keep-yearly n`` for the last ``n`` years which have one or more + snapshots, only keep the last one for that year. +- ``--keep-tag`` keep all snapshots which have all tags specified by + this option (can be specified multiple times). +- ``--keep-within duration`` keep all snapshots which have been made within + the duration of the latest snapshot. ``duration`` needs to be a number of + years, months, days, and hours, e.g. ``2y5m7d3h`` will keep all snapshots + made in the two years, five months, seven days, and three hours before the + latest snapshot. + +Multiple policies will be ORed together so as to be as inclusive as possible +for keeping snapshots. + +Additionally, you can restrict removing snapshots to those which have a +particular hostname with the ``--hostname`` parameter, or tags with the +``--tag`` option. When multiple tags are specified, only the snapshots +which have all the tags are considered. 
For example, the following command +removes all but the latest snapshot of all snapshots that have the tag ``foo``: + +.. code-block:: console + + $ restic forget --tag foo --keep-last 1 + +This command removes all but the last snapshot of all snapshots that have +either the ``foo`` or ``bar`` tag set: + +.. code-block:: console + + $ restic forget --tag foo --tag bar --keep-last 1 + +To only keep the last snapshot of all snapshots with both the tag ``foo`` and +``bar`` set use: + +.. code-block:: console + + $ restic forget --tag foo,tag bar --keep-last 1 + +All the ``--keep-*`` options above only count +hours/days/weeks/months/years which have a snapshot, so those without a +snapshot are ignored. + +For safety reasons, restic refuses to act on an "empty" policy. For example, +if one were to specify ``--keep-last 0`` to forget *all* snapshots in the +repository, restic will respond that no snapshots will be removed. To delete +all snapshots, use ``--keep-last 1`` and then finally remove the last +snapshot ID manually (by passing the ID to ``forget``). + +All snapshots are evaluated against all matching ``--keep-*`` counts. A +single snapshot on 2017-09-30 (Sun) will count as a daily, weekly and monthly. + +Let's explain this with an example: Suppose you have only made a backup +on each Sunday for 12 weeks. Then ``forget --keep-daily 4`` will keep +the last four snapshots for the last four Sundays, but remove the rest. +Only counting the days which have a backup and ignore the ones without +is a safety feature: it prevents restic from removing many snapshots +when no new ones are created. If it was implemented otherwise, running +``forget --keep-daily 4`` on a Friday would remove all snapshots! + +Another example: Suppose you make daily backups for 100 years. Then +``forget --keep-daily 7 --keep-weekly 5 --keep-monthly 12 --keep-yearly 75`` +will keep the most recent 7 daily snapshots, then 4 (remember, 7 dailies +already include a week!) last-day-of-the-weeks and 11 or 12 +last-day-of-the-months (11 or 12 depends if the 5 weeklies cross a month). +And finally 75 last-day-of-the-year snapshots. All other snapshots are +removed. + diff --git a/doc/070_encryption.rst b/doc/070_encryption.rst new file mode 100644 index 000000000..a7b8716ac --- /dev/null +++ b/doc/070_encryption.rst @@ -0,0 +1,51 @@ +.. + Normally, there are no heading levels assigned to certain characters as the structure is + determined from the succession of headings. However, this convention is used in Python’s + Style Guide for documenting which you may follow: + + # with overline, for parts + * for chapters + = for sections + - for subsections + ^ for subsubsections + " for paragraphs + +########## +Encryption +########## + + +*"The design might not be perfect, but it’s good. Encryption is a first-class feature, +the implementation looks sane and I guess the deduplication trade-off is worth +it. So… I’m going to use restic for my personal backups.*" `Filippo Valsorda`_ + +.. _Filippo Valsorda: https://blog.filippo.io/restic-cryptography/ + +********************** +Manage repository keys +********************** + +The ``key`` command allows you to set multiple access keys or passwords +per repository. In fact, you can use the ``list``, ``add``, ``remove``, and +``passwd`` (changes a password) sub-commands to manage these keys very precisely: + +.. 
code-block:: console + + $ restic -r /srv/restic-repo key list + enter password for repository: + ID User Host Created + ---------------------------------------------------------------------- + *eb78040b username kasimir 2015-08-12 13:29:57 + + $ restic -r /srv/restic-repo key add + enter password for repository: + enter password for new key: + enter password again: + saved new key as + + $ restic -r /srv/restic-repo key list + enter password for repository: + ID User Host Created + ---------------------------------------------------------------------- + 5c657874 username kasimir 2015-08-12 13:35:05 + *eb78040b username kasimir 2015-08-12 13:29:57 diff --git a/doc/075_scripting.rst b/doc/075_scripting.rst new file mode 100644 index 000000000..712a70244 --- /dev/null +++ b/doc/075_scripting.rst @@ -0,0 +1,39 @@ +.. + Normally, there are no heading levels assigned to certain characters as the structure is + determined from the succession of headings. However, this convention is used in Python’s + Style Guide for documenting which you may follow: + + # with overline, for parts + * for chapters + = for sections + - for subsections + ^ for subsubsections + " for paragraphs + +######################### +Scripting +######################### + +This is a list of how certain tasks may be accomplished when you use +restic via scripts. + +Check if a repository is already initialized +******************************************** + +You may find a need to check if a repository is already initialized, +perhaps to prevent your script from initializing a repository multiple +times. The command ``snapshots`` may be used for this purpose: + +.. code-block:: console + + $ restic -r /srv/restic-repo snapshots + Fatal: unable to open config file: Stat: stat /srv/restic-repo/config: no such file or directory + Is there a repository at the following location? + /srv/restic-repo + +If a repository does not exist, restic will return a non-zero exit code +and print an error message. Note that restic will also return a non-zero +exit code if a different error is encountered (e.g.: incorrect password +to ``snapshots``) and it may print a different error message. If there +are no errors, restic will return a zero exit code and print all the +snapshots. diff --git a/doc/080_examples.rst b/doc/080_examples.rst new file mode 100644 index 000000000..875666d65 --- /dev/null +++ b/doc/080_examples.rst @@ -0,0 +1,342 @@ +.. + Normally, there are no heading levels assigned to certain characters as the structure is + determined from the succession of headings. However, this convention is used in Python’s + Style Guide for documenting which you may follow: + + # with overline, for parts + * for chapters + = for sections + - for subsections + ^ for subsubsections + " for paragraphs + +######## +Examples +######## + +******************************** +Setting up restic with Amazon S3 +******************************** + +Preface +======= + +This tutorial will show you how to use restic with AWS S3. It will show you how +to navigate the AWS web interface, create an S3 bucket, create a user with +access to only this bucket, and finally how to connect restic to this bucket. + +Prerequisites +============= + +You should already have a ``restic`` binary available on your system that you can +run. Furthermore, you should also have an account with +`AWS `__. You will likely need to provide credit card +details for billing purposes, even if you use their +`free-tier `__. 
+ + +Logging into AWS +================ + +Point your browser to +https://console.aws.amazon.com +and log in using your AWS account. You will be presented with the AWS homepage: + +.. image:: images/aws_s3/01_aws_start.png + :alt: AWS Homepage + +By using the "Services" button in the upper left corder, a menu of all services +provided by AWS can be opened: + +.. image:: images/aws_s3/02_aws_menu.png + :alt: AWS Services Menu + +For this tutorial, the Simple Storage Service (S3), as well as Identity and +Access Management (IAM) are relevant. + + +Creating the bucket +=================== + +First, a bucket to store your backups in must be created. Using the "Services" +menu, navigate to S3. In case you already have some S3 buckets, you will see a +list of them here: + +.. image:: images/aws_s3/03_buckets_list_before.png + :alt: List of S3 Buckets + +Click the "Create bucket" button and choose a name and region for your new +bucket. For the purpose of this tutorial, the bucket will be named +``restic-demo`` and reside in Frankfurt. Because the bucket name space is +shared among all AWS users, the name ``restic-demo`` may not be available to +you. Be creative and choose a unique bucket name. + +.. image:: images/aws_s3/04_bucket_create_start.png + :alt: Create a Bucket + +It is not necessary to configure any special properties or permissions of the +bucket just yet. Therefore, just finish the wizard without making any further +changes: + +.. image:: images/aws_s3/05_bucket_create_review.png + :alt: Review Bucket Creation + +The newly created ``restic-demo`` bucket will now appear on the list of S3 +buckets: + +.. image:: images/aws_s3/06_buckets_list_after.png + :alt: List With New Bucket + +Creating a user +=============== + +Use the "Services" menu of the AWS web interface to navigate to IAM. This will +bring you to the IAM homepage. To create a new user, click on the "Users" menu +entry on the left: + +.. image:: images/aws_s3/07_iam_start.png + :alt: IAM Home Page + +In case you already have set-up users with IAM before, you will see a list of +them here. Use the "Add user" button at the top to create a new user: + +.. image:: images/aws_s3/08_user_list.png + :alt: IAM User List + +For this tutorial, the new user will be named ``restic-demo-user``. Feel free to +choose your own name that best fits your needs. This user will only ever access +AWS through the ``restic`` program and not through the web interface. Therefore, +"Programmatic access" is selected for "Access type": + +.. image:: images/aws_s3/09_user_name.png + :alt: Choose User Name and Access Type + +During the next step, permissions can be assigned to the new user. To use this +user with restic, it only needs access to the ``restic-demo`` bucket. Select +"Attach existing policies directly", which will bring up a list of pre-defined +policies below. Afterwards, click the "Create policy" button to create a custom +policy: + +.. image:: images/aws_s3/10_user_pre_policy.png + :alt: Assign a Policy + +A new browser window or tab will open with the policy wizard. In Amazon IAM, +policies are defined as JSON documents. For this tutorial, the "Visual editor" +will be used to generate a policy: + +.. image:: images/aws_s3/11_policy_start.png + :alt: Create a New Policy + +For restic to work, two permission statements must be created using the visual +policy editor. The first statement is set up as follows: + +.. 
code:: + + Service: S3 + Allow Actions: DeleteObject, GetObject, PutObject + Resources: arn:aws:s3:::restic-demo/* + +This statement allows restic to create, read and delete objects inside the S3 +bucket named ``restic-demo``. Adjust the bucket's name to the name of the +bucket you created earlier. Next, add a second statement using the "Add +additional permissions" button: + +.. code:: + + Service: S3 + Allow Actions: ListBucket, GetBucketLocation + Resource: arn:aws:s3:::restic-demo + +Again, substitute ``restic-demo`` with the actual name of your bucket. Note +that, unlike before, there is no ``/*`` after the bucket name. This statement +allows restic to list the objects stored in the ``restic-demo`` bucket and to +query the bucket's region. + +Continue to the next step by clicking the "Review policy" button and enter a +name and description for this policy. For this tutorial, the policy will be +named ``restic-demo-policy``. Click "Create policy" to finish the process: + +.. image:: images/aws_s3/13_policy_review.png + :alt: Policy Review + +Go back to the browser window or tab where you were previously creating the new +user. Click the button labeled "Refresh" above the list of policies to make +sure the newly created policy is available to you. Afterwards, use the search +function to search for the ``restic-demo-policy``. Select this policy using the +checkbox on the left. Then, continue to the next step. + +.. image:: images/aws_s3/14_user_attach_policy.png + :alt: Attach Policy to User + +The next page will present an overview of the user account that is about to be +created. If everything looks good, click "Create user" to complete the process: + +.. image:: images/aws_s3/15_user_review.png + :alt: User Creation Review + +After the user has been created, its access credentials will be displayed. They +consist of the "Access key ID" (think user name), and the "Secret access key" +(think password). Copy these down to a safe place. + +.. image:: images/aws_s3/16_user_created.png + :alt: User Credentials + +You have now completed the configuration in AWS. Feel free to close your web +browser now. + + +Initializing the restic repository +================================== + +Open a terminal and make sure you have the ``restic`` binary ready. First, choose +a password to encrypt your backups with. In this tutorial, ``apg`` is used for +this purpose: + +.. code-block:: console + + $ apg -a 1 -m 32 -n 1 -M NCL + I9n7G7G0ZpDWA3GOcJbIuwQCGvGUBkU5 + +Note this password somewhere safe along with your AWS credentials. Next, the +configuration of restic will be placed into environment variables. This will +include sensitive information, such as your AWS secret and repository password. +Therefore, make sure the next commands **do not** end up in your shell's +history file. Adjust the contents of the environment variables to fit your +bucket's name and your user's API credentials. + +.. code-block:: console + + $ unset HISTFILE + $ export RESTIC_REPOSITORY="s3:https://s3.amazonaws.com/restic-demo" + $ export AWS_ACCESS_KEY_ID="AKIAJAJSLTZCAZ4SRI5Q" + $ export AWS_SECRET_ACCESS_KEY="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" + $ export RESTIC_PASSWORD="I9n7G7G0ZpDWA3GOcJbIuwQCGvGUBkU5" + + +After the environment is set up, restic may be called to initialize the +repository: + + +.. code-block:: console + + $ ./restic init + created restic backend b5c661a86a at s3:https://s3.amazonaws.com/restic-demo + + Please note that knowledge of your password is required to access + the repository. 
Losing your password means that your data is + irrecoverably lost. + +restic is now ready to be used with AWS S3. Try to create a backup: + +.. code-block:: console + + $ dd if=/dev/urandom bs=1M count=10 of=test.bin + 10+0 records in + 10+0 records out + 10485760 bytes (10 MB, 10 MiB) copied, 0,0891322 s, 118 MB/s + + $ ./restic backup test.bin + scan [/home/philip/restic-demo/test.bin] + scanned 0 directories, 1 files in 0:00 + [0:04] 100.00% 2.500 MiB/s 10.000 MiB / 10.000 MiB 1 / 1 items ... ETA 0:00 + duration: 0:04, 2.47MiB/s + snapshot 10fdbace saved + + $ ./restic snapshots + ID Date Host Tags Directory + ---------------------------------------------------------------------- + 10fdbace 2017-03-26 16:41:50 blackbox /home/philip/restic-demo/test.bin + +A snapshot was created and stored in the S3 bucket. This snapshot may now be +restored: + +.. code-block:: console + + $ mkdir restore + + $ ./restic restore 10fdbace --target restore + restoring to restore + + $ ls restore/ + test.bin + +The snapshot was successfully restored. This concludes the tutorial. + + +***************************************************** +Backing up your system without running restic as root +***************************************************** + +Motivation +========== + +Creating a complete backup of a machine requires a privileged process +that is able to read all files. On UNIX-like systems this is +traditionally the ``root`` user. Processes running as root have +superpower. They cannot only read all files but do also have the power +to modify the system in any possible way. + +With great power comes great responsibility. If a process running as +root malfunctions, is exploited, or simply configured in a wrong way it +can cause any possible damage to the system. This means you only want +to run programs as root that you trust completely. And even if you +trust a program, it is good and common practice to run it with the +least possible privileges. + +Capabilities on Linux +===================== + +Fortunately, Linux has functionality to divide root's power into +single separate *capabilities*. You can remove these from a process +running as root to restrict it. And you can add capabilities to a +process running as a normal user, which is what we are going to do. + +Full backup without root +======================== + +To be able to completely backup a system, restic has to read all the +files. Luckily Linux knows a capability that allows precisely this. We +can assign this single capability to restic and then run it as an +unprivileged user. + +First we create a new user called ``restic`` that is going to create +the backups: + +.. code-block:: console + + root@a3e580b6369d:/# useradd -m restic + +Then we download and install the restic binary into the user's home +directory. + +.. code-block:: console + + root@a3e580b6369d:/# mkdir ~restic/bin + root@a3e580b6369d:/# curl -L https://github.com/restic/restic/releases/download/v0.9.1/restic_0.9.1_linux_amd64.bz2 | bunzip2 > ~restic/bin/restic + +Before we assign any special capability to the restic binary we +restrict its permissions so that only root and the newly created +restic user can execute it. Otherwise another - possibly untrusted - +user could misuse the privileged restic binary to circumvent file +access controls. + +.. code-block:: console + + root@a3e580b6369d:/# chown root:restic ~restic/bin/restic + root@a3e580b6369d:/# chmod 750 ~restic/bin/restic + +Finally we can use ``setcap`` to add an extended attribute to the +restic binary. 
On every execution the system will read the extended +attribute, interpret it and assign capabilities accordingly. + +.. code-block:: console + + root@a3e580b6369d:/# setcap cap_dac_read_search=+ep ~restic/bin/restic + +From now on the user ``restic`` can run restic to backup the whole +system. + +.. code-block:: console + + root@a3e580b6369d:/# sudo -u restic /home/restic/bin/restic --exclude={/dev,/media,/mnt,/proc,/run,/sys,/tmp,/var/tmp} -r /tmp backup / + diff --git a/doc/090_participating.rst b/doc/090_participating.rst new file mode 100644 index 000000000..c909dd4a3 --- /dev/null +++ b/doc/090_participating.rst @@ -0,0 +1,142 @@ +.. + Normally, there are no heading levels assigned to certain characters as the structure is + determined from the succession of headings. However, this convention is used in Python’s + Style Guide for documenting which you may follow: + + # with overline, for parts + * for chapters + = for sections + - for subsections + ^ for subsubsections + " for paragraphs + +############# +Participating +############# + +********* +Debugging +********* + +The program can be built with debug support like this: + +.. code-block:: console + + $ go run build.go -mod=vendor -tags debug + +For Go < 1.11, the option ``-mod=vendor`` needs to be removed. + +Afterwards, extensive debug messages are written to the file in +environment variable ``DEBUG_LOG``, e.g.: + +.. code-block:: console + + $ DEBUG_LOG=/tmp/restic-debug.log restic backup ~/work + +If you suspect that there is a bug, you can have a look at the debug +log. Please be aware that the debug log might contain sensitive +information such as file and directory names. + +The debug log will always contain all log messages restic generates. You +can also instruct restic to print some or all debug messages to stderr. +These can also be limited to e.g. a list of source files or a list of +patterns for function names. The patterns are globbing patterns (see the +documentation for `path.Glob `__), multiple +patterns are separated by commas. Patterns are case sensitive. + +Printing all log messages to the console can be achieved by setting the +file filter to ``*``: + +.. code-block:: console + + $ DEBUG_FILES=* restic check + +If you want restic to just print all debug log messages from the files +``main.go`` and ``lock.go``, set the environment variable +``DEBUG_FILES`` like this: + +.. code-block:: console + + $ DEBUG_FILES=main.go,lock.go restic check + +The following command line instructs restic to only print debug +statements originating in functions that match the pattern ``*unlock*`` +(case sensitive): + +.. code-block:: console + + $ DEBUG_FUNCS=*unlock* restic check + + +************ +Contributing +************ + +Contributions are welcome! Please **open an issue first** (or add a +comment to an existing issue) if you plan to work on any code or add a +new feature. This way, duplicate work is prevented and we can discuss +your ideas and design first. + +More information and a description of the development environment can be +found in `CONTRIBUTING.md `__. +A document describing the design of restic and the data structures stored on the +back end is contained in `Design `__. 
+ +If you'd like to start contributing to restic, but don't know exactly +what do to, have a look at this great article by Dave Cheney: +`Suggestions for contributing to an Open Source +project `__ +A few issues have been tagged with the label ``help wanted``, you can +start looking at those: +https://github.com/restic/restic/labels/help%20wanted + +******** +Security +******** + +**Important**: If you discover something that you believe to be a +possible critical security problem, please do *not* open a GitHub issue +but send an email directly to alexander@bumpern.de. If possible, please +encrypt your email using the following PGP key +(`0x91A6868BD3F7A907 `__): + +:: + + pub 4096R/91A6868BD3F7A907 2014-11-01 + Key fingerprint = CF8F 18F2 8445 7597 3F79 D4E1 91A6 868B D3F7 A907 + uid Alexander Neumann + sub 4096R/D5FC2ACF4043FDF1 2014-11-01 + +************* +Compatibility +************* + +Backward compatibility for backups is important so that our users are +always able to restore saved data. Therefore restic follows `Semantic +Versioning `__ to clearly define which versions are +compatible. The repository and data structures contained therein are +considered the "Public API" in the sense of Semantic Versioning. This +goes for all released versions of restic, this may not be the case for +the master branch. + +We guarantee backward compatibility of all repositories within one major +version; as long as we do not increment the major version, data can be +read and restored. We strive to be fully backward compatible to all +prior versions. + +********************** +Building documentation +********************** + +The restic documentation is built with `Sphinx `__, +therefore building it locally requires a recent Python version and requirements listed in ``doc/requirements.txt``. +This example will guide you through the process using `virtualenv `__: + +:: + + $ virtualenv venv # create virtual python environment + $ source venv/bin/activate # activate the virtual environment + $ cd doc + $ pip install -r requirements.txt # install dependencies + $ make html # build html documentation + $ # open _build/html/index.html with your favorite browser diff --git a/doc/100_references.rst b/doc/100_references.rst new file mode 100644 index 000000000..4be4e0dae --- /dev/null +++ b/doc/100_references.rst @@ -0,0 +1,23 @@ +.. + Normally, there are no heading levels assigned to certain characters as the structure is + determined from the succession of headings. However, this convention is used in Python’s + Style Guide for documenting which you may follow: + + # with overline, for parts + * for chapters + = for sections + - for subsections + ^ for subsubsections + " for paragraphs + +########## +References +########## + +****** +Design +****** + +.. include:: design.rst +.. include:: cache.rst +.. include:: REST_backend.rst diff --git a/doc/110_talks.rst b/doc/110_talks.rst new file mode 100644 index 000000000..06952896f --- /dev/null +++ b/doc/110_talks.rst @@ -0,0 +1,34 @@ +.. + Normally, there are no heading levels assigned to certain characters as the structure is + determined from the succession of headings. 
However, this convention is used in Python’s + Style Guide for documenting which you may follow: + + # with overline, for parts + * for chapters + = for sections + - for subsections + ^ for subsubsections + " for paragraphs + + +##### +Talks +##### + +The following talks will be or have been given about restic: + +- 2016-01-31: Lightning Talk at the Go Devroom at FOSDEM 2016, + Brussels, Belgium +- 2016-01-29: `restic - Backups mal + richtig `__: + Public lecture in German at `CCC Cologne + e.V. `__ in Cologne, Germany +- 2015-08-23: `A Solution to the Backup + Inconvenience `__: + Lecture at `FROSCON 2015 `__ in Bonn, Germany +- 2015-02-01: `Lightning Talk at FOSDEM + 2015 `__: A + short introduction (with slightly outdated command line) +- 2015-01-27: `Talk about restic at CCC + Aachen `__ + (in German) diff --git a/doc/Makefile b/doc/Makefile new file mode 100644 index 000000000..a296c0742 --- /dev/null +++ b/doc/Makefile @@ -0,0 +1,25 @@ +# Minimal makefile for Sphinx documentation +# + +# You can set these variables from the command line. +SPHINXOPTS = +SPHINXBUILD = sphinx-build +SPHINXPROJ = restic +SOURCEDIR = . +BUILDDIR = _build + +# Put it first so that "make" without argument is like "make help". +help: + @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) + +.PHONY: help Makefile + +# Catch-all target: route all unknown targets to Sphinx using the new +# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). +%: Makefile + @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) + +.PHONY: autobuild + +autobuild: + sphinx-autobuild -b html -i '.doctrees/*' . _build diff --git a/doc/REST_backend.rst b/doc/REST_backend.rst new file mode 100644 index 000000000..f9d72cf06 --- /dev/null +++ b/doc/REST_backend.rst @@ -0,0 +1,145 @@ +************ +REST Backend +************ + +Restic can interact with HTTP Backend that respects the following REST +API. + +The following values are valid for ``{type}``: + + * ``data`` + * ``keys`` + * ``locks`` + * ``snapshots`` + * ``index`` + * ``config`` + +The API version is selected via the ``Accept`` HTTP header in the request. The +following values are defined: + + * ``application/vnd.x.restic.rest.v1`` or empty: Select API version 1 + * ``application/vnd.x.restic.rest.v2``: Select API version 2 + +The server will respond with the value of the highest version it supports in +the ``Content-Type`` HTTP response header for the HTTP requests which should +return JSON. Any different value for this header means API version 1. + +The placeholder ``{path}`` in this document is a path to the repository, so +that multiple different repositories can be accessed. The default path is +``/``. The path must end with a slash. + +POST {path}?create=true +======================= + +This request is used to initially create a new repository. The server +responds with "200 OK" if the repository structure was created +successfully or already exists, otherwise an error is returned. + +DELETE {path} +============= + +Deletes the repository on the server side. The server responds with "200 +OK" if the repository was successfully removed. If this function is not +implemented the server returns "501 Not Implemented", if this it is +denied by the server it returns "403 Forbidden". + +HEAD {path}/config +================== + +Returns "200 OK" if the repository has a configuration, an HTTP error +otherwise. 
+ +GET {path}/config +================= + +Returns the content of the configuration file if the repository has a +configuration, an HTTP error otherwise. + +Response format: binary/octet-stream + +POST {path}/config +================== + +Returns "200 OK" if the configuration of the request body has been +saved, an HTTP error otherwise. + +GET {path}/{type}/ +================== + +API version 1 +------------- + +Returns a JSON array containing the names of all the blobs stored for a given +type, example: + +.. code:: json + + [ + "245bc4c430d393f74fbe7b13325e30dbde9fb0745e50caad57c446c93d20096b", + "85b420239efa1132c41cea0065452a40ebc20c6f8e0b132a5b2f5848360973ec", + "8e2006bb5931a520f3c7009fe278d1ebb87eb72c3ff92a50c30e90f1b8cf3e60", + "e75c8c407ea31ba399ab4109f28dd18c4c68303d8d86cc275432820c42ce3649" + ] + +API version 2 +------------- + +Returns a JSON array containing an object for each file of the given type. The +objects have two keys: ``name`` for the file name, and ``size`` for the size in +bytes. + +.. code:: json + + [ + { + "name": "245bc4c430d393f74fbe7b13325e30dbde9fb0745e50caad57c446c93d20096b", + "size": 2341058 + }, + { + "name": "85b420239efa1132c41cea0065452a40ebc20c6f8e0b132a5b2f5848360973ec", + "size": 2908900 + }, + { + "name": "8e2006bb5931a520f3c7009fe278d1ebb87eb72c3ff92a50c30e90f1b8cf3e60", + "size": 3030712 + }, + { + "name": "e75c8c407ea31ba399ab4109f28dd18c4c68303d8d86cc275432820c42ce3649", + "size": 2804 + } + ] + +HEAD {path}/{type}/{name} +========================= + +Returns "200 OK" if the blob with the given name and type is stored in +the repository, "404 not found" otherwise. If the blob exists, the HTTP +header ``Content-Length`` is set to the file size. + +GET {path}/{type}/{name} +======================== + +Returns the content of the blob with the given name and type if it is +stored in the repository, "404 not found" otherwise. + +If the request specifies a partial read with a Range header field, then +the status code of the response is 206 instead of 200 and the response +only contains the specified range. + +Response format: binary/octet-stream + +POST {path}/{type}/{name} +========================= + +Saves the content of the request body as a blob with the given name and +type, an HTTP error otherwise. + +Request format: binary/octet-stream + +DELETE {path}/{type}/{name} +=========================== + +Returns "200 OK" if the blob with the given name and type has been +deleted from the repository, an HTTP error otherwise. + + diff --git a/doc/_static/css/restic.css b/doc/_static/css/restic.css new file mode 100644 index 000000000..a4cf25421 --- /dev/null +++ b/doc/_static/css/restic.css @@ -0,0 +1,10 @@ +@import url('theme.css'); + +.wy-side-nav-search { + background-color: #0000b4; +} + +.logo { + height: 50% !important; + width: 50% !important; +} diff --git a/doc/_static/favicon.ico b/doc/_static/favicon.ico new file mode 100644 index 000000000..d706a157d Binary files /dev/null and b/doc/_static/favicon.ico differ diff --git a/doc/bash-completion.sh b/doc/bash-completion.sh new file mode 100644 index 000000000..11806685f --- /dev/null +++ b/doc/bash-completion.sh @@ -0,0 +1,1663 @@ +# bash completion for restic -*- shell-script -*- + +__restic_debug() +{ + if [[ -n ${BASH_COMP_DEBUG_FILE} ]]; then + echo "$*" >> "${BASH_COMP_DEBUG_FILE}" + fi +} + +# Homebrew on Macs have version 1.3 of bash-completion which doesn't include +# _init_completion. This is a very minimal version of that function. 
+__restic_init_completion() +{ + COMPREPLY=() + _get_comp_words_by_ref "$@" cur prev words cword +} + +__restic_index_of_word() +{ + local w word=$1 + shift + index=0 + for w in "$@"; do + [[ $w = "$word" ]] && return + index=$((index+1)) + done + index=-1 +} + +__restic_contains_word() +{ + local w word=$1; shift + for w in "$@"; do + [[ $w = "$word" ]] && return + done + return 1 +} + +__restic_handle_reply() +{ + __restic_debug "${FUNCNAME[0]}" + case $cur in + -*) + if [[ $(type -t compopt) = "builtin" ]]; then + compopt -o nospace + fi + local allflags + if [ ${#must_have_one_flag[@]} -ne 0 ]; then + allflags=("${must_have_one_flag[@]}") + else + allflags=("${flags[*]} ${two_word_flags[*]}") + fi + COMPREPLY=( $(compgen -W "${allflags[*]}" -- "$cur") ) + if [[ $(type -t compopt) = "builtin" ]]; then + [[ "${COMPREPLY[0]}" == *= ]] || compopt +o nospace + fi + + # complete after --flag=abc + if [[ $cur == *=* ]]; then + if [[ $(type -t compopt) = "builtin" ]]; then + compopt +o nospace + fi + + local index flag + flag="${cur%=*}" + __restic_index_of_word "${flag}" "${flags_with_completion[@]}" + COMPREPLY=() + if [[ ${index} -ge 0 ]]; then + PREFIX="" + cur="${cur#*=}" + ${flags_completion[${index}]} + if [ -n "${ZSH_VERSION}" ]; then + # zsh completion needs --flag= prefix + eval "COMPREPLY=( \"\${COMPREPLY[@]/#/${flag}=}\" )" + fi + fi + fi + return 0; + ;; + esac + + # check if we are handling a flag with special work handling + local index + __restic_index_of_word "${prev}" "${flags_with_completion[@]}" + if [[ ${index} -ge 0 ]]; then + ${flags_completion[${index}]} + return + fi + + # we are parsing a flag and don't have a special handler, no completion + if [[ ${cur} != "${words[cword]}" ]]; then + return + fi + + local completions + completions=("${commands[@]}") + if [[ ${#must_have_one_noun[@]} -ne 0 ]]; then + completions=("${must_have_one_noun[@]}") + fi + if [[ ${#must_have_one_flag[@]} -ne 0 ]]; then + completions+=("${must_have_one_flag[@]}") + fi + COMPREPLY=( $(compgen -W "${completions[*]}" -- "$cur") ) + + if [[ ${#COMPREPLY[@]} -eq 0 && ${#noun_aliases[@]} -gt 0 && ${#must_have_one_noun[@]} -ne 0 ]]; then + COMPREPLY=( $(compgen -W "${noun_aliases[*]}" -- "$cur") ) + fi + + if [[ ${#COMPREPLY[@]} -eq 0 ]]; then + declare -F __custom_func >/dev/null && __custom_func + fi + + # available in bash-completion >= 2, not always present on macOS + if declare -F __ltrim_colon_completions >/dev/null; then + __ltrim_colon_completions "$cur" + fi + + # If there is only 1 completion and it is a flag with an = it will be completed + # but we don't want a space after the = + if [[ "${#COMPREPLY[@]}" -eq "1" ]] && [[ $(type -t compopt) = "builtin" ]] && [[ "${COMPREPLY[0]}" == --*= ]]; then + compopt -o nospace + fi +} + +# The arguments should be in the form "ext1|ext2|extn" +__restic_handle_filename_extension_flag() +{ + local ext="$1" + _filedir "@(${ext})" +} + +__restic_handle_subdirs_in_dir_flag() +{ + local dir="$1" + pushd "${dir}" >/dev/null 2>&1 && _filedir -d && popd >/dev/null 2>&1 +} + +__restic_handle_flag() +{ + __restic_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}" + + # if a command required a flag, and we found it, unset must_have_one_flag() + local flagname=${words[c]} + local flagvalue + # if the word contained an = + if [[ ${words[c]} == *"="* ]]; then + flagvalue=${flagname#*=} # take in as flagvalue after the = + flagname=${flagname%=*} # strip everything after the = + flagname="${flagname}=" # but put the = back + fi + __restic_debug 
"${FUNCNAME[0]}: looking for ${flagname}" + if __restic_contains_word "${flagname}" "${must_have_one_flag[@]}"; then + must_have_one_flag=() + fi + + # if you set a flag which only applies to this command, don't show subcommands + if __restic_contains_word "${flagname}" "${local_nonpersistent_flags[@]}"; then + commands=() + fi + + # keep flag value with flagname as flaghash + # flaghash variable is an associative array which is only supported in bash > 3. + if [[ -z "${BASH_VERSION}" || "${BASH_VERSINFO[0]}" -gt 3 ]]; then + if [ -n "${flagvalue}" ] ; then + flaghash[${flagname}]=${flagvalue} + elif [ -n "${words[ $((c+1)) ]}" ] ; then + flaghash[${flagname}]=${words[ $((c+1)) ]} + else + flaghash[${flagname}]="true" # pad "true" for bool flag + fi + fi + + # skip the argument to a two word flag + if __restic_contains_word "${words[c]}" "${two_word_flags[@]}"; then + c=$((c+1)) + # if we are looking for a flags value, don't show commands + if [[ $c -eq $cword ]]; then + commands=() + fi + fi + + c=$((c+1)) + +} + +__restic_handle_noun() +{ + __restic_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}" + + if __restic_contains_word "${words[c]}" "${must_have_one_noun[@]}"; then + must_have_one_noun=() + elif __restic_contains_word "${words[c]}" "${noun_aliases[@]}"; then + must_have_one_noun=() + fi + + nouns+=("${words[c]}") + c=$((c+1)) +} + +__restic_handle_command() +{ + __restic_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}" + + local next_command + if [[ -n ${last_command} ]]; then + next_command="_${last_command}_${words[c]//:/__}" + else + if [[ $c -eq 0 ]]; then + next_command="_restic_root_command" + else + next_command="_${words[c]//:/__}" + fi + fi + c=$((c+1)) + __restic_debug "${FUNCNAME[0]}: looking for ${next_command}" + declare -F "$next_command" >/dev/null && $next_command +} + +__restic_handle_word() +{ + if [[ $c -ge $cword ]]; then + __restic_handle_reply + return + fi + __restic_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}" + if [[ "${words[c]}" == -* ]]; then + __restic_handle_flag + elif __restic_contains_word "${words[c]}" "${commands[@]}"; then + __restic_handle_command + elif [[ $c -eq 0 ]]; then + __restic_handle_command + elif __restic_contains_word "${words[c]}" "${command_aliases[@]}"; then + # aliashash variable is an associative array which is only supported in bash > 3. 
+ if [[ -z "${BASH_VERSION}" || "${BASH_VERSINFO[0]}" -gt 3 ]]; then + words[c]=${aliashash[${words[c]}]} + __restic_handle_command + else + __restic_handle_noun + fi + else + __restic_handle_noun + fi + __restic_handle_word +} + +_restic_backup() +{ + last_command="restic_backup" + + command_aliases=() + + commands=() + + flags=() + two_word_flags=() + local_nonpersistent_flags=() + flags_with_completion=() + flags_completion=() + + flags+=("--exclude=") + two_word_flags+=("-e") + local_nonpersistent_flags+=("--exclude=") + flags+=("--exclude-caches") + local_nonpersistent_flags+=("--exclude-caches") + flags+=("--exclude-file=") + local_nonpersistent_flags+=("--exclude-file=") + flags+=("--exclude-if-present=") + local_nonpersistent_flags+=("--exclude-if-present=") + flags+=("--files-from=") + local_nonpersistent_flags+=("--files-from=") + flags+=("--force") + flags+=("-f") + local_nonpersistent_flags+=("--force") + flags+=("--help") + flags+=("-h") + local_nonpersistent_flags+=("--help") + flags+=("--host=") + two_word_flags+=("-H") + local_nonpersistent_flags+=("--host=") + flags+=("--one-file-system") + flags+=("-x") + local_nonpersistent_flags+=("--one-file-system") + flags+=("--parent=") + local_nonpersistent_flags+=("--parent=") + flags+=("--stdin") + local_nonpersistent_flags+=("--stdin") + flags+=("--stdin-filename=") + local_nonpersistent_flags+=("--stdin-filename=") + flags+=("--tag=") + local_nonpersistent_flags+=("--tag=") + flags+=("--time=") + local_nonpersistent_flags+=("--time=") + flags+=("--with-atime") + local_nonpersistent_flags+=("--with-atime") + flags+=("--cacert=") + flags+=("--cache-dir=") + flags+=("--cleanup-cache") + flags+=("--json") + flags+=("--key-hint=") + flags+=("--limit-download=") + flags+=("--limit-upload=") + flags+=("--no-cache") + flags+=("--no-lock") + flags+=("--option=") + two_word_flags+=("-o") + flags+=("--password-command=") + flags+=("--password-file=") + two_word_flags+=("-p") + flags+=("--quiet") + flags+=("-q") + flags+=("--repo=") + two_word_flags+=("-r") + flags+=("--tls-client-cert=") + flags+=("--verbose") + flags+=("-v") + + must_have_one_flag=() + must_have_one_noun=() + noun_aliases=() +} + +_restic_cache() +{ + last_command="restic_cache" + + command_aliases=() + + commands=() + + flags=() + two_word_flags=() + local_nonpersistent_flags=() + flags_with_completion=() + flags_completion=() + + flags+=("--cleanup") + local_nonpersistent_flags+=("--cleanup") + flags+=("--help") + flags+=("-h") + local_nonpersistent_flags+=("--help") + flags+=("--max-age=") + local_nonpersistent_flags+=("--max-age=") + flags+=("--no-size") + local_nonpersistent_flags+=("--no-size") + flags+=("--cacert=") + flags+=("--cache-dir=") + flags+=("--cleanup-cache") + flags+=("--json") + flags+=("--key-hint=") + flags+=("--limit-download=") + flags+=("--limit-upload=") + flags+=("--no-cache") + flags+=("--no-lock") + flags+=("--option=") + two_word_flags+=("-o") + flags+=("--password-command=") + flags+=("--password-file=") + two_word_flags+=("-p") + flags+=("--quiet") + flags+=("-q") + flags+=("--repo=") + two_word_flags+=("-r") + flags+=("--tls-client-cert=") + flags+=("--verbose") + flags+=("-v") + + must_have_one_flag=() + must_have_one_noun=() + noun_aliases=() +} + +_restic_cat() +{ + last_command="restic_cat" + + command_aliases=() + + commands=() + + flags=() + two_word_flags=() + local_nonpersistent_flags=() + flags_with_completion=() + flags_completion=() + + flags+=("--help") + flags+=("-h") + local_nonpersistent_flags+=("--help") + 
flags+=("--cacert=") + flags+=("--cache-dir=") + flags+=("--cleanup-cache") + flags+=("--json") + flags+=("--key-hint=") + flags+=("--limit-download=") + flags+=("--limit-upload=") + flags+=("--no-cache") + flags+=("--no-lock") + flags+=("--option=") + two_word_flags+=("-o") + flags+=("--password-command=") + flags+=("--password-file=") + two_word_flags+=("-p") + flags+=("--quiet") + flags+=("-q") + flags+=("--repo=") + two_word_flags+=("-r") + flags+=("--tls-client-cert=") + flags+=("--verbose") + flags+=("-v") + + must_have_one_flag=() + must_have_one_noun=() + noun_aliases=() +} + +_restic_check() +{ + last_command="restic_check" + + command_aliases=() + + commands=() + + flags=() + two_word_flags=() + local_nonpersistent_flags=() + flags_with_completion=() + flags_completion=() + + flags+=("--check-unused") + local_nonpersistent_flags+=("--check-unused") + flags+=("--help") + flags+=("-h") + local_nonpersistent_flags+=("--help") + flags+=("--read-data") + local_nonpersistent_flags+=("--read-data") + flags+=("--read-data-subset=") + local_nonpersistent_flags+=("--read-data-subset=") + flags+=("--with-cache") + local_nonpersistent_flags+=("--with-cache") + flags+=("--cacert=") + flags+=("--cache-dir=") + flags+=("--cleanup-cache") + flags+=("--json") + flags+=("--key-hint=") + flags+=("--limit-download=") + flags+=("--limit-upload=") + flags+=("--no-cache") + flags+=("--no-lock") + flags+=("--option=") + two_word_flags+=("-o") + flags+=("--password-command=") + flags+=("--password-file=") + two_word_flags+=("-p") + flags+=("--quiet") + flags+=("-q") + flags+=("--repo=") + two_word_flags+=("-r") + flags+=("--tls-client-cert=") + flags+=("--verbose") + flags+=("-v") + + must_have_one_flag=() + must_have_one_noun=() + noun_aliases=() +} + +_restic_diff() +{ + last_command="restic_diff" + + command_aliases=() + + commands=() + + flags=() + two_word_flags=() + local_nonpersistent_flags=() + flags_with_completion=() + flags_completion=() + + flags+=("--help") + flags+=("-h") + local_nonpersistent_flags+=("--help") + flags+=("--metadata") + local_nonpersistent_flags+=("--metadata") + flags+=("--cacert=") + flags+=("--cache-dir=") + flags+=("--cleanup-cache") + flags+=("--json") + flags+=("--key-hint=") + flags+=("--limit-download=") + flags+=("--limit-upload=") + flags+=("--no-cache") + flags+=("--no-lock") + flags+=("--option=") + two_word_flags+=("-o") + flags+=("--password-command=") + flags+=("--password-file=") + two_word_flags+=("-p") + flags+=("--quiet") + flags+=("-q") + flags+=("--repo=") + two_word_flags+=("-r") + flags+=("--tls-client-cert=") + flags+=("--verbose") + flags+=("-v") + + must_have_one_flag=() + must_have_one_noun=() + noun_aliases=() +} + +_restic_dump() +{ + last_command="restic_dump" + + command_aliases=() + + commands=() + + flags=() + two_word_flags=() + local_nonpersistent_flags=() + flags_with_completion=() + flags_completion=() + + flags+=("--help") + flags+=("-h") + local_nonpersistent_flags+=("--help") + flags+=("--host=") + two_word_flags+=("-H") + local_nonpersistent_flags+=("--host=") + flags+=("--path=") + local_nonpersistent_flags+=("--path=") + flags+=("--tag=") + local_nonpersistent_flags+=("--tag=") + flags+=("--cacert=") + flags+=("--cache-dir=") + flags+=("--cleanup-cache") + flags+=("--json") + flags+=("--key-hint=") + flags+=("--limit-download=") + flags+=("--limit-upload=") + flags+=("--no-cache") + flags+=("--no-lock") + flags+=("--option=") + two_word_flags+=("-o") + flags+=("--password-command=") + flags+=("--password-file=") + 
two_word_flags+=("-p") + flags+=("--quiet") + flags+=("-q") + flags+=("--repo=") + two_word_flags+=("-r") + flags+=("--tls-client-cert=") + flags+=("--verbose") + flags+=("-v") + + must_have_one_flag=() + must_have_one_noun=() + noun_aliases=() +} + +_restic_find() +{ + last_command="restic_find" + + command_aliases=() + + commands=() + + flags=() + two_word_flags=() + local_nonpersistent_flags=() + flags_with_completion=() + flags_completion=() + + flags+=("--blob") + local_nonpersistent_flags+=("--blob") + flags+=("--help") + flags+=("-h") + local_nonpersistent_flags+=("--help") + flags+=("--host=") + two_word_flags+=("-H") + local_nonpersistent_flags+=("--host=") + flags+=("--ignore-case") + flags+=("-i") + local_nonpersistent_flags+=("--ignore-case") + flags+=("--long") + flags+=("-l") + local_nonpersistent_flags+=("--long") + flags+=("--newest=") + two_word_flags+=("-N") + local_nonpersistent_flags+=("--newest=") + flags+=("--oldest=") + two_word_flags+=("-O") + local_nonpersistent_flags+=("--oldest=") + flags+=("--pack") + local_nonpersistent_flags+=("--pack") + flags+=("--path=") + local_nonpersistent_flags+=("--path=") + flags+=("--show-pack-id") + local_nonpersistent_flags+=("--show-pack-id") + flags+=("--snapshot=") + two_word_flags+=("-s") + local_nonpersistent_flags+=("--snapshot=") + flags+=("--tag=") + local_nonpersistent_flags+=("--tag=") + flags+=("--tree") + local_nonpersistent_flags+=("--tree") + flags+=("--cacert=") + flags+=("--cache-dir=") + flags+=("--cleanup-cache") + flags+=("--json") + flags+=("--key-hint=") + flags+=("--limit-download=") + flags+=("--limit-upload=") + flags+=("--no-cache") + flags+=("--no-lock") + flags+=("--option=") + two_word_flags+=("-o") + flags+=("--password-command=") + flags+=("--password-file=") + two_word_flags+=("-p") + flags+=("--quiet") + flags+=("-q") + flags+=("--repo=") + two_word_flags+=("-r") + flags+=("--tls-client-cert=") + flags+=("--verbose") + flags+=("-v") + + must_have_one_flag=() + must_have_one_noun=() + noun_aliases=() +} + +_restic_forget() +{ + last_command="restic_forget" + + command_aliases=() + + commands=() + + flags=() + two_word_flags=() + local_nonpersistent_flags=() + flags_with_completion=() + flags_completion=() + + flags+=("--keep-last=") + two_word_flags+=("-l") + local_nonpersistent_flags+=("--keep-last=") + flags+=("--keep-hourly=") + two_word_flags+=("-H") + local_nonpersistent_flags+=("--keep-hourly=") + flags+=("--keep-daily=") + two_word_flags+=("-d") + local_nonpersistent_flags+=("--keep-daily=") + flags+=("--keep-weekly=") + two_word_flags+=("-w") + local_nonpersistent_flags+=("--keep-weekly=") + flags+=("--keep-monthly=") + two_word_flags+=("-m") + local_nonpersistent_flags+=("--keep-monthly=") + flags+=("--keep-yearly=") + two_word_flags+=("-y") + local_nonpersistent_flags+=("--keep-yearly=") + flags+=("--keep-within=") + local_nonpersistent_flags+=("--keep-within=") + flags+=("--keep-tag=") + local_nonpersistent_flags+=("--keep-tag=") + flags+=("--host=") + local_nonpersistent_flags+=("--host=") + flags+=("--tag=") + local_nonpersistent_flags+=("--tag=") + flags+=("--path=") + local_nonpersistent_flags+=("--path=") + flags+=("--compact") + flags+=("-c") + local_nonpersistent_flags+=("--compact") + flags+=("--group-by=") + two_word_flags+=("-g") + local_nonpersistent_flags+=("--group-by=") + flags+=("--dry-run") + flags+=("-n") + local_nonpersistent_flags+=("--dry-run") + flags+=("--prune") + local_nonpersistent_flags+=("--prune") + flags+=("--help") + flags+=("-h") + 
local_nonpersistent_flags+=("--help") + flags+=("--cacert=") + flags+=("--cache-dir=") + flags+=("--cleanup-cache") + flags+=("--json") + flags+=("--key-hint=") + flags+=("--limit-download=") + flags+=("--limit-upload=") + flags+=("--no-cache") + flags+=("--no-lock") + flags+=("--option=") + two_word_flags+=("-o") + flags+=("--password-command=") + flags+=("--password-file=") + two_word_flags+=("-p") + flags+=("--quiet") + flags+=("-q") + flags+=("--repo=") + two_word_flags+=("-r") + flags+=("--tls-client-cert=") + flags+=("--verbose") + flags+=("-v") + + must_have_one_flag=() + must_have_one_noun=() + noun_aliases=() +} + +_restic_generate() +{ + last_command="restic_generate" + + command_aliases=() + + commands=() + + flags=() + two_word_flags=() + local_nonpersistent_flags=() + flags_with_completion=() + flags_completion=() + + flags+=("--bash-completion=") + local_nonpersistent_flags+=("--bash-completion=") + flags+=("--help") + flags+=("-h") + local_nonpersistent_flags+=("--help") + flags+=("--man=") + local_nonpersistent_flags+=("--man=") + flags+=("--zsh-completion=") + local_nonpersistent_flags+=("--zsh-completion=") + flags+=("--cacert=") + flags+=("--cache-dir=") + flags+=("--cleanup-cache") + flags+=("--json") + flags+=("--key-hint=") + flags+=("--limit-download=") + flags+=("--limit-upload=") + flags+=("--no-cache") + flags+=("--no-lock") + flags+=("--option=") + two_word_flags+=("-o") + flags+=("--password-command=") + flags+=("--password-file=") + two_word_flags+=("-p") + flags+=("--quiet") + flags+=("-q") + flags+=("--repo=") + two_word_flags+=("-r") + flags+=("--tls-client-cert=") + flags+=("--verbose") + flags+=("-v") + + must_have_one_flag=() + must_have_one_noun=() + noun_aliases=() +} + +_restic_init() +{ + last_command="restic_init" + + command_aliases=() + + commands=() + + flags=() + two_word_flags=() + local_nonpersistent_flags=() + flags_with_completion=() + flags_completion=() + + flags+=("--help") + flags+=("-h") + local_nonpersistent_flags+=("--help") + flags+=("--cacert=") + flags+=("--cache-dir=") + flags+=("--cleanup-cache") + flags+=("--json") + flags+=("--key-hint=") + flags+=("--limit-download=") + flags+=("--limit-upload=") + flags+=("--no-cache") + flags+=("--no-lock") + flags+=("--option=") + two_word_flags+=("-o") + flags+=("--password-command=") + flags+=("--password-file=") + two_word_flags+=("-p") + flags+=("--quiet") + flags+=("-q") + flags+=("--repo=") + two_word_flags+=("-r") + flags+=("--tls-client-cert=") + flags+=("--verbose") + flags+=("-v") + + must_have_one_flag=() + must_have_one_noun=() + noun_aliases=() +} + +_restic_key() +{ + last_command="restic_key" + + command_aliases=() + + commands=() + + flags=() + two_word_flags=() + local_nonpersistent_flags=() + flags_with_completion=() + flags_completion=() + + flags+=("--help") + flags+=("-h") + local_nonpersistent_flags+=("--help") + flags+=("--new-password-file=") + local_nonpersistent_flags+=("--new-password-file=") + flags+=("--cacert=") + flags+=("--cache-dir=") + flags+=("--cleanup-cache") + flags+=("--json") + flags+=("--key-hint=") + flags+=("--limit-download=") + flags+=("--limit-upload=") + flags+=("--no-cache") + flags+=("--no-lock") + flags+=("--option=") + two_word_flags+=("-o") + flags+=("--password-command=") + flags+=("--password-file=") + two_word_flags+=("-p") + flags+=("--quiet") + flags+=("-q") + flags+=("--repo=") + two_word_flags+=("-r") + flags+=("--tls-client-cert=") + flags+=("--verbose") + flags+=("-v") + + must_have_one_flag=() + must_have_one_noun=() + 
noun_aliases=() +} + +_restic_list() +{ + last_command="restic_list" + + command_aliases=() + + commands=() + + flags=() + two_word_flags=() + local_nonpersistent_flags=() + flags_with_completion=() + flags_completion=() + + flags+=("--help") + flags+=("-h") + local_nonpersistent_flags+=("--help") + flags+=("--cacert=") + flags+=("--cache-dir=") + flags+=("--cleanup-cache") + flags+=("--json") + flags+=("--key-hint=") + flags+=("--limit-download=") + flags+=("--limit-upload=") + flags+=("--no-cache") + flags+=("--no-lock") + flags+=("--option=") + two_word_flags+=("-o") + flags+=("--password-command=") + flags+=("--password-file=") + two_word_flags+=("-p") + flags+=("--quiet") + flags+=("-q") + flags+=("--repo=") + two_word_flags+=("-r") + flags+=("--tls-client-cert=") + flags+=("--verbose") + flags+=("-v") + + must_have_one_flag=() + must_have_one_noun=() + noun_aliases=() +} + +_restic_ls() +{ + last_command="restic_ls" + + command_aliases=() + + commands=() + + flags=() + two_word_flags=() + local_nonpersistent_flags=() + flags_with_completion=() + flags_completion=() + + flags+=("--help") + flags+=("-h") + local_nonpersistent_flags+=("--help") + flags+=("--host=") + two_word_flags+=("-H") + local_nonpersistent_flags+=("--host=") + flags+=("--long") + flags+=("-l") + local_nonpersistent_flags+=("--long") + flags+=("--path=") + local_nonpersistent_flags+=("--path=") + flags+=("--recursive") + local_nonpersistent_flags+=("--recursive") + flags+=("--tag=") + local_nonpersistent_flags+=("--tag=") + flags+=("--cacert=") + flags+=("--cache-dir=") + flags+=("--cleanup-cache") + flags+=("--json") + flags+=("--key-hint=") + flags+=("--limit-download=") + flags+=("--limit-upload=") + flags+=("--no-cache") + flags+=("--no-lock") + flags+=("--option=") + two_word_flags+=("-o") + flags+=("--password-command=") + flags+=("--password-file=") + two_word_flags+=("-p") + flags+=("--quiet") + flags+=("-q") + flags+=("--repo=") + two_word_flags+=("-r") + flags+=("--tls-client-cert=") + flags+=("--verbose") + flags+=("-v") + + must_have_one_flag=() + must_have_one_noun=() + noun_aliases=() +} + +_restic_migrate() +{ + last_command="restic_migrate" + + command_aliases=() + + commands=() + + flags=() + two_word_flags=() + local_nonpersistent_flags=() + flags_with_completion=() + flags_completion=() + + flags+=("--force") + flags+=("-f") + local_nonpersistent_flags+=("--force") + flags+=("--help") + flags+=("-h") + local_nonpersistent_flags+=("--help") + flags+=("--cacert=") + flags+=("--cache-dir=") + flags+=("--cleanup-cache") + flags+=("--json") + flags+=("--key-hint=") + flags+=("--limit-download=") + flags+=("--limit-upload=") + flags+=("--no-cache") + flags+=("--no-lock") + flags+=("--option=") + two_word_flags+=("-o") + flags+=("--password-command=") + flags+=("--password-file=") + two_word_flags+=("-p") + flags+=("--quiet") + flags+=("-q") + flags+=("--repo=") + two_word_flags+=("-r") + flags+=("--tls-client-cert=") + flags+=("--verbose") + flags+=("-v") + + must_have_one_flag=() + must_have_one_noun=() + noun_aliases=() +} + +_restic_mount() +{ + last_command="restic_mount" + + command_aliases=() + + commands=() + + flags=() + two_word_flags=() + local_nonpersistent_flags=() + flags_with_completion=() + flags_completion=() + + flags+=("--allow-other") + local_nonpersistent_flags+=("--allow-other") + flags+=("--allow-root") + local_nonpersistent_flags+=("--allow-root") + flags+=("--help") + flags+=("-h") + local_nonpersistent_flags+=("--help") + flags+=("--host=") + two_word_flags+=("-H") + 
local_nonpersistent_flags+=("--host=") + flags+=("--no-default-permissions") + local_nonpersistent_flags+=("--no-default-permissions") + flags+=("--owner-root") + local_nonpersistent_flags+=("--owner-root") + flags+=("--path=") + local_nonpersistent_flags+=("--path=") + flags+=("--snapshot-template=") + local_nonpersistent_flags+=("--snapshot-template=") + flags+=("--tag=") + local_nonpersistent_flags+=("--tag=") + flags+=("--cacert=") + flags+=("--cache-dir=") + flags+=("--cleanup-cache") + flags+=("--json") + flags+=("--key-hint=") + flags+=("--limit-download=") + flags+=("--limit-upload=") + flags+=("--no-cache") + flags+=("--no-lock") + flags+=("--option=") + two_word_flags+=("-o") + flags+=("--password-command=") + flags+=("--password-file=") + two_word_flags+=("-p") + flags+=("--quiet") + flags+=("-q") + flags+=("--repo=") + two_word_flags+=("-r") + flags+=("--tls-client-cert=") + flags+=("--verbose") + flags+=("-v") + + must_have_one_flag=() + must_have_one_noun=() + noun_aliases=() +} + +_restic_prune() +{ + last_command="restic_prune" + + command_aliases=() + + commands=() + + flags=() + two_word_flags=() + local_nonpersistent_flags=() + flags_with_completion=() + flags_completion=() + + flags+=("--help") + flags+=("-h") + local_nonpersistent_flags+=("--help") + flags+=("--cacert=") + flags+=("--cache-dir=") + flags+=("--cleanup-cache") + flags+=("--json") + flags+=("--key-hint=") + flags+=("--limit-download=") + flags+=("--limit-upload=") + flags+=("--no-cache") + flags+=("--no-lock") + flags+=("--option=") + two_word_flags+=("-o") + flags+=("--password-command=") + flags+=("--password-file=") + two_word_flags+=("-p") + flags+=("--quiet") + flags+=("-q") + flags+=("--repo=") + two_word_flags+=("-r") + flags+=("--tls-client-cert=") + flags+=("--verbose") + flags+=("-v") + + must_have_one_flag=() + must_have_one_noun=() + noun_aliases=() +} + +_restic_rebuild-index() +{ + last_command="restic_rebuild-index" + + command_aliases=() + + commands=() + + flags=() + two_word_flags=() + local_nonpersistent_flags=() + flags_with_completion=() + flags_completion=() + + flags+=("--help") + flags+=("-h") + local_nonpersistent_flags+=("--help") + flags+=("--cacert=") + flags+=("--cache-dir=") + flags+=("--cleanup-cache") + flags+=("--json") + flags+=("--key-hint=") + flags+=("--limit-download=") + flags+=("--limit-upload=") + flags+=("--no-cache") + flags+=("--no-lock") + flags+=("--option=") + two_word_flags+=("-o") + flags+=("--password-command=") + flags+=("--password-file=") + two_word_flags+=("-p") + flags+=("--quiet") + flags+=("-q") + flags+=("--repo=") + two_word_flags+=("-r") + flags+=("--tls-client-cert=") + flags+=("--verbose") + flags+=("-v") + + must_have_one_flag=() + must_have_one_noun=() + noun_aliases=() +} + +_restic_recover() +{ + last_command="restic_recover" + + command_aliases=() + + commands=() + + flags=() + two_word_flags=() + local_nonpersistent_flags=() + flags_with_completion=() + flags_completion=() + + flags+=("--help") + flags+=("-h") + local_nonpersistent_flags+=("--help") + flags+=("--cacert=") + flags+=("--cache-dir=") + flags+=("--cleanup-cache") + flags+=("--json") + flags+=("--key-hint=") + flags+=("--limit-download=") + flags+=("--limit-upload=") + flags+=("--no-cache") + flags+=("--no-lock") + flags+=("--option=") + two_word_flags+=("-o") + flags+=("--password-command=") + flags+=("--password-file=") + two_word_flags+=("-p") + flags+=("--quiet") + flags+=("-q") + flags+=("--repo=") + two_word_flags+=("-r") + flags+=("--tls-client-cert=") + 
flags+=("--verbose") + flags+=("-v") + + must_have_one_flag=() + must_have_one_noun=() + noun_aliases=() +} + +_restic_restore() +{ + last_command="restic_restore" + + command_aliases=() + + commands=() + + flags=() + two_word_flags=() + local_nonpersistent_flags=() + flags_with_completion=() + flags_completion=() + + flags+=("--exclude=") + two_word_flags+=("-e") + local_nonpersistent_flags+=("--exclude=") + flags+=("--help") + flags+=("-h") + local_nonpersistent_flags+=("--help") + flags+=("--host=") + two_word_flags+=("-H") + local_nonpersistent_flags+=("--host=") + flags+=("--include=") + two_word_flags+=("-i") + local_nonpersistent_flags+=("--include=") + flags+=("--path=") + local_nonpersistent_flags+=("--path=") + flags+=("--tag=") + local_nonpersistent_flags+=("--tag=") + flags+=("--target=") + two_word_flags+=("-t") + local_nonpersistent_flags+=("--target=") + flags+=("--verify") + local_nonpersistent_flags+=("--verify") + flags+=("--cacert=") + flags+=("--cache-dir=") + flags+=("--cleanup-cache") + flags+=("--json") + flags+=("--key-hint=") + flags+=("--limit-download=") + flags+=("--limit-upload=") + flags+=("--no-cache") + flags+=("--no-lock") + flags+=("--option=") + two_word_flags+=("-o") + flags+=("--password-command=") + flags+=("--password-file=") + two_word_flags+=("-p") + flags+=("--quiet") + flags+=("-q") + flags+=("--repo=") + two_word_flags+=("-r") + flags+=("--tls-client-cert=") + flags+=("--verbose") + flags+=("-v") + + must_have_one_flag=() + must_have_one_noun=() + noun_aliases=() +} + +_restic_self-update() +{ + last_command="restic_self-update" + + command_aliases=() + + commands=() + + flags=() + two_word_flags=() + local_nonpersistent_flags=() + flags_with_completion=() + flags_completion=() + + flags+=("--help") + flags+=("-h") + local_nonpersistent_flags+=("--help") + flags+=("--output=") + local_nonpersistent_flags+=("--output=") + flags+=("--cacert=") + flags+=("--cache-dir=") + flags+=("--cleanup-cache") + flags+=("--json") + flags+=("--key-hint=") + flags+=("--limit-download=") + flags+=("--limit-upload=") + flags+=("--no-cache") + flags+=("--no-lock") + flags+=("--option=") + two_word_flags+=("-o") + flags+=("--password-command=") + flags+=("--password-file=") + two_word_flags+=("-p") + flags+=("--quiet") + flags+=("-q") + flags+=("--repo=") + two_word_flags+=("-r") + flags+=("--tls-client-cert=") + flags+=("--verbose") + flags+=("-v") + + must_have_one_flag=() + must_have_one_noun=() + noun_aliases=() +} + +_restic_snapshots() +{ + last_command="restic_snapshots" + + command_aliases=() + + commands=() + + flags=() + two_word_flags=() + local_nonpersistent_flags=() + flags_with_completion=() + flags_completion=() + + flags+=("--compact") + flags+=("-c") + local_nonpersistent_flags+=("--compact") + flags+=("--help") + flags+=("-h") + local_nonpersistent_flags+=("--help") + flags+=("--host=") + two_word_flags+=("-H") + local_nonpersistent_flags+=("--host=") + flags+=("--last") + local_nonpersistent_flags+=("--last") + flags+=("--path=") + local_nonpersistent_flags+=("--path=") + flags+=("--tag=") + local_nonpersistent_flags+=("--tag=") + flags+=("--cacert=") + flags+=("--cache-dir=") + flags+=("--cleanup-cache") + flags+=("--json") + flags+=("--key-hint=") + flags+=("--limit-download=") + flags+=("--limit-upload=") + flags+=("--no-cache") + flags+=("--no-lock") + flags+=("--option=") + two_word_flags+=("-o") + flags+=("--password-command=") + flags+=("--password-file=") + two_word_flags+=("-p") + flags+=("--quiet") + flags+=("-q") + flags+=("--repo=") + 
two_word_flags+=("-r") + flags+=("--tls-client-cert=") + flags+=("--verbose") + flags+=("-v") + + must_have_one_flag=() + must_have_one_noun=() + noun_aliases=() +} + +_restic_stats() +{ + last_command="restic_stats" + + command_aliases=() + + commands=() + + flags=() + two_word_flags=() + local_nonpersistent_flags=() + flags_with_completion=() + flags_completion=() + + flags+=("--help") + flags+=("-h") + local_nonpersistent_flags+=("--help") + flags+=("--host=") + two_word_flags+=("-H") + local_nonpersistent_flags+=("--host=") + flags+=("--mode=") + local_nonpersistent_flags+=("--mode=") + flags+=("--cacert=") + flags+=("--cache-dir=") + flags+=("--cleanup-cache") + flags+=("--json") + flags+=("--key-hint=") + flags+=("--limit-download=") + flags+=("--limit-upload=") + flags+=("--no-cache") + flags+=("--no-lock") + flags+=("--option=") + two_word_flags+=("-o") + flags+=("--password-command=") + flags+=("--password-file=") + two_word_flags+=("-p") + flags+=("--quiet") + flags+=("-q") + flags+=("--repo=") + two_word_flags+=("-r") + flags+=("--tls-client-cert=") + flags+=("--verbose") + flags+=("-v") + + must_have_one_flag=() + must_have_one_noun=() + noun_aliases=() +} + +_restic_tag() +{ + last_command="restic_tag" + + command_aliases=() + + commands=() + + flags=() + two_word_flags=() + local_nonpersistent_flags=() + flags_with_completion=() + flags_completion=() + + flags+=("--add=") + local_nonpersistent_flags+=("--add=") + flags+=("--help") + flags+=("-h") + local_nonpersistent_flags+=("--help") + flags+=("--host=") + two_word_flags+=("-H") + local_nonpersistent_flags+=("--host=") + flags+=("--path=") + local_nonpersistent_flags+=("--path=") + flags+=("--remove=") + local_nonpersistent_flags+=("--remove=") + flags+=("--set=") + local_nonpersistent_flags+=("--set=") + flags+=("--tag=") + local_nonpersistent_flags+=("--tag=") + flags+=("--cacert=") + flags+=("--cache-dir=") + flags+=("--cleanup-cache") + flags+=("--json") + flags+=("--key-hint=") + flags+=("--limit-download=") + flags+=("--limit-upload=") + flags+=("--no-cache") + flags+=("--no-lock") + flags+=("--option=") + two_word_flags+=("-o") + flags+=("--password-command=") + flags+=("--password-file=") + two_word_flags+=("-p") + flags+=("--quiet") + flags+=("-q") + flags+=("--repo=") + two_word_flags+=("-r") + flags+=("--tls-client-cert=") + flags+=("--verbose") + flags+=("-v") + + must_have_one_flag=() + must_have_one_noun=() + noun_aliases=() +} + +_restic_unlock() +{ + last_command="restic_unlock" + + command_aliases=() + + commands=() + + flags=() + two_word_flags=() + local_nonpersistent_flags=() + flags_with_completion=() + flags_completion=() + + flags+=("--help") + flags+=("-h") + local_nonpersistent_flags+=("--help") + flags+=("--remove-all") + local_nonpersistent_flags+=("--remove-all") + flags+=("--cacert=") + flags+=("--cache-dir=") + flags+=("--cleanup-cache") + flags+=("--json") + flags+=("--key-hint=") + flags+=("--limit-download=") + flags+=("--limit-upload=") + flags+=("--no-cache") + flags+=("--no-lock") + flags+=("--option=") + two_word_flags+=("-o") + flags+=("--password-command=") + flags+=("--password-file=") + two_word_flags+=("-p") + flags+=("--quiet") + flags+=("-q") + flags+=("--repo=") + two_word_flags+=("-r") + flags+=("--tls-client-cert=") + flags+=("--verbose") + flags+=("-v") + + must_have_one_flag=() + must_have_one_noun=() + noun_aliases=() +} + +_restic_version() +{ + last_command="restic_version" + + command_aliases=() + + commands=() + + flags=() + two_word_flags=() + 
local_nonpersistent_flags=() + flags_with_completion=() + flags_completion=() + + flags+=("--help") + flags+=("-h") + local_nonpersistent_flags+=("--help") + flags+=("--cacert=") + flags+=("--cache-dir=") + flags+=("--cleanup-cache") + flags+=("--json") + flags+=("--key-hint=") + flags+=("--limit-download=") + flags+=("--limit-upload=") + flags+=("--no-cache") + flags+=("--no-lock") + flags+=("--option=") + two_word_flags+=("-o") + flags+=("--password-command=") + flags+=("--password-file=") + two_word_flags+=("-p") + flags+=("--quiet") + flags+=("-q") + flags+=("--repo=") + two_word_flags+=("-r") + flags+=("--tls-client-cert=") + flags+=("--verbose") + flags+=("-v") + + must_have_one_flag=() + must_have_one_noun=() + noun_aliases=() +} + +_restic_root_command() +{ + last_command="restic" + + command_aliases=() + + commands=() + commands+=("backup") + commands+=("cache") + commands+=("cat") + commands+=("check") + commands+=("diff") + commands+=("dump") + commands+=("find") + commands+=("forget") + commands+=("generate") + commands+=("init") + commands+=("key") + commands+=("list") + commands+=("ls") + commands+=("migrate") + commands+=("mount") + commands+=("prune") + commands+=("rebuild-index") + commands+=("recover") + commands+=("restore") + commands+=("self-update") + commands+=("snapshots") + commands+=("stats") + commands+=("tag") + commands+=("unlock") + commands+=("version") + + flags=() + two_word_flags=() + local_nonpersistent_flags=() + flags_with_completion=() + flags_completion=() + + flags+=("--cacert=") + flags+=("--cache-dir=") + flags+=("--cleanup-cache") + flags+=("--help") + flags+=("-h") + local_nonpersistent_flags+=("--help") + flags+=("--json") + flags+=("--key-hint=") + flags+=("--limit-download=") + flags+=("--limit-upload=") + flags+=("--no-cache") + flags+=("--no-lock") + flags+=("--option=") + two_word_flags+=("-o") + flags+=("--password-command=") + flags+=("--password-file=") + two_word_flags+=("-p") + flags+=("--quiet") + flags+=("-q") + flags+=("--repo=") + two_word_flags+=("-r") + flags+=("--tls-client-cert=") + flags+=("--verbose") + flags+=("-v") + + must_have_one_flag=() + must_have_one_noun=() + noun_aliases=() +} + +__start_restic() +{ + local cur prev words cword + declare -A flaghash 2>/dev/null || : + declare -A aliashash 2>/dev/null || : + if declare -F _init_completion >/dev/null 2>&1; then + _init_completion -s || return + else + __restic_init_completion -n "=" || return + fi + + local c=0 + local flags=() + local two_word_flags=() + local local_nonpersistent_flags=() + local flags_with_completion=() + local flags_completion=() + local commands=("restic") + local must_have_one_flag=() + local must_have_one_noun=() + local last_command + local nouns=() + + __restic_handle_word +} + +if [[ $(type -t compopt) = "builtin" ]]; then + complete -o default -F __start_restic restic +else + complete -o default -o nospace -F __start_restic restic +fi + +# ex: ts=4 sw=4 et filetype=sh diff --git a/doc/cache.rst b/doc/cache.rst new file mode 100644 index 000000000..4c8009795 --- /dev/null +++ b/doc/cache.rst @@ -0,0 +1,36 @@ +*********** +Local Cache +*********** + +In order to speed up certain operations, restic manages a local cache of data. +This document describes the data structures for the local cache with version 1. + +Versions +======== + +The cache directory is selected according to the `XDG base dir specification +`__. +Each repository has its own cache sub-directory, consisting of the repository ID +which is chosen at ``init``. 
All cache directories for different repos are +independent of each other. + +The cache dir for a repo contains a file named ``version``, which contains a +single ASCII integer line that stands for the current version of the cache. If +a lower version number is found the cache is recreated with the current +version. If a higher version number is found the cache is ignored and left as +is. + +Snapshots, Data and Indexes +=========================== + +Snapshot, Data and Index files are cached in the sub-directories ``snapshots``, +``data`` and ``index``, as read from the repository. + +Expiry +====== + +Whenever a cache directory for a repo is used, that directory's modification +timestamp is updated to the current time. By looking at the modification +timestamps of the repo cache directories it is easy to decide which directories +are old and haven't been used in a long time. Those are probably stale and can +be removed. diff --git a/doc/conf.py b/doc/conf.py new file mode 100644 index 000000000..3c0af927b --- /dev/null +++ b/doc/conf.py @@ -0,0 +1,110 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +# +# restic documentation build configuration file, created by +# sphinx-quickstart on Fri Apr 14 22:44:43 2017. +# +# This file is execfile()d with the current directory set to its +# containing dir. +# +# Note that not all possible configuration values are present in this +# autogenerated file. +# +# All configuration values have a default; values that are commented out +# serve to show the default. +import os + +# -- General configuration ------------------------------------------------ + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = ['sphinx.ext.extlinks'] + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# The suffix(es) of source filenames. +# You can specify multiple suffix as a list of string: +# +# source_suffix = ['.rst', '.md'] +source_suffix = '.rst' + +# The master toctree document. +master_doc = 'index' + +# General information about the project. +project = 'restic' +copyright = '2018, restic authors' +author = 'fd0' + +# The version info for the project you're documenting, acts as replacement for +# |version| and |release|, also used in various other places throughout the +# built documents. +# +# The short X.Y version. + +# read version from ../VERSION +version = open('../VERSION').readlines()[0] +# The full version, including alpha/beta/rc tags. +release = version + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +# +# This is also used if you do content translation via gettext catalogs. +# Usually you set "language" from the command line for these cases. +language = None + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +# This patterns also effect to html_static_path and html_extra_path +exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = 'sphinx' + +# If true, `todo` and `todoList` produce output, else they produce nothing. +todo_include_todos = False + + +# -- Options for HTML output ---------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. 
+# +if os.environ.get('READTHEDOCS') == 'True': + html_context = { + 'css_files': [ + 'https://media.readthedocs.org/css/sphinx_rtd_theme.css', + 'https://media.readthedocs.org/css/readthedocs-doc-embed.css', + '_static/css/restic.css', + ] + } +else: + # we're not built by rtd => add rtd-theme + import sphinx_rtd_theme + html_theme = 'sphinx_rtd_theme' + html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] + html_style = 'css/restic.css' + +html_logo = 'logo/logo.png' + +html_favicon = '_static/favicon.ico' + +html_show_version = False + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ['_static'] + + +# -- Options for HTMLHelp output ------------------------------------------ + +# Output file base name for HTML help builder. +htmlhelp_basename = 'resticdoc' + +extlinks = { + 'issue': ('https://github.com/restic/restic/issues/%s', '#'), +} diff --git a/doc/design.rst b/doc/design.rst new file mode 100644 index 000000000..dd209c2fc --- /dev/null +++ b/doc/design.rst @@ -0,0 +1,608 @@ + +Terminology +=========== + +This section introduces terminology used in this document. + +*Repository*: All data produced during a backup is sent to and stored in +a repository in a structured form, for example in a file system +hierarchy with several subdirectories. A repository implementation must +be able to fulfill a number of operations, e.g. list the contents. + +*Blob*: A Blob combines a number of data bytes with identifying +information like the SHA-256 hash of the data and its length. + +*Pack*: A Pack combines one or more Blobs, e.g. in a single file. + +*Snapshot*: A Snapshot stands for the state of a file or directory that +has been backed up at some point in time. The state here means the +content and meta data like the name and modification time for the file +or the directory and its contents. + +*Storage ID*: A storage ID is the SHA-256 hash of the content stored in +the repository. This ID is required in order to load the file from the +repository. + +Repository Format +================= + +All data is stored in a restic repository. A repository is able to store +data of several different types, which can later be requested based on +an ID. This so-called "storage ID" is the SHA-256 hash of the content of +a file. All files in a repository are only written once and never +modified afterwards. This allows accessing and even writing to the +repository with multiple clients in parallel. Only the ``prune`` operation +removes data from the repository. + +Repositories consist of several directories and a top-level file called +``config``. For all other files stored in the repository, the name for +the file is the lower case hexadecimal representation of the storage ID, +which is the SHA-256 hash of the file's contents. This allows for easy +verification of files for accidental modifications, like disk read +errors, by simply running the program ``sha256sum`` on the file and +comparing its output to the file name. If the prefix of a filename is +unique amongst all the other files in the same directory, the prefix may +be used instead of the complete filename. + +Apart from the files stored within the ``keys`` directory, all files are +encrypted with AES-256 in counter mode (CTR). 
The integrity of the +encrypted data is secured by a Poly1305-AES message authentication code +(sometimes also referred to as a "signature"). + +In the first 16 bytes of each encrypted file the initialisation vector +(IV) is stored. It is followed by the encrypted data and completed by +the 16 byte MAC. The format is: ``IV || CIPHERTEXT || MAC``. The +complete encryption overhead is 32 bytes. For each file, a new random IV +is selected. + +The file ``config`` is encrypted this way and contains a JSON document +like the following: + +.. code:: json + + { + "version": 1, + "id": "5956a3f67a6230d4a92cefb29529f10196c7d92582ec305fd71ff6d331d6271b", + "chunker_polynomial": "25b468838dcb75" + } + +After decryption, restic first checks that the version field contains a +version number that it understands, otherwise it aborts. At the moment, +the version is expected to be 1. The field ``id`` holds a unique ID +which consists of 32 random bytes, encoded in hexadecimal. This uniquely +identifies the repository, regardless if it is accessed via SFTP or +locally. The field ``chunker_polynomial`` contains a parameter that is +used for splitting large files into smaller chunks (see below). + +Repository Layout +----------------- + +The ``local`` and ``sftp`` backends are implemented using files and +directories stored in a file system. The directory layout is the same +for both backend types. + +The basic layout of a repository stored in a ``local`` or ``sftp`` +backend is shown here: + +:: + + /tmp/restic-repo + ├── config + ├── data + │ ├── 21 + │ │ └── 2159dd48f8a24f33c307b750592773f8b71ff8d11452132a7b2e2a6a01611be1 + │ ├── 32 + │ │ └── 32ea976bc30771cebad8285cd99120ac8786f9ffd42141d452458089985043a5 + │ ├── 59 + │ │ └── 59fe4bcde59bd6222eba87795e35a90d82cd2f138a27b6835032b7b58173a426 + │ ├── 73 + │ │ └── 73d04e6125cf3c28a299cc2f3cca3b78ceac396e4fcf9575e34536b26782413c + │ [...] + ├── index + │ ├── c38f5fb68307c6a3e3aa945d556e325dc38f5fb68307c6a3e3aa945d556e325d + │ └── ca171b1b7394d90d330b265d90f506f9984043b342525f019788f97e745c71fd + ├── keys + │ └── b02de829beeb3c01a63e6b25cbd421a98fef144f03b9a02e46eff9e2ca3f0bd7 + ├── locks + ├── snapshots + │ └── 22a5af1bdc6e616f8a29579458c49627e01b32210d09adb288d1ecda7c5711ec + └── tmp + +A local repository can be initialized with the ``restic init`` command, +e.g.: + +.. code-block:: console + + $ restic -r /tmp/restic-repo init + +The local and sftp backends will auto-detect and accept all layouts described +in the following sections, so that remote repositories mounted locally e.g. via +fuse can be accessed. The layout auto-detection can be overridden by specifying +the option ``-o local.layout=default``, valid values are ``default`` and +``s3legacy``. The option for the sftp backend is named ``sftp.layout``, for the +s3 backend ``s3.layout``. + +S3 Legacy Layout +---------------- + +Unfortunately during development the AWS S3 backend uses slightly different +paths (directory names use singular instead of plural for ``key``, +``lock``, and ``snapshot`` files), and the data files are stored directly below +the ``data`` directory. The S3 Legacy repository layout looks like this: + +:: + + /config + /data + ├── 2159dd48f8a24f33c307b750592773f8b71ff8d11452132a7b2e2a6a01611be1 + ├── 32ea976bc30771cebad8285cd99120ac8786f9ffd42141d452458089985043a5 + ├── 59fe4bcde59bd6222eba87795e35a90d82cd2f138a27b6835032b7b58173a426 + ├── 73d04e6125cf3c28a299cc2f3cca3b78ceac396e4fcf9575e34536b26782413c + [...] 
+ /index + ├── c38f5fb68307c6a3e3aa945d556e325dc38f5fb68307c6a3e3aa945d556e325d + └── ca171b1b7394d90d330b265d90f506f9984043b342525f019788f97e745c71fd + /key + └── b02de829beeb3c01a63e6b25cbd421a98fef144f03b9a02e46eff9e2ca3f0bd7 + /lock + /snapshot + └── 22a5af1bdc6e616f8a29579458c49627e01b32210d09adb288d1ecda7c5711ec + +The S3 backend understands and accepts both forms, new backends are +always created with the default layout for compatibility reasons. + +Pack Format +=========== + +All files in the repository except Key and Pack files just contain raw +data, stored as ``IV || Ciphertext || MAC``. Pack files may contain one +or more Blobs of data. + +A Pack's structure is as follows: + +:: + + EncryptedBlob1 || ... || EncryptedBlobN || EncryptedHeader || Header_Length + +At the end of the Pack file is a header, which describes the content. +The header is encrypted and authenticated. ``Header_Length`` is the +length of the encrypted header encoded as a four byte integer in +little-endian encoding. Placing the header at the end of a file allows +writing the blobs in a continuous stream as soon as they are read during +the backup phase. This reduces code complexity and avoids having to +re-write a file once the pack is complete and the content and length of +the header is known. + +All the blobs (``EncryptedBlob1``, ``EncryptedBlobN`` etc.) are +authenticated and encrypted independently. This enables repository +reorganisation without having to touch the encrypted Blobs. In addition +it also allows efficient indexing, for only the header needs to be read +in order to find out which Blobs are contained in the Pack. Since the +header is authenticated, authenticity of the header can be checked +without having to read the complete Pack. + +After decryption, a Pack's header consists of the following elements: + +:: + + Type_Blob1 || Length(EncryptedBlob1) || Hash(Plaintext_Blob1) || + [...] + Type_BlobN || Length(EncryptedBlobN) || Hash(Plaintext_Blobn) || + +This is enough to calculate the offsets for all the Blobs in the Pack. +Length is the length of a Blob as a four byte integer in little-endian +format. The type field is a one byte field and labels the content of a +blob according to the following table: + ++--------+-----------+ +| Type | Meaning | ++========+===========+ +| 0 | data | ++--------+-----------+ +| 1 | tree | ++--------+-----------+ + +All other types are invalid, more types may be added in the future. + +For reconstructing the index or parsing a pack without an index, first +the last four bytes must be read in order to find the length of the +header. Afterwards, the header can be read and parsed, which yields all +plaintext hashes, types, offsets and lengths of all included blobs. + +Indexing +======== + +Index files contain information about Data and Tree Blobs and the Packs +they are contained in and store this information in the repository. When +the local cached index is not accessible any more, the index files can +be downloaded and used to reconstruct the index. The files are encrypted +and authenticated like Data and Tree Blobs, so the outer structure is +``IV || Ciphertext || MAC`` again. The plaintext consists of a JSON +document like the following: + +.. 
code:: json + + { + "supersedes": [ + "ed54ae36197f4745ebc4b54d10e0f623eaaaedd03013eb7ae90df881b7781452" + ], + "packs": [ + { + "id": "73d04e6125cf3c28a299cc2f3cca3b78ceac396e4fcf9575e34536b26782413c", + "blobs": [ + { + "id": "3ec79977ef0cf5de7b08cd12b874cd0f62bbaf7f07f3497a5b1bbcc8cb39b1ce", + "type": "data", + "offset": 0, + "length": 25 + },{ + "id": "9ccb846e60d90d4eb915848add7aa7ea1e4bbabfc60e573db9f7bfb2789afbae", + "type": "tree", + "offset": 38, + "length": 100 + }, + { + "id": "d3dc577b4ffd38cc4b32122cabf8655a0223ed22edfd93b353dc0c3f2b0fdf66", + "type": "data", + "offset": 150, + "length": 123 + } + ] + }, [...] + ] + } + +This JSON document lists Packs and the blobs contained therein. In this +example, the Pack ``73d04e61`` contains two data Blobs and one Tree +blob, the plaintext hashes are listed afterwards. + +The field ``supersedes`` lists the storage IDs of index files that have +been replaced with the current index file. This happens when index files +are repacked, for example when old snapshots are removed and Packs are +recombined. + +There may be an arbitrary number of index files, containing information +on non-disjoint sets of Packs. The number of packs described in a single +file is chosen so that the file size is kept below 8 MiB. + +Keys, Encryption and MAC +======================== + +All data stored by restic in the repository is encrypted with AES-256 in +counter mode and authenticated using Poly1305-AES. For encrypting new +data first 16 bytes are read from a cryptographically secure +pseudorandom number generator as a random nonce. This is used both as +the IV for counter mode and the nonce for Poly1305. This operation needs +three keys: A 32 byte for AES-256 for encryption, a 16 byte AES key and +a 16 byte key for Poly1305. For details see the original paper `The +Poly1305-AES message-authentication +code `__ by Dan Bernstein. +The data is then encrypted with AES-256 and afterwards a message +authentication code (MAC) is computed over the ciphertext, everything is +then stored as IV \|\| CIPHERTEXT \|\| MAC. + +The directory ``keys`` contains key files. These are simple JSON +documents which contain all data that is needed to derive the +repository's master encryption and message authentication keys from a +user's password. The JSON document from the repository can be +pretty-printed for example by using the Python module ``json`` +(shortened to increase readability): + +:: + + $ python -mjson.tool /tmp/restic-repo/keys/b02de82* + { + "hostname": "kasimir", + "username": "fd0" + "kdf": "scrypt", + "N": 65536, + "r": 8, + "p": 1, + "created": "2015-01-02T18:10:13.48307196+01:00", + "data": "tGwYeKoM0C4j4/9DFrVEmMGAldvEn/+iKC3te/QE/6ox/V4qz58FUOgMa0Bb1cIJ6asrypCx/Ti/pRXCPHLDkIJbNYd2ybC+fLhFIJVLCvkMS+trdywsUkglUbTbi+7+Ldsul5jpAj9vTZ25ajDc+4FKtWEcCWL5ICAOoTAxnPgT+Lh8ByGQBH6KbdWabqamLzTRWxePFoYuxa7yXgmj9A==", + "salt": "uW4fEI1+IOzj7ED9mVor+yTSJFd68DGlGOeLgJELYsTU5ikhG/83/+jGd4KKAaQdSrsfzrdOhAMftTSih5Ux6w==", + } + +When the repository is opened by restic, the user is prompted for the +repository password. This is then used with ``scrypt``, a key derivation +function (KDF), and the supplied parameters (``N``, ``r``, ``p`` and +``salt``) to derive 64 key bytes. The first 32 bytes are used as the +encryption key (for AES-256) and the last 32 bytes are used as the +message authentication key (for Poly1305-AES). These last 32 bytes are +divided into a 16 byte AES key ``k`` followed by 16 bytes of secret key +``r``. 
The key ``r`` is then masked for use with Poly1305 (see the paper +for details). + +Those keys are used to authenticate and decrypt the bytes contained in +the JSON field ``data`` with AES-256 and Poly1305-AES as if they were +any other blob (after removing the Base64 encoding). If the +password is incorrect or the key file has been tampered with, the +computed MAC will not match the last 16 bytes of the data, and restic +exits with an error. Otherwise, the data yields a JSON document +which contains the master encryption and message authentication keys for +this repository (encoded in Base64). The command +``restic cat masterkey`` can be used as follows to decrypt and +pretty-print the master key: + +.. code-block:: console + + $ restic -r /tmp/restic-repo cat masterkey + { + "mac": { + "k": "evFWd9wWlndL9jc501268g==", + "r": "E9eEDnSJZgqwTOkDtOp+Dw==" + }, + "encrypt": "UQCqa0lKZ94PygPxMRqkePTZnHRYh1k1pX2k2lM2v3Q=", + } + +All data in the repository is encrypted and authenticated with these +master keys. For encryption, the AES-256 algorithm in Counter mode is +used. For message authentication, Poly1305-AES is used as described +above. + +A repository can have several different passwords, with a key file for +each. This way, the password can be changed without having to re-encrypt +all data. + +Snapshots +========= + +A snapshot represents a directory with all files and sub-directories at +a given point in time. For each backup that is made, a new snapshot is +created. A snapshot is a JSON document that is stored in an encrypted +file below the directory ``snapshots`` in the repository. The filename +is the storage ID. This string is unique and used within restic to +uniquely identify a snapshot. + +The command ``restic cat snapshot`` can be used as follows to decrypt +and pretty-print the contents of a snapshot file: + +.. code-block:: console + + $ restic -r /tmp/restic-repo cat snapshot 251c2e58 + enter password for repository: + { + "time": "2015-01-02T18:10:50.895208559+01:00", + "tree": "2da81727b6585232894cfbb8f8bdab8d1eccd3d8f7c92bc934d62e62e618ffdf", + "dir": "/tmp/testdata", + "hostname": "kasimir", + "username": "fd0", + "uid": 1000, + "gid": 100, + "tags": [ + "NL" + ] + } + +Here it can be seen that this snapshot represents the contents of the +directory ``/tmp/testdata``. The most important field is ``tree``. When +the meta data (e.g. the tags) of a snapshot change, the snapshot needs +to be re-encrypted and saved. This will change the storage ID, so in +order to relate these seemingly different snapshots, a field +``original`` is introduced which contains the ID of the original +snapshot, e.g. after adding the tag ``DE`` to the snapshot above it +becomes: + +.. code-block:: console + + $ restic -r /tmp/restic-repo cat snapshot 22a5af1b + enter password for repository: + { + "time": "2015-01-02T18:10:50.895208559+01:00", + "tree": "2da81727b6585232894cfbb8f8bdab8d1eccd3d8f7c92bc934d62e62e618ffdf", + "dir": "/tmp/testdata", + "hostname": "kasimir", + "username": "fd0", + "uid": 1000, + "gid": 100, + "tags": [ + "NL", + "DE" + ], + "original": "251c2e5841355f743f9d4ffd3260bee765acee40a6229857e32b60446991b837" + } + +Once introduced, the ``original`` field is not modified when the +snapshot's meta data is changed again. + +All content within a restic repository is referenced according to its +SHA-256 hash. Before saving, each file is split into variable sized +Blobs of data. 
The SHA-256 hashes of all Blobs are saved in an ordered +list which then represents the content of the file. + +In order to relate these plaintext hashes to the actual location within +a Pack file , an index is used. If the index is not available, the +header of all data Blobs can be read. + +Trees and Data +============== + +A snapshot references a tree by the SHA-256 hash of the JSON string +representation of its contents. Trees and data are saved in pack files +in a subdirectory of the directory ``data``. + +The command ``restic cat blob`` can be used to inspect the tree +referenced above (piping the output of the command to ``jq .`` so that +the JSON is indented): + +.. code-block:: console + + $ restic -r /tmp/restic-repo cat blob 2da81727b6585232894cfbb8f8bdab8d1eccd3d8f7c92bc934d62e62e618ffdf | jq . + enter password for repository: + { + "nodes": [ + { + "name": "testdata", + "type": "dir", + "mode": 493, + "mtime": "2014-12-22T14:47:59.912418701+01:00", + "atime": "2014-12-06T17:49:21.748468803+01:00", + "ctime": "2014-12-22T14:47:59.912418701+01:00", + "uid": 1000, + "gid": 100, + "user": "fd0", + "inode": 409704562, + "content": null, + "subtree": "b26e315b0988ddcd1cee64c351d13a100fedbc9fdbb144a67d1b765ab280b4dc" + } + ] + } + +A tree contains a list of entries (in the field ``nodes``) which contain +meta data like a name and timestamps. When the entry references a +directory, the field ``subtree`` contains the plain text ID of another +tree object. + +When the command ``restic cat blob`` is used, the plaintext ID is needed +to print a tree. The tree referenced above can be dumped as follows: + +.. code-block:: console + + $ restic -r /tmp/restic-repo cat blob b26e315b0988ddcd1cee64c351d13a100fedbc9fdbb144a67d1b765ab280b4dc + enter password for repository: + { + "nodes": [ + { + "name": "testfile", + "type": "file", + "mode": 420, + "mtime": "2014-12-06T17:50:23.34513538+01:00", + "atime": "2014-12-06T17:50:23.338468713+01:00", + "ctime": "2014-12-06T17:50:23.34513538+01:00", + "uid": 1000, + "gid": 100, + "user": "fd0", + "inode": 416863351, + "size": 1234, + "links": 1, + "content": [ + "50f77b3b4291e8411a027b9f9b9e64658181cc676ce6ba9958b95f268cb1109d" + ] + }, + [...] + ] + } + +This tree contains a file entry. This time, the ``subtree`` field is not +present and the ``content`` field contains a list with one plain text +SHA-256 hash. + +The command ``restic cat blob`` can also be used to extract and decrypt +data given a plaintext ID, e.g. for the data mentioned above: + +.. code-block:: console + + $ restic -r /tmp/restic-repo cat blob 50f77b3b4291e8411a027b9f9b9e64658181cc676ce6ba9958b95f268cb1109d | sha256sum + enter password for repository: + 50f77b3b4291e8411a027b9f9b9e64658181cc676ce6ba9958b95f268cb1109d - + +As can be seen from the output of the program ``sha256sum``, the hash +matches the plaintext hash from the map included in the tree above, so +the correct data has been returned. + +Locks +===== + +The restic repository structure is designed in a way that allows +parallel access of multiple instance of restic and even parallel writes. +However, there are some functions that work more efficient or even +require exclusive access of the repository. In order to implement these +functions, restic processes are required to create a lock on the +repository before doing anything. + +Locks come in two types: Exclusive and non-exclusive locks. 
At most one +process can have an exclusive lock on the repository, and during that +time there must not be any other locks (exclusive and non-exclusive). +There may be multiple non-exclusive locks in parallel. + +A lock is a file in the subdir ``locks`` whose filename is the storage +ID of the contents. It is encrypted and authenticated the same way as +other files in the repository and contains the following JSON structure: + +.. code:: json + + { + "time": "2015-06-27T12:18:51.759239612+02:00", + "exclusive": false, + "hostname": "kasimir", + "username": "fd0", + "pid": 13607, + "uid": 1000, + "gid": 100 + } + +The field ``exclusive`` defines the type of lock. When a new lock is to +be created, restic checks all locks in the repository. When a lock is +found, it is tested if the lock is stale, which is the case for locks +with timestamps older than 30 minutes. If the lock was created on the +same machine, even for younger locks it is tested whether the process is +still alive by sending a signal to it. If that fails, restic assumes +that the process is dead and considers the lock to be stale. + +When a new lock is to be created and no other conflicting locks are +detected, restic creates a new lock, waits, and checks if other locks +appeared in the repository. Depending on the type of the other locks and +the lock to be created, restic either continues or fails. + +Backups and Deduplication +========================= + +For creating a backup, restic scans the source directory for all files, +sub-directories and other entries. The data from each file is split into +variable length Blobs cut at offsets defined by a sliding window of 64 +byte. The implementation uses Rabin Fingerprints for implementing this +Content Defined Chunking (CDC). An irreducible polynomial is selected at +random and saved in the file ``config`` when a repository is +initialized, so that watermark attacks are much harder. + +Files smaller than 512 KiB are not split, Blobs are of 512 KiB to 8 MiB +in size. The implementation aims for 1 MiB Blob size on average. + +For modified files, only modified Blobs have to be saved in a subsequent +backup. This even works if bytes are inserted or removed at arbitrary +positions within the file. + +Threat Model +============ + +The design goals for restic include being able to securely store backups +in a location that is not completely trusted, e.g. a shared system where +others can potentially access the files or (in the case of the system +administrator) even modify or delete them. + +General assumptions: + +- The host system a backup is created on is trusted. This is the most + basic requirement, and essential for creating trustworthy backups. + +The restic backup program guarantees the following: + +- Accessing the unencrypted content of stored files and metadata should + not be possible without a password for the repository. Everything + except the metadata included for informational purposes in the key + files is encrypted and authenticated. + +- Modifications (intentional or unintentional) can be detected + automatically on several layers: + + 1. For all accesses of data stored in the repository it is checked + whether the cryptographic hash of the contents matches the storage + ID (the file's name). This way, modifications (bad RAM, broken + harddisk) can be detected easily. + + 2. Before decrypting any data, the MAC on the encrypted data is + checked. If there has been a modification, the MAC check will + fail. 
This step happens even before the data is decrypted, so data + that has been tampered with is not decrypted at all. + +However, the restic backup program is not designed to protect against +attackers deleting files at the storage location. There is nothing that +can be done about this. If this needs to be guaranteed, get a secure +location without any access from third parties. If you assume that +attackers have write access to your files at the storage location, +attackers are able to figure out (e.g. based on the timestamps of the +stored files) which files belong to what snapshot. When only these files +are deleted, the particular snapshot vanished and all snapshots +depending on data that has been added in the snapshot cannot be restored +completely. Restic is not designed to detect this attack. + diff --git a/doc/developer_information.rst b/doc/developer_information.rst new file mode 100644 index 000000000..f962863e7 --- /dev/null +++ b/doc/developer_information.rst @@ -0,0 +1,123 @@ +Developer Information +##################### + +Reproducible Builds +******************* + +This section describes how to reproduce the official released binaries for +restic for version 0.9.3 and later. The binary produced depends on the +following things: + + * The source code for the release + * The exact version of the official `Go compiler `__ used to produce the binaries (running ``restic version`` will print this) + * The architecture and operating system the Go compiler runs on (Linux, ``amd64``) + * The path where the source code is extracted to (``/restic``) + * The path to the Go compiler (``/usr/local/go``) + * The build tags (for official binaries, it's the tag ``selfupdate``) + * The environment variables (mostly ``$GOOS``, ``$GOARCH``, ``$CGO_ENABLED``) + +In addition, The compressed ZIP files for Windows depends on the modification +timestamp of the binary contained in it. In order to reproduce the exact same +ZIP file every time, we update the timestamp of the file ``VERSION`` in the +source code archive and set the timezone to Europe/Berlin. + +In the following example, we'll use the file ``restic-0.9.3.tar.gz`` and Go +1.11.1 to reproduce the released binaries. + +1. Download and extract the Go compiler into ``/usr/local/go``: + +.. code:: + + $ cd /usr/local + $ curl -L https://dl.google.com/go/go1.11.1.linux-amd64.tar.gz | tar xz + +2. Extract the restic source code into ``/restic`` + +.. code:: + + $ mkdir /restic + $ cd /restic + $ TZ=Europe/Berlin curl -L https://github.com/restic/restic/releases/download/v0.9.3/restic-0.9.3.tar.gz | tar xz --strip-components=1 + +3. Build the binaries for Windows and Linux: + +.. code:: + + $ export PATH=/usr/local/go/bin:$PATH + $ go version + go version go1.11.1 linux/amd64 + + $ GOOS=linux GOARCH=amd64 CGO_ENABLED=0 go build -mod=vendor -ldflags "-s -w" -tags selfupdate -o restic_linux_amd64 ./cmd/restic + $ bzip2 restic_linux_amd64 + + $ GOOS=windows GOARCH=amd64 CGO_ENABLED=0 go build -mod=vendor -ldflags "-s -w" -tags selfupdate -o restic_windows_amd64.exe ./cmd/restic + $ touch --reference VERSION restic_windows_amd64.exe + $ TZ=Europe/Berlin zip -q -X restic_windows_amd64.zip restic_windows_amd64.exe + +Building the Official Binaries +****************************** + +The released binaries for restic are built using a Docker container. 
You can +find it on `Docker Hub `__ as +``restic/builder``, the ``Dockerfile`` and instructions on how to build the +container can be found in the `GitHub repository +`__ + +The container serves the following goals: + * Have a very controlled environment which is independent from the local system + * Make it easy to have the correct version of the Go compiler at the right path + * Make it easy to pass in the source code to build at a well-defined path + +The following steps are necessary to build the binaries: + +1. Either build the container (see the instructions in the `repository's README `__). Alternatively, download the container from the hub: + +.. code:: + + docker pull restic/builder + +2. Extract the source code somewhere: + +.. code:: + + tar xvzf restic-0.9.3.tar.gz + +3. Create a directory to place the resulting binaries in: + +.. code:: + + mkdir output + +3. Mount the source code and the output directory in the container and run the default command, which starts ``helpers/build-release-binaries/main.go``: + +.. code:: + + docker run --rm \ + --volume "$PWD/restic-0.9.3:/restic" \ + --volume "$PWD/output:/output" \ + restic/builder + +4. If anything goes wrong, you can enable debug output by specifying the call to ``helpers/build-release-binaries/main.go`` like this: + +.. code:: + + docker run --rm \ + --volume "$PWD/restic-0.9.3:/restic" \ + --volume "$PWD/output:/output" \ + restic/builder \ + go run -mod=vendor helpers/build-release-binaries/main.go --verbose + +Prepare a New Release +********************* + +Publishing a new release of restic requires many different steps. We've +automated this in the Go program ``helpers/prepare-release/main.go`` which also +includes checking that e.g. the changelog is correctly generated. The only +required argument is the new version number (in `Semantic Versioning +`__ format ``MAJOR.MINOR.PATCH``): + +.. code:: + + go run -mod=vendor helpers/prepare-release/main.go 0.9.3 + +Checks can be skipped on demand via flags, please see ``--help`` for details. diff --git a/doc/faq.rst b/doc/faq.rst new file mode 100644 index 000000000..077b7ffbf --- /dev/null +++ b/doc/faq.rst @@ -0,0 +1,175 @@ +FAQ +=== + +This is the list of Frequently Asked Questions for restic. + +``restic check`` reports packs that aren't referenced in any index, is my repository broken? +-------------------------------------------------------------------------------------------- + +When ``restic check`` reports that there are pack files in the +repository that are not referenced in any index, that's (in contrast to +what restic reports at the moment) not a source for concern. The output +looks like this: + +:: + + $ restic check + Create exclusive lock for repository + Load indexes + Check all packs + pack 819a9a52e4f51230afa89aefbf90df37fb70996337ae57e6f7a822959206a85e: not referenced in any index + pack de299e69fb075354a3775b6b045d152387201f1cdc229c31d1caa34c3b340141: not referenced in any index + Check snapshots, trees and blobs + Fatal: repository contains errors + +The message means that there is more data stored in the repo than +strictly necessary. With high probability this is duplicate data. In +order to clean it up, the command ``restic prune`` can be used. The +cause of this bug is not yet known. + +I ran a ``restic`` command but it is not working as intended, what do I do now? 
+------------------------------------------------------------------------------- + +If you are running a restic command and it is not working as you hoped it would, +there is an easy way of checking how your shell interpreted the command you are trying to run. + +Here is an example of a mistake in a backup command that results in the command not working as expected. +A user wants to run the following ``restic backup`` command + +:: + +$ restic backup --exclude "~/documents" ~ + +.. important:: This command contains an intentional user error described in this paragraph. + +This command will result in a complete backup of the current logged in user's home directory and it won't exclude the folder ``~/documents/`` - which is not what the user wanted to achieve. +The problem is how the path to ``~/documents`` is passed to restic. + +In order to spot an issue like this, you can make use of the following ruby command preceding your restic command. + +:: + + $ ruby -e 'puts ARGV.inspect' restic backup --exclude "~/documents" ~ + ["restic", "backup", "--exclude", "~/documents", "/home/john"] + +As you can see, the command outputs every argument you have passed to the shell. This is what restic sees when you run your command. +The error here is that the tilde ``~`` in ``"~/documents"`` didn't get expanded as it is quoted. + +:: + + $ echo ~/documents + /home/john/documents + + $ echo "~/documents" + ~/document + + $ echo "$HOME/documents" + /home/john/documents + +Restic handles globbing and expansion in the following ways: + +- Globbing is only expanded for lines read via ``--files-from`` +- Environment variables are not expanded in the file read via ``--files-from`` +- ``*`` is expanded for paths read via ``--files-from`` +- e.g. For backup targets given to restic as arguments on the shell, neither glob expansion nor shell variable replacement is done. If restic is called as ``restic backup '*' '$HOME'``, it will try to backup the literal file(s)/dir(s) ``*`` and ``$HOME`` +- Double-asterisk ``**`` only works in exclude patterns as this is a custom extension built into restic; the shell must not expand it + + +How can I specify encryption passwords automatically? +----------------------------------------------------- + +When you run ``restic backup``, you need to enter the passphrase on +the console. This is not very convenient for automated backups, so you +can also provide the password through the ``--password-file`` option, or one of +the environment variables ``RESTIC_PASSWORD`` or ``RESTIC_PASSWORD_FILE``. +A discussion is in progress over implementing unattended backups happens in +:issue:`533`. + +.. important:: Be careful how you set the environment; using the env + command, a `system()` call or using inline shell + scripts (e.g. `RESTIC_PASSWORD=password restic ...`) + might expose the credentials in the process list + directly and they will be readable to all users on a + system. Using export in a shell script file should be + safe, however, as the environment of a process is + `accessible only to that user`_. Please make sure that + the permissions on the files where the password is + eventually stored are safe (e.g. `0600` and owned by + root). + +.. 
+
+
+How can I specify encryption passwords automatically?
+-----------------------------------------------------
+
+When you run ``restic backup``, you need to enter the passphrase on
+the console. This is not very convenient for automated backups, so you
+can also provide the password through the ``--password-file`` option, or one of
+the environment variables ``RESTIC_PASSWORD`` or ``RESTIC_PASSWORD_FILE``.
+A discussion about better support for unattended backups is in progress in
+:issue:`533`.
+
+.. important:: Be careful how you set the environment; using the env
+               command, a `system()` call or an inline shell
+               script (e.g. `RESTIC_PASSWORD=password restic ...`)
+               might expose the credentials in the process list
+               directly, where they are readable by all users on the
+               system. Using export in a shell script file should be
+               safe, however, as the environment of a process is
+               `accessible only to that user`_. Please make sure that
+               the permissions on the files where the password is
+               eventually stored are safe (e.g. `0600` and owned by
+               root).
+
+.. _accessible only to that user: https://security.stackexchange.com/questions/14000/environment-variable-accessibility-in-linux/14009#14009
+
+How to prioritize restic's IO and CPU time
+------------------------------------------
+
+If you'd like to change the **IO priority** of restic, run it in the following way:
+
+::
+
+    $ ionice -c2 -n0 ./restic -r /media/your/backup/ backup /home
+
+This runs ``restic`` in the so-called *best effort* class (``-c2``),
+with the highest possible priority within that class (``-n0``).
+
+Take a look at the `ionice manpage`_ to learn about the other classes.
+
+.. _ionice manpage: https://linux.die.net/man/1/ionice
+
+
+To change the **CPU scheduling priority** to a higher-than-standard
+value, you would run:
+
+::
+
+    $ nice --10 ./restic -r /media/your/backup/ backup /home
+
+Again, the `nice manpage`_ has more information.
+
+.. _nice manpage: https://linux.die.net/man/1/nice
+
+You can also **combine IO and CPU scheduling priority**:
+
+::
+
+    $ ionice -c2 nice -n19 ./restic -r /media/your/backup/ backup /home
+
+This example puts restic in IO class 2 (best effort) and tells the CPU
+scheduling algorithm to give it the least favorable niceness (19).
+
+The above example makes sure that the system the backup runs on
+is not slowed down, which is particularly useful for servers.
+
+Creating new repo on a Synology NAS via sftp fails
+--------------------------------------------------
+
+Sometimes creating a new restic repository on a Synology NAS via sftp fails
+with an error similar to the following, even though you can log into the NAS
+via SSH and see that the directory structure is there:
+
+::
+
+    $ restic init -r sftp:user@nas:/volume1/restic-repo
+    create backend at sftp:user@nas:/volume1/restic-repo/ failed:
+    mkdirAll(/volume1/restic-repo/index): unable to create directories: [...]
+
+The reason for this behavior is that Synology NAS devices apparently expose a
+different directory structure via sftp, so the path that needs to be specified
+is different from the directory structure on the device, and possibly also
+different from the structure exposed via other protocols.
+
+Try removing the ``/volume1`` prefix in your paths. If this does not work, use
+``sftp`` and ``ls`` to explore the SFTP file system hierarchy on your NAS.
+
+The following may work:
+
+::
+
+    $ restic init -r sftp:user@nas:/restic-repo
+
+Why does restic perform so poorly on Windows?
+---------------------------------------------
+
+In some cases the real-time protection of antivirus software can interfere
+with restic's operations. If you are experiencing bad performance, you can
+temporarily disable your antivirus software to find out whether it is the
+cause of your performance problems.
diff --git a/doc/images/aws_s3/01_aws_start.png b/doc/images/aws_s3/01_aws_start.png new file mode 100644 index 000000000..de8bfb392 Binary files /dev/null and b/doc/images/aws_s3/01_aws_start.png differ diff --git a/doc/images/aws_s3/02_aws_menu.png b/doc/images/aws_s3/02_aws_menu.png new file mode 100644 index 000000000..0babf55e8 Binary files /dev/null and b/doc/images/aws_s3/02_aws_menu.png differ diff --git a/doc/images/aws_s3/03_buckets_list_before.png b/doc/images/aws_s3/03_buckets_list_before.png new file mode 100644 index 000000000..11d9a4b88 Binary files /dev/null and b/doc/images/aws_s3/03_buckets_list_before.png differ diff --git a/doc/images/aws_s3/04_bucket_create_start.png b/doc/images/aws_s3/04_bucket_create_start.png new file mode 100644 index 000000000..5cce901f6 Binary files /dev/null and b/doc/images/aws_s3/04_bucket_create_start.png differ diff --git a/doc/images/aws_s3/05_bucket_create_review.png b/doc/images/aws_s3/05_bucket_create_review.png new file mode 100644 index 000000000..c2d415f4b Binary files /dev/null and b/doc/images/aws_s3/05_bucket_create_review.png differ diff --git a/doc/images/aws_s3/06_buckets_list_after.png b/doc/images/aws_s3/06_buckets_list_after.png new file mode 100644 index 000000000..ba982a3fc Binary files /dev/null and b/doc/images/aws_s3/06_buckets_list_after.png differ diff --git a/doc/images/aws_s3/07_iam_start.png b/doc/images/aws_s3/07_iam_start.png new file mode 100644 index 000000000..fad221083 Binary files /dev/null and b/doc/images/aws_s3/07_iam_start.png differ diff --git a/doc/images/aws_s3/08_user_list.png b/doc/images/aws_s3/08_user_list.png new file mode 100644 index 000000000..51723c48a Binary files /dev/null and b/doc/images/aws_s3/08_user_list.png differ diff --git a/doc/images/aws_s3/09_user_name.png b/doc/images/aws_s3/09_user_name.png new file mode 100644 index 000000000..b4a3812ce Binary files /dev/null and b/doc/images/aws_s3/09_user_name.png differ diff --git a/doc/images/aws_s3/10_user_pre_policy.png b/doc/images/aws_s3/10_user_pre_policy.png new file mode 100644 index 000000000..cfb569d92 Binary files /dev/null and b/doc/images/aws_s3/10_user_pre_policy.png differ diff --git a/doc/images/aws_s3/11_policy_start.png b/doc/images/aws_s3/11_policy_start.png new file mode 100644 index 000000000..2a16e62e4 Binary files /dev/null and b/doc/images/aws_s3/11_policy_start.png differ diff --git a/doc/images/aws_s3/13_policy_review.png b/doc/images/aws_s3/13_policy_review.png new file mode 100644 index 000000000..4ec422cd8 Binary files /dev/null and b/doc/images/aws_s3/13_policy_review.png differ diff --git a/doc/images/aws_s3/14_user_attach_policy.png b/doc/images/aws_s3/14_user_attach_policy.png new file mode 100644 index 000000000..beb1a0c2f Binary files /dev/null and b/doc/images/aws_s3/14_user_attach_policy.png differ diff --git a/doc/images/aws_s3/15_user_review.png b/doc/images/aws_s3/15_user_review.png new file mode 100644 index 000000000..2547c472a Binary files /dev/null and b/doc/images/aws_s3/15_user_review.png differ diff --git a/doc/images/aws_s3/16_user_created.png b/doc/images/aws_s3/16_user_created.png new file mode 100644 index 000000000..cf9fe114e Binary files /dev/null and b/doc/images/aws_s3/16_user_created.png differ diff --git a/doc/index.rst b/doc/index.rst new file mode 100644 index 000000000..69bbb8483 --- /dev/null +++ b/doc/index.rst @@ -0,0 +1,22 @@ +Restic Documentation +==================== + +.. 
toctree:: + :maxdepth: 2 + + 010_introduction + 020_installation + 030_preparing_a_new_repo + 040_backup + 045_working_with_repos + 050_restore + 060_forget + 070_encryption + 075_scripting + 080_examples + 090_participating + 100_references + 110_talks + faq + manual_rest + developer_information diff --git a/doc/logo/font/Lemon-Regular.ttf b/doc/logo/font/Lemon-Regular.ttf new file mode 100644 index 000000000..fb44b9fa0 Binary files /dev/null and b/doc/logo/font/Lemon-Regular.ttf differ diff --git a/doc/logo/font/OFL.txt b/doc/logo/font/OFL.txt new file mode 100644 index 000000000..89cdc64cb --- /dev/null +++ b/doc/logo/font/OFL.txt @@ -0,0 +1,93 @@ +Copyright (c) 2011, Eduardo Tunni (http://www.tipo.net.ar), +with Reserved Font Name "Lemon" +This Font Software is licensed under the SIL Open Font License, Version 1.1. +This license is copied below, and is also available with a FAQ at: +http://scripts.sil.org/OFL + + +----------------------------------------------------------- +SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007 +----------------------------------------------------------- + +PREAMBLE +The goals of the Open Font License (OFL) are to stimulate worldwide +development of collaborative font projects, to support the font creation +efforts of academic and linguistic communities, and to provide a free and +open framework in which fonts may be shared and improved in partnership +with others. + +The OFL allows the licensed fonts to be used, studied, modified and +redistributed freely as long as they are not sold by themselves. The +fonts, including any derivative works, can be bundled, embedded, +redistributed and/or sold with any software provided that any reserved +names are not used by derivative works. The fonts and derivatives, +however, cannot be released under any other type of license. The +requirement for fonts to remain under this license does not apply +to any document created using the fonts or their derivatives. + +DEFINITIONS +"Font Software" refers to the set of files released by the Copyright +Holder(s) under this license and clearly marked as such. This may +include source files, build scripts and documentation. + +"Reserved Font Name" refers to any names specified as such after the +copyright statement(s). + +"Original Version" refers to the collection of Font Software components as +distributed by the Copyright Holder(s). + +"Modified Version" refers to any derivative made by adding to, deleting, +or substituting -- in part or in whole -- any of the components of the +Original Version, by changing formats or by porting the Font Software to a +new environment. + +"Author" refers to any designer, engineer, programmer, technical +writer or other person who contributed to the Font Software. + +PERMISSION & CONDITIONS +Permission is hereby granted, free of charge, to any person obtaining +a copy of the Font Software, to use, study, copy, merge, embed, modify, +redistribute, and sell modified and unmodified copies of the Font +Software, subject to the following conditions: + +1) Neither the Font Software nor any of its individual components, +in Original or Modified Versions, may be sold by itself. + +2) Original or Modified Versions of the Font Software may be bundled, +redistributed and/or sold with any software, provided that each copy +contains the above copyright notice and this license. 
These can be +included either as stand-alone text files, human-readable headers or +in the appropriate machine-readable metadata fields within text or +binary files as long as those fields can be easily viewed by the user. + +3) No Modified Version of the Font Software may use the Reserved Font +Name(s) unless explicit written permission is granted by the corresponding +Copyright Holder. This restriction only applies to the primary font name as +presented to the users. + +4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font +Software shall not be used to promote, endorse or advertise any +Modified Version, except to acknowledge the contribution(s) of the +Copyright Holder(s) and the Author(s) or with their explicit written +permission. + +5) The Font Software, modified or unmodified, in part or in whole, +must be distributed entirely under this license, and must not be +distributed under any other license. The requirement for fonts to +remain under this license does not apply to any document created +using the Font Software. + +TERMINATION +This license becomes null and void if any of the above conditions are +not met. + +DISCLAIMER +THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT +OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE +COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL +DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM +OTHER DEALINGS IN THE FONT SOFTWARE. diff --git a/doc/logo/logo.png b/doc/logo/logo.png new file mode 100644 index 000000000..21c5884bc Binary files /dev/null and b/doc/logo/logo.png differ diff --git a/doc/man/restic-backup.1 b/doc/man/restic-backup.1 new file mode 100644 index 000000000..7feae2a20 --- /dev/null +++ b/doc/man/restic-backup.1 @@ -0,0 +1,152 @@ +.TH "restic backup" "1" "Jan 2017" "generated by `restic generate`" "" +.nh +.ad l + + +.SH NAME +.PP +restic\-backup \- Create a new backup of files and/or directories + + +.SH SYNOPSIS +.PP +\fBrestic backup [flags] FILE/DIR [FILE/DIR] ...\fP + + +.SH DESCRIPTION +.PP +The "backup" command creates a new snapshot and saves the files and directories +given as the arguments. + + +.SH OPTIONS +.PP +\fB\-e\fP, \fB\-\-exclude\fP=[] + exclude a \fB\fCpattern\fR (can be specified multiple times) + +.PP +\fB\-\-exclude\-caches\fP[=false] + excludes cache directories that are marked with a CACHEDIR.TAG file + +.PP +\fB\-\-exclude\-file\fP=[] + read exclude patterns from a \fB\fCfile\fR (can be specified multiple times) + +.PP +\fB\-\-exclude\-if\-present\fP=[] + takes filename[:header], exclude contents of directories containing filename (except filename itself) if header of that file is as provided (can be specified multiple times) + +.PP +\fB\-\-files\-from\fP=[] + read the files to backup from file (can be combined with file args/can be specified multiple times) + +.PP +\fB\-f\fP, \fB\-\-force\fP[=false] + force re\-reading the target files/directories (overrides the "parent" flag) + +.PP +\fB\-h\fP, \fB\-\-help\fP[=false] + help for backup + +.PP +\fB\-H\fP, \fB\-\-host\fP="" + set the \fB\fChostname\fR for the snapshot manually. 
To prevent an expensive rescan use the "parent" flag + +.PP +\fB\-x\fP, \fB\-\-one\-file\-system\fP[=false] + exclude other file systems + +.PP +\fB\-\-parent\fP="" + use this parent snapshot (default: last snapshot in the repo that has the same target files/directories) + +.PP +\fB\-\-stdin\fP[=false] + read backup from stdin + +.PP +\fB\-\-stdin\-filename\fP="stdin" + file name to use when reading from stdin + +.PP +\fB\-\-tag\fP=[] + add a \fB\fCtag\fR for the new snapshot (can be specified multiple times) + +.PP +\fB\-\-time\fP="" + time of the backup (ex. '2012\-11\-01 22:08:41') (default: now) + +.PP +\fB\-\-with\-atime\fP[=false] + store the atime for all files and directories + + +.SH OPTIONS INHERITED FROM PARENT COMMANDS +.PP +\fB\-\-cacert\fP=[] + \fB\fCfile\fR to load root certificates from (default: use system certificates) + +.PP +\fB\-\-cache\-dir\fP="" + set the cache directory. (default: use system default cache directory) + +.PP +\fB\-\-cleanup\-cache\fP[=false] + auto remove old cache directories + +.PP +\fB\-\-json\fP[=false] + set output mode to JSON for commands that support it + +.PP +\fB\-\-key\-hint\fP="" + key ID of key to try decrypting first (default: $RESTIC\_KEY\_HINT) + +.PP +\fB\-\-limit\-download\fP=0 + limits downloads to a maximum rate in KiB/s. (default: unlimited) + +.PP +\fB\-\-limit\-upload\fP=0 + limits uploads to a maximum rate in KiB/s. (default: unlimited) + +.PP +\fB\-\-no\-cache\fP[=false] + do not use a local cache + +.PP +\fB\-\-no\-lock\fP[=false] + do not lock the repo, this allows some operations on read\-only repos + +.PP +\fB\-o\fP, \fB\-\-option\fP=[] + set extended option (\fB\fCkey=value\fR, can be specified multiple times) + +.PP +\fB\-\-password\-command\fP="" + specify a shell command to obtain a password (default: $RESTIC\_PASSWORD\_COMMAND) + +.PP +\fB\-p\fP, \fB\-\-password\-file\fP="" + read the repository password from a file (default: $RESTIC\_PASSWORD\_FILE) + +.PP +\fB\-q\fP, \fB\-\-quiet\fP[=false] + do not output comprehensive progress report + +.PP +\fB\-r\fP, \fB\-\-repo\fP="" + repository to backup to or restore from (default: $RESTIC\_REPOSITORY) + +.PP +\fB\-\-tls\-client\-cert\fP="" + path to a file containing PEM encoded TLS client certificate and private key + +.PP +\fB\-v\fP, \fB\-\-verbose\fP[=0] + be verbose (specify \-\-verbose multiple times or level \fB\fCn\fR) + + +.SH SEE ALSO +.PP +\fBrestic(1)\fP diff --git a/doc/man/restic-cache.1 b/doc/man/restic-cache.1 new file mode 100644 index 000000000..007e88eb6 --- /dev/null +++ b/doc/man/restic-cache.1 @@ -0,0 +1,107 @@ +.TH "restic backup" "1" "Jan 2017" "generated by `restic generate`" "" +.nh +.ad l + + +.SH NAME +.PP +restic\-cache \- Operate on local cache directories + + +.SH SYNOPSIS +.PP +\fBrestic cache [flags]\fP + + +.SH DESCRIPTION +.PP +The "cache" command allows listing and cleaning local cache directories. + + +.SH OPTIONS +.PP +\fB\-\-cleanup\fP[=false] + remove old cache directories + +.PP +\fB\-h\fP, \fB\-\-help\fP[=false] + help for cache + +.PP +\fB\-\-max\-age\fP=30 + max age in \fB\fCdays\fR for cache directories to be considered old + +.PP +\fB\-\-no\-size\fP[=false] + do not output the size of the cache directories + + +.SH OPTIONS INHERITED FROM PARENT COMMANDS +.PP +\fB\-\-cacert\fP=[] + \fB\fCfile\fR to load root certificates from (default: use system certificates) + +.PP +\fB\-\-cache\-dir\fP="" + set the cache directory. 
(default: use system default cache directory) + +.PP +\fB\-\-cleanup\-cache\fP[=false] + auto remove old cache directories + +.PP +\fB\-\-json\fP[=false] + set output mode to JSON for commands that support it + +.PP +\fB\-\-key\-hint\fP="" + key ID of key to try decrypting first (default: $RESTIC\_KEY\_HINT) + +.PP +\fB\-\-limit\-download\fP=0 + limits downloads to a maximum rate in KiB/s. (default: unlimited) + +.PP +\fB\-\-limit\-upload\fP=0 + limits uploads to a maximum rate in KiB/s. (default: unlimited) + +.PP +\fB\-\-no\-cache\fP[=false] + do not use a local cache + +.PP +\fB\-\-no\-lock\fP[=false] + do not lock the repo, this allows some operations on read\-only repos + +.PP +\fB\-o\fP, \fB\-\-option\fP=[] + set extended option (\fB\fCkey=value\fR, can be specified multiple times) + +.PP +\fB\-\-password\-command\fP="" + specify a shell command to obtain a password (default: $RESTIC\_PASSWORD\_COMMAND) + +.PP +\fB\-p\fP, \fB\-\-password\-file\fP="" + read the repository password from a file (default: $RESTIC\_PASSWORD\_FILE) + +.PP +\fB\-q\fP, \fB\-\-quiet\fP[=false] + do not output comprehensive progress report + +.PP +\fB\-r\fP, \fB\-\-repo\fP="" + repository to backup to or restore from (default: $RESTIC\_REPOSITORY) + +.PP +\fB\-\-tls\-client\-cert\fP="" + path to a file containing PEM encoded TLS client certificate and private key + +.PP +\fB\-v\fP, \fB\-\-verbose\fP[=0] + be verbose (specify \-\-verbose multiple times or level \fB\fCn\fR) + + +.SH SEE ALSO +.PP +\fBrestic(1)\fP diff --git a/doc/man/restic-cat.1 b/doc/man/restic-cat.1 new file mode 100644 index 000000000..7f586cb9a --- /dev/null +++ b/doc/man/restic-cat.1 @@ -0,0 +1,95 @@ +.TH "restic backup" "1" "Jan 2017" "generated by `restic generate`" "" +.nh +.ad l + + +.SH NAME +.PP +restic\-cat \- Print internal objects to stdout + + +.SH SYNOPSIS +.PP +\fBrestic cat [flags] [pack|blob|snapshot|index|key|masterkey|config|lock] ID\fP + + +.SH DESCRIPTION +.PP +The "cat" command is used to print internal objects to stdout. + + +.SH OPTIONS +.PP +\fB\-h\fP, \fB\-\-help\fP[=false] + help for cat + + +.SH OPTIONS INHERITED FROM PARENT COMMANDS +.PP +\fB\-\-cacert\fP=[] + \fB\fCfile\fR to load root certificates from (default: use system certificates) + +.PP +\fB\-\-cache\-dir\fP="" + set the cache directory. (default: use system default cache directory) + +.PP +\fB\-\-cleanup\-cache\fP[=false] + auto remove old cache directories + +.PP +\fB\-\-json\fP[=false] + set output mode to JSON for commands that support it + +.PP +\fB\-\-key\-hint\fP="" + key ID of key to try decrypting first (default: $RESTIC\_KEY\_HINT) + +.PP +\fB\-\-limit\-download\fP=0 + limits downloads to a maximum rate in KiB/s. (default: unlimited) + +.PP +\fB\-\-limit\-upload\fP=0 + limits uploads to a maximum rate in KiB/s. 
(default: unlimited) + +.PP +\fB\-\-no\-cache\fP[=false] + do not use a local cache + +.PP +\fB\-\-no\-lock\fP[=false] + do not lock the repo, this allows some operations on read\-only repos + +.PP +\fB\-o\fP, \fB\-\-option\fP=[] + set extended option (\fB\fCkey=value\fR, can be specified multiple times) + +.PP +\fB\-\-password\-command\fP="" + specify a shell command to obtain a password (default: $RESTIC\_PASSWORD\_COMMAND) + +.PP +\fB\-p\fP, \fB\-\-password\-file\fP="" + read the repository password from a file (default: $RESTIC\_PASSWORD\_FILE) + +.PP +\fB\-q\fP, \fB\-\-quiet\fP[=false] + do not output comprehensive progress report + +.PP +\fB\-r\fP, \fB\-\-repo\fP="" + repository to backup to or restore from (default: $RESTIC\_REPOSITORY) + +.PP +\fB\-\-tls\-client\-cert\fP="" + path to a file containing PEM encoded TLS client certificate and private key + +.PP +\fB\-v\fP, \fB\-\-verbose\fP[=0] + be verbose (specify \-\-verbose multiple times or level \fB\fCn\fR) + + +.SH SEE ALSO +.PP +\fBrestic(1)\fP diff --git a/doc/man/restic-check.1 b/doc/man/restic-check.1 new file mode 100644 index 000000000..d5047306a --- /dev/null +++ b/doc/man/restic-check.1 @@ -0,0 +1,116 @@ +.TH "restic backup" "1" "Jan 2017" "generated by `restic generate`" "" +.nh +.ad l + + +.SH NAME +.PP +restic\-check \- Check the repository for errors + + +.SH SYNOPSIS +.PP +\fBrestic check [flags]\fP + + +.SH DESCRIPTION +.PP +The "check" command tests the repository for errors and reports any errors it +finds. It can also be used to read all data and therefore simulate a restore. + +.PP +By default, the "check" command will always load all data directly from the +repository and not use a local cache. + + +.SH OPTIONS +.PP +\fB\-\-check\-unused\fP[=false] + find unused blobs + +.PP +\fB\-h\fP, \fB\-\-help\fP[=false] + help for check + +.PP +\fB\-\-read\-data\fP[=false] + read all data blobs + +.PP +\fB\-\-read\-data\-subset\fP="" + read subset n of m data packs (format: \fB\fCn/m\fR) + +.PP +\fB\-\-with\-cache\fP[=false] + use the cache + + +.SH OPTIONS INHERITED FROM PARENT COMMANDS +.PP +\fB\-\-cacert\fP=[] + \fB\fCfile\fR to load root certificates from (default: use system certificates) + +.PP +\fB\-\-cache\-dir\fP="" + set the cache directory. (default: use system default cache directory) + +.PP +\fB\-\-cleanup\-cache\fP[=false] + auto remove old cache directories + +.PP +\fB\-\-json\fP[=false] + set output mode to JSON for commands that support it + +.PP +\fB\-\-key\-hint\fP="" + key ID of key to try decrypting first (default: $RESTIC\_KEY\_HINT) + +.PP +\fB\-\-limit\-download\fP=0 + limits downloads to a maximum rate in KiB/s. (default: unlimited) + +.PP +\fB\-\-limit\-upload\fP=0 + limits uploads to a maximum rate in KiB/s. 
(default: unlimited) + +.PP +\fB\-\-no\-cache\fP[=false] + do not use a local cache + +.PP +\fB\-\-no\-lock\fP[=false] + do not lock the repo, this allows some operations on read\-only repos + +.PP +\fB\-o\fP, \fB\-\-option\fP=[] + set extended option (\fB\fCkey=value\fR, can be specified multiple times) + +.PP +\fB\-\-password\-command\fP="" + specify a shell command to obtain a password (default: $RESTIC\_PASSWORD\_COMMAND) + +.PP +\fB\-p\fP, \fB\-\-password\-file\fP="" + read the repository password from a file (default: $RESTIC\_PASSWORD\_FILE) + +.PP +\fB\-q\fP, \fB\-\-quiet\fP[=false] + do not output comprehensive progress report + +.PP +\fB\-r\fP, \fB\-\-repo\fP="" + repository to backup to or restore from (default: $RESTIC\_REPOSITORY) + +.PP +\fB\-\-tls\-client\-cert\fP="" + path to a file containing PEM encoded TLS client certificate and private key + +.PP +\fB\-v\fP, \fB\-\-verbose\fP[=0] + be verbose (specify \-\-verbose multiple times or level \fB\fCn\fR) + + +.SH SEE ALSO +.PP +\fBrestic(1)\fP diff --git a/doc/man/restic-diff.1 b/doc/man/restic-diff.1 new file mode 100644 index 000000000..9bd147412 --- /dev/null +++ b/doc/man/restic-diff.1 @@ -0,0 +1,115 @@ +.TH "restic backup" "1" "Jan 2017" "generated by `restic generate`" "" +.nh +.ad l + + +.SH NAME +.PP +restic\-diff \- Show differences between two snapshots + + +.SH SYNOPSIS +.PP +\fBrestic diff snapshot\-ID snapshot\-ID [flags]\fP + + +.SH DESCRIPTION +.PP +The "diff" command shows differences from the first to the second snapshot. The +first characters in each line display what has happened to a particular file or +directory: + +.RS +.IP \(bu 2 ++ The item was added +.IP \(bu 2 +\- The item was removed +.IP \(bu 2 +U The metadata (access mode, timestamps, ...) for the item was updated +.IP \(bu 2 +M The file's content was modified +.IP \(bu 2 +T The type was changed, e.g. a file was made a symlink + +.RE + + +.SH OPTIONS +.PP +\fB\-h\fP, \fB\-\-help\fP[=false] + help for diff + +.PP +\fB\-\-metadata\fP[=false] + print changes in metadata + + +.SH OPTIONS INHERITED FROM PARENT COMMANDS +.PP +\fB\-\-cacert\fP=[] + \fB\fCfile\fR to load root certificates from (default: use system certificates) + +.PP +\fB\-\-cache\-dir\fP="" + set the cache directory. (default: use system default cache directory) + +.PP +\fB\-\-cleanup\-cache\fP[=false] + auto remove old cache directories + +.PP +\fB\-\-json\fP[=false] + set output mode to JSON for commands that support it + +.PP +\fB\-\-key\-hint\fP="" + key ID of key to try decrypting first (default: $RESTIC\_KEY\_HINT) + +.PP +\fB\-\-limit\-download\fP=0 + limits downloads to a maximum rate in KiB/s. (default: unlimited) + +.PP +\fB\-\-limit\-upload\fP=0 + limits uploads to a maximum rate in KiB/s. 
(default: unlimited) + +.PP +\fB\-\-no\-cache\fP[=false] + do not use a local cache + +.PP +\fB\-\-no\-lock\fP[=false] + do not lock the repo, this allows some operations on read\-only repos + +.PP +\fB\-o\fP, \fB\-\-option\fP=[] + set extended option (\fB\fCkey=value\fR, can be specified multiple times) + +.PP +\fB\-\-password\-command\fP="" + specify a shell command to obtain a password (default: $RESTIC\_PASSWORD\_COMMAND) + +.PP +\fB\-p\fP, \fB\-\-password\-file\fP="" + read the repository password from a file (default: $RESTIC\_PASSWORD\_FILE) + +.PP +\fB\-q\fP, \fB\-\-quiet\fP[=false] + do not output comprehensive progress report + +.PP +\fB\-r\fP, \fB\-\-repo\fP="" + repository to backup to or restore from (default: $RESTIC\_REPOSITORY) + +.PP +\fB\-\-tls\-client\-cert\fP="" + path to a file containing PEM encoded TLS client certificate and private key + +.PP +\fB\-v\fP, \fB\-\-verbose\fP[=0] + be verbose (specify \-\-verbose multiple times or level \fB\fCn\fR) + + +.SH SEE ALSO +.PP +\fBrestic(1)\fP diff --git a/doc/man/restic-dump.1 b/doc/man/restic-dump.1 new file mode 100644 index 000000000..fac6a06fc --- /dev/null +++ b/doc/man/restic-dump.1 @@ -0,0 +1,112 @@ +.TH "restic backup" "1" "Jan 2017" "generated by `restic generate`" "" +.nh +.ad l + + +.SH NAME +.PP +restic\-dump \- Print a backed\-up file to stdout + + +.SH SYNOPSIS +.PP +\fBrestic dump [flags] snapshotID file\fP + + +.SH DESCRIPTION +.PP +The "dump" command extracts a single file from a snapshot from the repository and +prints its contents to stdout. + +.PP +The special snapshot "latest" can be used to use the latest snapshot in the +repository. + + +.SH OPTIONS +.PP +\fB\-h\fP, \fB\-\-help\fP[=false] + help for dump + +.PP +\fB\-H\fP, \fB\-\-host\fP="" + only consider snapshots for this host when the snapshot ID is "latest" + +.PP +\fB\-\-path\fP=[] + only consider snapshots which include this (absolute) \fB\fCpath\fR for snapshot ID "latest" + +.PP +\fB\-\-tag\fP=[] + only consider snapshots which include this \fB\fCtaglist\fR for snapshot ID "latest" + + +.SH OPTIONS INHERITED FROM PARENT COMMANDS +.PP +\fB\-\-cacert\fP=[] + \fB\fCfile\fR to load root certificates from (default: use system certificates) + +.PP +\fB\-\-cache\-dir\fP="" + set the cache directory. (default: use system default cache directory) + +.PP +\fB\-\-cleanup\-cache\fP[=false] + auto remove old cache directories + +.PP +\fB\-\-json\fP[=false] + set output mode to JSON for commands that support it + +.PP +\fB\-\-key\-hint\fP="" + key ID of key to try decrypting first (default: $RESTIC\_KEY\_HINT) + +.PP +\fB\-\-limit\-download\fP=0 + limits downloads to a maximum rate in KiB/s. (default: unlimited) + +.PP +\fB\-\-limit\-upload\fP=0 + limits uploads to a maximum rate in KiB/s. 
(default: unlimited) + +.PP +\fB\-\-no\-cache\fP[=false] + do not use a local cache + +.PP +\fB\-\-no\-lock\fP[=false] + do not lock the repo, this allows some operations on read\-only repos + +.PP +\fB\-o\fP, \fB\-\-option\fP=[] + set extended option (\fB\fCkey=value\fR, can be specified multiple times) + +.PP +\fB\-\-password\-command\fP="" + specify a shell command to obtain a password (default: $RESTIC\_PASSWORD\_COMMAND) + +.PP +\fB\-p\fP, \fB\-\-password\-file\fP="" + read the repository password from a file (default: $RESTIC\_PASSWORD\_FILE) + +.PP +\fB\-q\fP, \fB\-\-quiet\fP[=false] + do not output comprehensive progress report + +.PP +\fB\-r\fP, \fB\-\-repo\fP="" + repository to backup to or restore from (default: $RESTIC\_REPOSITORY) + +.PP +\fB\-\-tls\-client\-cert\fP="" + path to a file containing PEM encoded TLS client certificate and private key + +.PP +\fB\-v\fP, \fB\-\-verbose\fP[=0] + be verbose (specify \-\-verbose multiple times or level \fB\fCn\fR) + + +.SH SEE ALSO +.PP +\fBrestic(1)\fP diff --git a/doc/man/restic-find.1 b/doc/man/restic-find.1 new file mode 100644 index 000000000..dd4f6bf0f --- /dev/null +++ b/doc/man/restic-find.1 @@ -0,0 +1,161 @@ +.TH "restic backup" "1" "Jan 2017" "generated by `restic generate`" "" +.nh +.ad l + + +.SH NAME +.PP +restic\-find \- Find a file, a directory or restic IDs + + +.SH SYNOPSIS +.PP +\fBrestic find [flags] PATTERN...\fP + + +.SH DESCRIPTION +.PP +The "find" command searches for files or directories in snapshots stored in the +repo. +It can also be used to search for restic blobs or trees for troubleshooting. + + +.SH OPTIONS +.PP +\fB\-\-blob\fP[=false] + pattern is a blob\-ID + +.PP +\fB\-h\fP, \fB\-\-help\fP[=false] + help for find + +.PP +\fB\-H\fP, \fB\-\-host\fP="" + only consider snapshots for this \fB\fChost\fR, when no snapshot ID is given + +.PP +\fB\-i\fP, \fB\-\-ignore\-case\fP[=false] + ignore case for pattern + +.PP +\fB\-l\fP, \fB\-\-long\fP[=false] + use a long listing format showing size and mode + +.PP +\fB\-N\fP, \fB\-\-newest\fP="" + newest modification date/time + +.PP +\fB\-O\fP, \fB\-\-oldest\fP="" + oldest modification date/time + +.PP +\fB\-\-pack\fP[=false] + pattern is a pack\-ID + +.PP +\fB\-\-path\fP=[] + only consider snapshots which include this (absolute) \fB\fCpath\fR, when no snapshot\-ID is given + +.PP +\fB\-\-show\-pack\-id\fP[=false] + display the pack\-ID the blobs belong to (with \-\-blob) + +.PP +\fB\-s\fP, \fB\-\-snapshot\fP=[] + snapshot \fB\fCid\fR to search in (can be given multiple times) + +.PP +\fB\-\-tag\fP=[] + only consider snapshots which include this \fB\fCtaglist\fR, when no snapshot\-ID is given + +.PP +\fB\-\-tree\fP[=false] + pattern is a tree\-ID + + +.SH OPTIONS INHERITED FROM PARENT COMMANDS +.PP +\fB\-\-cacert\fP=[] + \fB\fCfile\fR to load root certificates from (default: use system certificates) + +.PP +\fB\-\-cache\-dir\fP="" + set the cache directory. (default: use system default cache directory) + +.PP +\fB\-\-cleanup\-cache\fP[=false] + auto remove old cache directories + +.PP +\fB\-\-json\fP[=false] + set output mode to JSON for commands that support it + +.PP +\fB\-\-key\-hint\fP="" + key ID of key to try decrypting first (default: $RESTIC\_KEY\_HINT) + +.PP +\fB\-\-limit\-download\fP=0 + limits downloads to a maximum rate in KiB/s. (default: unlimited) + +.PP +\fB\-\-limit\-upload\fP=0 + limits uploads to a maximum rate in KiB/s. 
(default: unlimited) + +.PP +\fB\-\-no\-cache\fP[=false] + do not use a local cache + +.PP +\fB\-\-no\-lock\fP[=false] + do not lock the repo, this allows some operations on read\-only repos + +.PP +\fB\-o\fP, \fB\-\-option\fP=[] + set extended option (\fB\fCkey=value\fR, can be specified multiple times) + +.PP +\fB\-\-password\-command\fP="" + specify a shell command to obtain a password (default: $RESTIC\_PASSWORD\_COMMAND) + +.PP +\fB\-p\fP, \fB\-\-password\-file\fP="" + read the repository password from a file (default: $RESTIC\_PASSWORD\_FILE) + +.PP +\fB\-q\fP, \fB\-\-quiet\fP[=false] + do not output comprehensive progress report + +.PP +\fB\-r\fP, \fB\-\-repo\fP="" + repository to backup to or restore from (default: $RESTIC\_REPOSITORY) + +.PP +\fB\-\-tls\-client\-cert\fP="" + path to a file containing PEM encoded TLS client certificate and private key + +.PP +\fB\-v\fP, \fB\-\-verbose\fP[=0] + be verbose (specify \-\-verbose multiple times or level \fB\fCn\fR) + + +.SH EXAMPLE +.PP +.RS + +.nf +restic find config.json +restic find \-\-json "*.yml" "*.json" +restic find \-\-json \-\-blob 420f620f b46ebe8a ddd38656 +restic find \-\-show\-pack\-id \-\-blob 420f620f +restic find \-\-tree 577c2bc9 f81f2e22 a62827a9 +restic find \-\-pack 025c1d06 + +.fi +.RE + + +.SH SEE ALSO +.PP +\fBrestic(1)\fP diff --git a/doc/man/restic-forget.1 b/doc/man/restic-forget.1 new file mode 100644 index 000000000..17e79f336 --- /dev/null +++ b/doc/man/restic-forget.1 @@ -0,0 +1,158 @@ +.TH "restic backup" "1" "Jan 2017" "generated by `restic generate`" "" +.nh +.ad l + + +.SH NAME +.PP +restic\-forget \- Remove snapshots from the repository + + +.SH SYNOPSIS +.PP +\fBrestic forget [flags] [snapshot ID] [...]\fP + + +.SH DESCRIPTION +.PP +The "forget" command removes snapshots according to a policy. Please note that +this command really only deletes the snapshot object in the repository, which +is a reference to data stored there. In order to remove this (now unreferenced) +data after 'forget' was run successfully, see the 'prune' command. + + +.SH OPTIONS +.PP +\fB\-l\fP, \fB\-\-keep\-last\fP=0 + keep the last \fB\fCn\fR snapshots + +.PP +\fB\-H\fP, \fB\-\-keep\-hourly\fP=0 + keep the last \fB\fCn\fR hourly snapshots + +.PP +\fB\-d\fP, \fB\-\-keep\-daily\fP=0 + keep the last \fB\fCn\fR daily snapshots + +.PP +\fB\-w\fP, \fB\-\-keep\-weekly\fP=0 + keep the last \fB\fCn\fR weekly snapshots + +.PP +\fB\-m\fP, \fB\-\-keep\-monthly\fP=0 + keep the last \fB\fCn\fR monthly snapshots + +.PP +\fB\-y\fP, \fB\-\-keep\-yearly\fP=0 + keep the last \fB\fCn\fR yearly snapshots + +.PP +\fB\-\-keep\-within\fP= + keep snapshots that are newer than \fB\fCduration\fR (eg. 
1y5m7d2h) relative to the latest snapshot + +.PP +\fB\-\-keep\-tag\fP=[] + keep snapshots with this \fB\fCtaglist\fR (can be specified multiple times) + +.PP +\fB\-\-host\fP="" + only consider snapshots with the given \fB\fChost\fR + +.PP +\fB\-\-tag\fP=[] + only consider snapshots which include this \fB\fCtaglist\fR in the format \fB\fCtag[,tag,...]\fR (can be specified multiple times) + +.PP +\fB\-\-path\fP=[] + only consider snapshots which include this (absolute) \fB\fCpath\fR (can be specified multiple times) + +.PP +\fB\-c\fP, \fB\-\-compact\fP[=false] + use compact format + +.PP +\fB\-g\fP, \fB\-\-group\-by\fP="host,paths" + string for grouping snapshots by host,paths,tags + +.PP +\fB\-n\fP, \fB\-\-dry\-run\fP[=false] + do not delete anything, just print what would be done + +.PP +\fB\-\-prune\fP[=false] + automatically run the 'prune' command if snapshots have been removed + +.PP +\fB\-h\fP, \fB\-\-help\fP[=false] + help for forget + + +.SH OPTIONS INHERITED FROM PARENT COMMANDS +.PP +\fB\-\-cacert\fP=[] + \fB\fCfile\fR to load root certificates from (default: use system certificates) + +.PP +\fB\-\-cache\-dir\fP="" + set the cache directory. (default: use system default cache directory) + +.PP +\fB\-\-cleanup\-cache\fP[=false] + auto remove old cache directories + +.PP +\fB\-\-json\fP[=false] + set output mode to JSON for commands that support it + +.PP +\fB\-\-key\-hint\fP="" + key ID of key to try decrypting first (default: $RESTIC\_KEY\_HINT) + +.PP +\fB\-\-limit\-download\fP=0 + limits downloads to a maximum rate in KiB/s. (default: unlimited) + +.PP +\fB\-\-limit\-upload\fP=0 + limits uploads to a maximum rate in KiB/s. (default: unlimited) + +.PP +\fB\-\-no\-cache\fP[=false] + do not use a local cache + +.PP +\fB\-\-no\-lock\fP[=false] + do not lock the repo, this allows some operations on read\-only repos + +.PP +\fB\-o\fP, \fB\-\-option\fP=[] + set extended option (\fB\fCkey=value\fR, can be specified multiple times) + +.PP +\fB\-\-password\-command\fP="" + specify a shell command to obtain a password (default: $RESTIC\_PASSWORD\_COMMAND) + +.PP +\fB\-p\fP, \fB\-\-password\-file\fP="" + read the repository password from a file (default: $RESTIC\_PASSWORD\_FILE) + +.PP +\fB\-q\fP, \fB\-\-quiet\fP[=false] + do not output comprehensive progress report + +.PP +\fB\-r\fP, \fB\-\-repo\fP="" + repository to backup to or restore from (default: $RESTIC\_REPOSITORY) + +.PP +\fB\-\-tls\-client\-cert\fP="" + path to a file containing PEM encoded TLS client certificate and private key + +.PP +\fB\-v\fP, \fB\-\-verbose\fP[=0] + be verbose (specify \-\-verbose multiple times or level \fB\fCn\fR) + + +.SH SEE ALSO +.PP +\fBrestic(1)\fP diff --git a/doc/man/restic-generate.1 b/doc/man/restic-generate.1 new file mode 100644 index 000000000..91c963152 --- /dev/null +++ b/doc/man/restic-generate.1 @@ -0,0 +1,108 @@ +.TH "restic backup" "1" "Jan 2017" "generated by `restic generate`" "" +.nh +.ad l + + +.SH NAME +.PP +restic\-generate \- Generate manual pages and auto\-completion files (bash, zsh) + + +.SH SYNOPSIS +.PP +\fBrestic generate [command] [flags]\fP + + +.SH DESCRIPTION +.PP +The "generate" command writes automatically generated files like the man pages +and the auto\-completion files for bash and zsh). 
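+
+.PP
+As a usage sketch, the following writes the man pages into the directory
+"doc/man" and a bash completion script to "/etc/bash\_completion.d/restic"
+(both paths are arbitrary examples):
+
+.PP
+.RS
+
+.nf
+restic generate \-\-man doc/man
+restic generate \-\-bash\-completion /etc/bash\_completion.d/restic
+
+.fi
+.RE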
+ + +.SH OPTIONS +.PP +\fB\-\-bash\-completion\fP="" + write bash completion \fB\fCfile\fR + +.PP +\fB\-h\fP, \fB\-\-help\fP[=false] + help for generate + +.PP +\fB\-\-man\fP="" + write man pages to \fB\fCdirectory\fR + +.PP +\fB\-\-zsh\-completion\fP="" + write zsh completion \fB\fCfile\fR + + +.SH OPTIONS INHERITED FROM PARENT COMMANDS +.PP +\fB\-\-cacert\fP=[] + \fB\fCfile\fR to load root certificates from (default: use system certificates) + +.PP +\fB\-\-cache\-dir\fP="" + set the cache directory. (default: use system default cache directory) + +.PP +\fB\-\-cleanup\-cache\fP[=false] + auto remove old cache directories + +.PP +\fB\-\-json\fP[=false] + set output mode to JSON for commands that support it + +.PP +\fB\-\-key\-hint\fP="" + key ID of key to try decrypting first (default: $RESTIC\_KEY\_HINT) + +.PP +\fB\-\-limit\-download\fP=0 + limits downloads to a maximum rate in KiB/s. (default: unlimited) + +.PP +\fB\-\-limit\-upload\fP=0 + limits uploads to a maximum rate in KiB/s. (default: unlimited) + +.PP +\fB\-\-no\-cache\fP[=false] + do not use a local cache + +.PP +\fB\-\-no\-lock\fP[=false] + do not lock the repo, this allows some operations on read\-only repos + +.PP +\fB\-o\fP, \fB\-\-option\fP=[] + set extended option (\fB\fCkey=value\fR, can be specified multiple times) + +.PP +\fB\-\-password\-command\fP="" + specify a shell command to obtain a password (default: $RESTIC\_PASSWORD\_COMMAND) + +.PP +\fB\-p\fP, \fB\-\-password\-file\fP="" + read the repository password from a file (default: $RESTIC\_PASSWORD\_FILE) + +.PP +\fB\-q\fP, \fB\-\-quiet\fP[=false] + do not output comprehensive progress report + +.PP +\fB\-r\fP, \fB\-\-repo\fP="" + repository to backup to or restore from (default: $RESTIC\_REPOSITORY) + +.PP +\fB\-\-tls\-client\-cert\fP="" + path to a file containing PEM encoded TLS client certificate and private key + +.PP +\fB\-v\fP, \fB\-\-verbose\fP[=0] + be verbose (specify \-\-verbose multiple times or level \fB\fCn\fR) + + +.SH SEE ALSO +.PP +\fBrestic(1)\fP diff --git a/doc/man/restic-init.1 b/doc/man/restic-init.1 new file mode 100644 index 000000000..f459ce0a0 --- /dev/null +++ b/doc/man/restic-init.1 @@ -0,0 +1,95 @@ +.TH "restic backup" "1" "Jan 2017" "generated by `restic generate`" "" +.nh +.ad l + + +.SH NAME +.PP +restic\-init \- Initialize a new repository + + +.SH SYNOPSIS +.PP +\fBrestic init [flags]\fP + + +.SH DESCRIPTION +.PP +The "init" command initializes a new repository. + + +.SH OPTIONS +.PP +\fB\-h\fP, \fB\-\-help\fP[=false] + help for init + + +.SH OPTIONS INHERITED FROM PARENT COMMANDS +.PP +\fB\-\-cacert\fP=[] + \fB\fCfile\fR to load root certificates from (default: use system certificates) + +.PP +\fB\-\-cache\-dir\fP="" + set the cache directory. (default: use system default cache directory) + +.PP +\fB\-\-cleanup\-cache\fP[=false] + auto remove old cache directories + +.PP +\fB\-\-json\fP[=false] + set output mode to JSON for commands that support it + +.PP +\fB\-\-key\-hint\fP="" + key ID of key to try decrypting first (default: $RESTIC\_KEY\_HINT) + +.PP +\fB\-\-limit\-download\fP=0 + limits downloads to a maximum rate in KiB/s. (default: unlimited) + +.PP +\fB\-\-limit\-upload\fP=0 + limits uploads to a maximum rate in KiB/s. 
(default: unlimited) + +.PP +\fB\-\-no\-cache\fP[=false] + do not use a local cache + +.PP +\fB\-\-no\-lock\fP[=false] + do not lock the repo, this allows some operations on read\-only repos + +.PP +\fB\-o\fP, \fB\-\-option\fP=[] + set extended option (\fB\fCkey=value\fR, can be specified multiple times) + +.PP +\fB\-\-password\-command\fP="" + specify a shell command to obtain a password (default: $RESTIC\_PASSWORD\_COMMAND) + +.PP +\fB\-p\fP, \fB\-\-password\-file\fP="" + read the repository password from a file (default: $RESTIC\_PASSWORD\_FILE) + +.PP +\fB\-q\fP, \fB\-\-quiet\fP[=false] + do not output comprehensive progress report + +.PP +\fB\-r\fP, \fB\-\-repo\fP="" + repository to backup to or restore from (default: $RESTIC\_REPOSITORY) + +.PP +\fB\-\-tls\-client\-cert\fP="" + path to a file containing PEM encoded TLS client certificate and private key + +.PP +\fB\-v\fP, \fB\-\-verbose\fP[=0] + be verbose (specify \-\-verbose multiple times or level \fB\fCn\fR) + + +.SH SEE ALSO +.PP +\fBrestic(1)\fP diff --git a/doc/man/restic-key.1 b/doc/man/restic-key.1 new file mode 100644 index 000000000..950683c18 --- /dev/null +++ b/doc/man/restic-key.1 @@ -0,0 +1,99 @@ +.TH "restic backup" "1" "Jan 2017" "generated by `restic generate`" "" +.nh +.ad l + + +.SH NAME +.PP +restic\-key \- Manage keys (passwords) + + +.SH SYNOPSIS +.PP +\fBrestic key [list|add|remove|passwd] [ID] [flags]\fP + + +.SH DESCRIPTION +.PP +The "key" command manages keys (passwords) for accessing the repository. + + +.SH OPTIONS +.PP +\fB\-h\fP, \fB\-\-help\fP[=false] + help for key + +.PP +\fB\-\-new\-password\-file\fP="" + the file from which to load a new password + + +.SH OPTIONS INHERITED FROM PARENT COMMANDS +.PP +\fB\-\-cacert\fP=[] + \fB\fCfile\fR to load root certificates from (default: use system certificates) + +.PP +\fB\-\-cache\-dir\fP="" + set the cache directory. (default: use system default cache directory) + +.PP +\fB\-\-cleanup\-cache\fP[=false] + auto remove old cache directories + +.PP +\fB\-\-json\fP[=false] + set output mode to JSON for commands that support it + +.PP +\fB\-\-key\-hint\fP="" + key ID of key to try decrypting first (default: $RESTIC\_KEY\_HINT) + +.PP +\fB\-\-limit\-download\fP=0 + limits downloads to a maximum rate in KiB/s. (default: unlimited) + +.PP +\fB\-\-limit\-upload\fP=0 + limits uploads to a maximum rate in KiB/s. 
(default: unlimited) + +.PP +\fB\-\-no\-cache\fP[=false] + do not use a local cache + +.PP +\fB\-\-no\-lock\fP[=false] + do not lock the repo, this allows some operations on read\-only repos + +.PP +\fB\-o\fP, \fB\-\-option\fP=[] + set extended option (\fB\fCkey=value\fR, can be specified multiple times) + +.PP +\fB\-\-password\-command\fP="" + specify a shell command to obtain a password (default: $RESTIC\_PASSWORD\_COMMAND) + +.PP +\fB\-p\fP, \fB\-\-password\-file\fP="" + read the repository password from a file (default: $RESTIC\_PASSWORD\_FILE) + +.PP +\fB\-q\fP, \fB\-\-quiet\fP[=false] + do not output comprehensive progress report + +.PP +\fB\-r\fP, \fB\-\-repo\fP="" + repository to backup to or restore from (default: $RESTIC\_REPOSITORY) + +.PP +\fB\-\-tls\-client\-cert\fP="" + path to a file containing PEM encoded TLS client certificate and private key + +.PP +\fB\-v\fP, \fB\-\-verbose\fP[=0] + be verbose (specify \-\-verbose multiple times or level \fB\fCn\fR) + + +.SH SEE ALSO +.PP +\fBrestic(1)\fP diff --git a/doc/man/restic-list.1 b/doc/man/restic-list.1 new file mode 100644 index 000000000..4f50db46e --- /dev/null +++ b/doc/man/restic-list.1 @@ -0,0 +1,95 @@ +.TH "restic backup" "1" "Jan 2017" "generated by `restic generate`" "" +.nh +.ad l + + +.SH NAME +.PP +restic\-list \- List objects in the repository + + +.SH SYNOPSIS +.PP +\fBrestic list [blobs|packs|index|snapshots|keys|locks] [flags]\fP + + +.SH DESCRIPTION +.PP +The "list" command allows listing objects in the repository based on type. + + +.SH OPTIONS +.PP +\fB\-h\fP, \fB\-\-help\fP[=false] + help for list + + +.SH OPTIONS INHERITED FROM PARENT COMMANDS +.PP +\fB\-\-cacert\fP=[] + \fB\fCfile\fR to load root certificates from (default: use system certificates) + +.PP +\fB\-\-cache\-dir\fP="" + set the cache directory. (default: use system default cache directory) + +.PP +\fB\-\-cleanup\-cache\fP[=false] + auto remove old cache directories + +.PP +\fB\-\-json\fP[=false] + set output mode to JSON for commands that support it + +.PP +\fB\-\-key\-hint\fP="" + key ID of key to try decrypting first (default: $RESTIC\_KEY\_HINT) + +.PP +\fB\-\-limit\-download\fP=0 + limits downloads to a maximum rate in KiB/s. (default: unlimited) + +.PP +\fB\-\-limit\-upload\fP=0 + limits uploads to a maximum rate in KiB/s. 
(default: unlimited) + +.PP +\fB\-\-no\-cache\fP[=false] + do not use a local cache + +.PP +\fB\-\-no\-lock\fP[=false] + do not lock the repo, this allows some operations on read\-only repos + +.PP +\fB\-o\fP, \fB\-\-option\fP=[] + set extended option (\fB\fCkey=value\fR, can be specified multiple times) + +.PP +\fB\-\-password\-command\fP="" + specify a shell command to obtain a password (default: $RESTIC\_PASSWORD\_COMMAND) + +.PP +\fB\-p\fP, \fB\-\-password\-file\fP="" + read the repository password from a file (default: $RESTIC\_PASSWORD\_FILE) + +.PP +\fB\-q\fP, \fB\-\-quiet\fP[=false] + do not output comprehensive progress report + +.PP +\fB\-r\fP, \fB\-\-repo\fP="" + repository to backup to or restore from (default: $RESTIC\_REPOSITORY) + +.PP +\fB\-\-tls\-client\-cert\fP="" + path to a file containing PEM encoded TLS client certificate and private key + +.PP +\fB\-v\fP, \fB\-\-verbose\fP[=0] + be verbose (specify \-\-verbose multiple times or level \fB\fCn\fR) + + +.SH SEE ALSO +.PP +\fBrestic(1)\fP diff --git a/doc/man/restic-ls.1 b/doc/man/restic-ls.1 new file mode 100644 index 000000000..e838ca1ac --- /dev/null +++ b/doc/man/restic-ls.1 @@ -0,0 +1,130 @@ +.TH "restic backup" "1" "Jan 2017" "generated by `restic generate`" "" +.nh +.ad l + + +.SH NAME +.PP +restic\-ls \- List files in a snapshot + + +.SH SYNOPSIS +.PP +\fBrestic ls [flags] [snapshotID] [dir...]\fP + + +.SH DESCRIPTION +.PP +The "ls" command lists files and directories in a snapshot. + +.PP +The special snapshot ID "latest" can be used to list files and +directories of the latest snapshot in the repository. The +\-\-host flag can be used in conjunction to select the latest +snapshot originating from a certain host only. + +.PP +File listings can optionally be filtered by directories. Any +positional arguments after the snapshot ID are interpreted as +absolute directory paths, and only files inside those directories +will be listed. If the \-\-recursive flag is used, then the filter +will allow traversing into matching directories' subfolders. +Any directory paths specified must be absolute (starting with +a path separator); paths use the forward slash '/' as separator. + + +.SH OPTIONS +.PP +\fB\-h\fP, \fB\-\-help\fP[=false] + help for ls + +.PP +\fB\-H\fP, \fB\-\-host\fP="" + only consider snapshots for this \fB\fChost\fR, when no snapshot ID is given + +.PP +\fB\-l\fP, \fB\-\-long\fP[=false] + use a long listing format showing size and mode + +.PP +\fB\-\-path\fP=[] + only consider snapshots which include this (absolute) \fB\fCpath\fR, when no snapshot ID is given + +.PP +\fB\-\-recursive\fP[=false] + include files in subfolders of the listed directories + +.PP +\fB\-\-tag\fP=[] + only consider snapshots which include this \fB\fCtaglist\fR, when no snapshot ID is given + + +.SH OPTIONS INHERITED FROM PARENT COMMANDS +.PP +\fB\-\-cacert\fP=[] + \fB\fCfile\fR to load root certificates from (default: use system certificates) + +.PP +\fB\-\-cache\-dir\fP="" + set the cache directory. (default: use system default cache directory) + +.PP +\fB\-\-cleanup\-cache\fP[=false] + auto remove old cache directories + +.PP +\fB\-\-json\fP[=false] + set output mode to JSON for commands that support it + +.PP +\fB\-\-key\-hint\fP="" + key ID of key to try decrypting first (default: $RESTIC\_KEY\_HINT) + +.PP +\fB\-\-limit\-download\fP=0 + limits downloads to a maximum rate in KiB/s. (default: unlimited) + +.PP +\fB\-\-limit\-upload\fP=0 + limits uploads to a maximum rate in KiB/s. 
(default: unlimited) + +.PP +\fB\-\-no\-cache\fP[=false] + do not use a local cache + +.PP +\fB\-\-no\-lock\fP[=false] + do not lock the repo, this allows some operations on read\-only repos + +.PP +\fB\-o\fP, \fB\-\-option\fP=[] + set extended option (\fB\fCkey=value\fR, can be specified multiple times) + +.PP +\fB\-\-password\-command\fP="" + specify a shell command to obtain a password (default: $RESTIC\_PASSWORD\_COMMAND) + +.PP +\fB\-p\fP, \fB\-\-password\-file\fP="" + read the repository password from a file (default: $RESTIC\_PASSWORD\_FILE) + +.PP +\fB\-q\fP, \fB\-\-quiet\fP[=false] + do not output comprehensive progress report + +.PP +\fB\-r\fP, \fB\-\-repo\fP="" + repository to backup to or restore from (default: $RESTIC\_REPOSITORY) + +.PP +\fB\-\-tls\-client\-cert\fP="" + path to a file containing PEM encoded TLS client certificate and private key + +.PP +\fB\-v\fP, \fB\-\-verbose\fP[=0] + be verbose (specify \-\-verbose multiple times or level \fB\fCn\fR) + + +.SH SEE ALSO +.PP +\fBrestic(1)\fP diff --git a/doc/man/restic-migrate.1 b/doc/man/restic-migrate.1 new file mode 100644 index 000000000..24b577b9e --- /dev/null +++ b/doc/man/restic-migrate.1 @@ -0,0 +1,100 @@ +.TH "restic backup" "1" "Jan 2017" "generated by `restic generate`" "" +.nh +.ad l + + +.SH NAME +.PP +restic\-migrate \- Apply migrations + + +.SH SYNOPSIS +.PP +\fBrestic migrate [name] [flags]\fP + + +.SH DESCRIPTION +.PP +The "migrate" command applies migrations to a repository. When no migration +name is explicitly given, a list of migrations that can be applied is printed. + + +.SH OPTIONS +.PP +\fB\-f\fP, \fB\-\-force\fP[=false] + apply a migration a second time + +.PP +\fB\-h\fP, \fB\-\-help\fP[=false] + help for migrate + + +.SH OPTIONS INHERITED FROM PARENT COMMANDS +.PP +\fB\-\-cacert\fP=[] + \fB\fCfile\fR to load root certificates from (default: use system certificates) + +.PP +\fB\-\-cache\-dir\fP="" + set the cache directory. (default: use system default cache directory) + +.PP +\fB\-\-cleanup\-cache\fP[=false] + auto remove old cache directories + +.PP +\fB\-\-json\fP[=false] + set output mode to JSON for commands that support it + +.PP +\fB\-\-key\-hint\fP="" + key ID of key to try decrypting first (default: $RESTIC\_KEY\_HINT) + +.PP +\fB\-\-limit\-download\fP=0 + limits downloads to a maximum rate in KiB/s. (default: unlimited) + +.PP +\fB\-\-limit\-upload\fP=0 + limits uploads to a maximum rate in KiB/s. 
(default: unlimited) + +.PP +\fB\-\-no\-cache\fP[=false] + do not use a local cache + +.PP +\fB\-\-no\-lock\fP[=false] + do not lock the repo, this allows some operations on read\-only repos + +.PP +\fB\-o\fP, \fB\-\-option\fP=[] + set extended option (\fB\fCkey=value\fR, can be specified multiple times) + +.PP +\fB\-\-password\-command\fP="" + specify a shell command to obtain a password (default: $RESTIC\_PASSWORD\_COMMAND) + +.PP +\fB\-p\fP, \fB\-\-password\-file\fP="" + read the repository password from a file (default: $RESTIC\_PASSWORD\_FILE) + +.PP +\fB\-q\fP, \fB\-\-quiet\fP[=false] + do not output comprehensive progress report + +.PP +\fB\-r\fP, \fB\-\-repo\fP="" + repository to backup to or restore from (default: $RESTIC\_REPOSITORY) + +.PP +\fB\-\-tls\-client\-cert\fP="" + path to a file containing PEM encoded TLS client certificate and private key + +.PP +\fB\-v\fP, \fB\-\-verbose\fP[=0] + be verbose (specify \-\-verbose multiple times or level \fB\fCn\fR) + + +.SH SEE ALSO +.PP +\fBrestic(1)\fP diff --git a/doc/man/restic-mount.1 b/doc/man/restic-mount.1 new file mode 100644 index 000000000..58e3bc7b3 --- /dev/null +++ b/doc/man/restic-mount.1 @@ -0,0 +1,160 @@ +.TH "restic backup" "1" "Jan 2017" "generated by `restic generate`" "" +.nh +.ad l + + +.SH NAME +.PP +restic\-mount \- Mount the repository + + +.SH SYNOPSIS +.PP +\fBrestic mount [flags] mountpoint\fP + + +.SH DESCRIPTION +.PP +The "mount" command mounts the repository via fuse to a directory. This is a +read\-only mount. + + +.SH Snapshot Directories +.PP +If you need a different template for all directories that contain snapshots, +you can pass a template via \-\-snapshot\-template. Example without colons: + +.PP +.RS + +.nf +\-\-snapshot\-template "2006\-01\-02\_15\-04\-05" + +.fi +.RE + +.PP +You need to specify a sample format for exactly the following timestamp: + +.PP +.RS + +.nf +Mon Jan 2 15:04:05 \-0700 MST 2006 + +.fi +.RE + +.PP +For details please see the documentation for time.Format() at: + +\[la]https://godoc.org/time#Time.Format\[ra] + + +.SH OPTIONS +.PP +\fB\-\-allow\-other\fP[=false] + allow other users to access the data in the mounted directory + +.PP +\fB\-\-allow\-root\fP[=false] + allow root user to access the data in the mounted directory + +.PP +\fB\-h\fP, \fB\-\-help\fP[=false] + help for mount + +.PP +\fB\-H\fP, \fB\-\-host\fP="" + only consider snapshots for this host + +.PP +\fB\-\-no\-default\-permissions\fP[=false] + for 'allow\-other', ignore Unix permissions and allow users to read all snapshot files + +.PP +\fB\-\-owner\-root\fP[=false] + use 'root' as the owner of files and dirs + +.PP +\fB\-\-path\fP=[] + only consider snapshots which include this (absolute) \fB\fCpath\fR + +.PP +\fB\-\-snapshot\-template\fP="2006\-01\-02T15:04:05Z07:00" + set \fB\fCtemplate\fR to use for snapshot dirs + +.PP +\fB\-\-tag\fP=[] + only consider snapshots which include this \fB\fCtaglist\fR + + +.SH OPTIONS INHERITED FROM PARENT COMMANDS +.PP +\fB\-\-cacert\fP=[] + \fB\fCfile\fR to load root certificates from (default: use system certificates) + +.PP +\fB\-\-cache\-dir\fP="" + set the cache directory. (default: use system default cache directory) + +.PP +\fB\-\-cleanup\-cache\fP[=false] + auto remove old cache directories + +.PP +\fB\-\-json\fP[=false] + set output mode to JSON for commands that support it + +.PP +\fB\-\-key\-hint\fP="" + key ID of key to try decrypting first (default: $RESTIC\_KEY\_HINT) + +.PP +\fB\-\-limit\-download\fP=0 + limits downloads to a maximum rate in KiB/s. 
(default: unlimited) + +.PP +\fB\-\-limit\-upload\fP=0 + limits uploads to a maximum rate in KiB/s. (default: unlimited) + +.PP +\fB\-\-no\-cache\fP[=false] + do not use a local cache + +.PP +\fB\-\-no\-lock\fP[=false] + do not lock the repo, this allows some operations on read\-only repos + +.PP +\fB\-o\fP, \fB\-\-option\fP=[] + set extended option (\fB\fCkey=value\fR, can be specified multiple times) + +.PP +\fB\-\-password\-command\fP="" + specify a shell command to obtain a password (default: $RESTIC\_PASSWORD\_COMMAND) + +.PP +\fB\-p\fP, \fB\-\-password\-file\fP="" + read the repository password from a file (default: $RESTIC\_PASSWORD\_FILE) + +.PP +\fB\-q\fP, \fB\-\-quiet\fP[=false] + do not output comprehensive progress report + +.PP +\fB\-r\fP, \fB\-\-repo\fP="" + repository to backup to or restore from (default: $RESTIC\_REPOSITORY) + +.PP +\fB\-\-tls\-client\-cert\fP="" + path to a file containing PEM encoded TLS client certificate and private key + +.PP +\fB\-v\fP, \fB\-\-verbose\fP[=0] + be verbose (specify \-\-verbose multiple times or level \fB\fCn\fR) + + +.SH SEE ALSO +.PP +\fBrestic(1)\fP diff --git a/doc/man/restic-prune.1 b/doc/man/restic-prune.1 new file mode 100644 index 000000000..2089fddc8 --- /dev/null +++ b/doc/man/restic-prune.1 @@ -0,0 +1,96 @@ +.TH "restic backup" "1" "Jan 2017" "generated by `restic generate`" "" +.nh +.ad l + + +.SH NAME +.PP +restic\-prune \- Remove unneeded data from the repository + + +.SH SYNOPSIS +.PP +\fBrestic prune [flags]\fP + + +.SH DESCRIPTION +.PP +The "prune" command checks the repository and removes data that is not +referenced and therefore not needed any more. + + +.SH OPTIONS +.PP +\fB\-h\fP, \fB\-\-help\fP[=false] + help for prune + + +.SH OPTIONS INHERITED FROM PARENT COMMANDS +.PP +\fB\-\-cacert\fP=[] + \fB\fCfile\fR to load root certificates from (default: use system certificates) + +.PP +\fB\-\-cache\-dir\fP="" + set the cache directory. (default: use system default cache directory) + +.PP +\fB\-\-cleanup\-cache\fP[=false] + auto remove old cache directories + +.PP +\fB\-\-json\fP[=false] + set output mode to JSON for commands that support it + +.PP +\fB\-\-key\-hint\fP="" + key ID of key to try decrypting first (default: $RESTIC\_KEY\_HINT) + +.PP +\fB\-\-limit\-download\fP=0 + limits downloads to a maximum rate in KiB/s. (default: unlimited) + +.PP +\fB\-\-limit\-upload\fP=0 + limits uploads to a maximum rate in KiB/s. 
(default: unlimited) + +.PP +\fB\-\-no\-cache\fP[=false] + do not use a local cache + +.PP +\fB\-\-no\-lock\fP[=false] + do not lock the repo, this allows some operations on read\-only repos + +.PP +\fB\-o\fP, \fB\-\-option\fP=[] + set extended option (\fB\fCkey=value\fR, can be specified multiple times) + +.PP +\fB\-\-password\-command\fP="" + specify a shell command to obtain a password (default: $RESTIC\_PASSWORD\_COMMAND) + +.PP +\fB\-p\fP, \fB\-\-password\-file\fP="" + read the repository password from a file (default: $RESTIC\_PASSWORD\_FILE) + +.PP +\fB\-q\fP, \fB\-\-quiet\fP[=false] + do not output comprehensive progress report + +.PP +\fB\-r\fP, \fB\-\-repo\fP="" + repository to backup to or restore from (default: $RESTIC\_REPOSITORY) + +.PP +\fB\-\-tls\-client\-cert\fP="" + path to a file containing PEM encoded TLS client certificate and private key + +.PP +\fB\-v\fP, \fB\-\-verbose\fP[=0] + be verbose (specify \-\-verbose multiple times or level \fB\fCn\fR) + + +.SH SEE ALSO +.PP +\fBrestic(1)\fP diff --git a/doc/man/restic-rebuild-index.1 b/doc/man/restic-rebuild-index.1 new file mode 100644 index 000000000..d82e70f2c --- /dev/null +++ b/doc/man/restic-rebuild-index.1 @@ -0,0 +1,96 @@ +.TH "restic backup" "1" "Jan 2017" "generated by `restic generate`" "" +.nh +.ad l + + +.SH NAME +.PP +restic\-rebuild\-index \- Build a new index file + + +.SH SYNOPSIS +.PP +\fBrestic rebuild\-index [flags]\fP + + +.SH DESCRIPTION +.PP +The "rebuild\-index" command creates a new index based on the pack files in the +repository. + + +.SH OPTIONS +.PP +\fB\-h\fP, \fB\-\-help\fP[=false] + help for rebuild\-index + + +.SH OPTIONS INHERITED FROM PARENT COMMANDS +.PP +\fB\-\-cacert\fP=[] + \fB\fCfile\fR to load root certificates from (default: use system certificates) + +.PP +\fB\-\-cache\-dir\fP="" + set the cache directory. (default: use system default cache directory) + +.PP +\fB\-\-cleanup\-cache\fP[=false] + auto remove old cache directories + +.PP +\fB\-\-json\fP[=false] + set output mode to JSON for commands that support it + +.PP +\fB\-\-key\-hint\fP="" + key ID of key to try decrypting first (default: $RESTIC\_KEY\_HINT) + +.PP +\fB\-\-limit\-download\fP=0 + limits downloads to a maximum rate in KiB/s. (default: unlimited) + +.PP +\fB\-\-limit\-upload\fP=0 + limits uploads to a maximum rate in KiB/s. 
(default: unlimited) + +.PP +\fB\-\-no\-cache\fP[=false] + do not use a local cache + +.PP +\fB\-\-no\-lock\fP[=false] + do not lock the repo, this allows some operations on read\-only repos + +.PP +\fB\-o\fP, \fB\-\-option\fP=[] + set extended option (\fB\fCkey=value\fR, can be specified multiple times) + +.PP +\fB\-\-password\-command\fP="" + specify a shell command to obtain a password (default: $RESTIC\_PASSWORD\_COMMAND) + +.PP +\fB\-p\fP, \fB\-\-password\-file\fP="" + read the repository password from a file (default: $RESTIC\_PASSWORD\_FILE) + +.PP +\fB\-q\fP, \fB\-\-quiet\fP[=false] + do not output comprehensive progress report + +.PP +\fB\-r\fP, \fB\-\-repo\fP="" + repository to backup to or restore from (default: $RESTIC\_REPOSITORY) + +.PP +\fB\-\-tls\-client\-cert\fP="" + path to a file containing PEM encoded TLS client certificate and private key + +.PP +\fB\-v\fP, \fB\-\-verbose\fP[=0] + be verbose (specify \-\-verbose multiple times or level \fB\fCn\fR) + + +.SH SEE ALSO +.PP +\fBrestic(1)\fP diff --git a/doc/man/restic-recover.1 b/doc/man/restic-recover.1 new file mode 100644 index 000000000..6d25566b6 --- /dev/null +++ b/doc/man/restic-recover.1 @@ -0,0 +1,97 @@ +.TH "restic backup" "1" "Jan 2017" "generated by `restic generate`" "" +.nh +.ad l + + +.SH NAME +.PP +restic\-recover \- Recover data from the repository + + +.SH SYNOPSIS +.PP +\fBrestic recover [flags]\fP + + +.SH DESCRIPTION +.PP +The "recover" command build a new snapshot from all directories it can find in +the raw data of the repository. It can be used if, for example, a snapshot has +been removed by accident with "forget". + + +.SH OPTIONS +.PP +\fB\-h\fP, \fB\-\-help\fP[=false] + help for recover + + +.SH OPTIONS INHERITED FROM PARENT COMMANDS +.PP +\fB\-\-cacert\fP=[] + \fB\fCfile\fR to load root certificates from (default: use system certificates) + +.PP +\fB\-\-cache\-dir\fP="" + set the cache directory. (default: use system default cache directory) + +.PP +\fB\-\-cleanup\-cache\fP[=false] + auto remove old cache directories + +.PP +\fB\-\-json\fP[=false] + set output mode to JSON for commands that support it + +.PP +\fB\-\-key\-hint\fP="" + key ID of key to try decrypting first (default: $RESTIC\_KEY\_HINT) + +.PP +\fB\-\-limit\-download\fP=0 + limits downloads to a maximum rate in KiB/s. (default: unlimited) + +.PP +\fB\-\-limit\-upload\fP=0 + limits uploads to a maximum rate in KiB/s. 
(default: unlimited) + +.PP +\fB\-\-no\-cache\fP[=false] + do not use a local cache + +.PP +\fB\-\-no\-lock\fP[=false] + do not lock the repo, this allows some operations on read\-only repos + +.PP +\fB\-o\fP, \fB\-\-option\fP=[] + set extended option (\fB\fCkey=value\fR, can be specified multiple times) + +.PP +\fB\-\-password\-command\fP="" + specify a shell command to obtain a password (default: $RESTIC\_PASSWORD\_COMMAND) + +.PP +\fB\-p\fP, \fB\-\-password\-file\fP="" + read the repository password from a file (default: $RESTIC\_PASSWORD\_FILE) + +.PP +\fB\-q\fP, \fB\-\-quiet\fP[=false] + do not output comprehensive progress report + +.PP +\fB\-r\fP, \fB\-\-repo\fP="" + repository to backup to or restore from (default: $RESTIC\_REPOSITORY) + +.PP +\fB\-\-tls\-client\-cert\fP="" + path to a file containing PEM encoded TLS client certificate and private key + +.PP +\fB\-v\fP, \fB\-\-verbose\fP[=0] + be verbose (specify \-\-verbose multiple times or level \fB\fCn\fR) + + +.SH SEE ALSO +.PP +\fBrestic(1)\fP diff --git a/doc/man/restic-restore.1 b/doc/man/restic-restore.1 new file mode 100644 index 000000000..8ea55a5fb --- /dev/null +++ b/doc/man/restic-restore.1 @@ -0,0 +1,128 @@ +.TH "restic backup" "1" "Jan 2017" "generated by `restic generate`" "" +.nh +.ad l + + +.SH NAME +.PP +restic\-restore \- Extract the data from a snapshot + + +.SH SYNOPSIS +.PP +\fBrestic restore [flags] snapshotID\fP + + +.SH DESCRIPTION +.PP +The "restore" command extracts the data from a snapshot from the repository to +a directory. + +.PP +The special snapshot "latest" can be used to restore the latest snapshot in the +repository. + + +.SH OPTIONS +.PP +\fB\-e\fP, \fB\-\-exclude\fP=[] + exclude a \fB\fCpattern\fR (can be specified multiple times) + +.PP +\fB\-h\fP, \fB\-\-help\fP[=false] + help for restore + +.PP +\fB\-H\fP, \fB\-\-host\fP="" + only consider snapshots for this host when the snapshot ID is "latest" + +.PP +\fB\-i\fP, \fB\-\-include\fP=[] + include a \fB\fCpattern\fR, exclude everything else (can be specified multiple times) + +.PP +\fB\-\-path\fP=[] + only consider snapshots which include this (absolute) \fB\fCpath\fR for snapshot ID "latest" + +.PP +\fB\-\-tag\fP=[] + only consider snapshots which include this \fB\fCtaglist\fR for snapshot ID "latest" + +.PP +\fB\-t\fP, \fB\-\-target\fP="" + directory to extract data to + +.PP +\fB\-\-verify\fP[=false] + verify restored files content + + +.SH OPTIONS INHERITED FROM PARENT COMMANDS +.PP +\fB\-\-cacert\fP=[] + \fB\fCfile\fR to load root certificates from (default: use system certificates) + +.PP +\fB\-\-cache\-dir\fP="" + set the cache directory. (default: use system default cache directory) + +.PP +\fB\-\-cleanup\-cache\fP[=false] + auto remove old cache directories + +.PP +\fB\-\-json\fP[=false] + set output mode to JSON for commands that support it + +.PP +\fB\-\-key\-hint\fP="" + key ID of key to try decrypting first (default: $RESTIC\_KEY\_HINT) + +.PP +\fB\-\-limit\-download\fP=0 + limits downloads to a maximum rate in KiB/s. (default: unlimited) + +.PP +\fB\-\-limit\-upload\fP=0 + limits uploads to a maximum rate in KiB/s. 
(default: unlimited) + +.PP +\fB\-\-no\-cache\fP[=false] + do not use a local cache + +.PP +\fB\-\-no\-lock\fP[=false] + do not lock the repo, this allows some operations on read\-only repos + +.PP +\fB\-o\fP, \fB\-\-option\fP=[] + set extended option (\fB\fCkey=value\fR, can be specified multiple times) + +.PP +\fB\-\-password\-command\fP="" + specify a shell command to obtain a password (default: $RESTIC\_PASSWORD\_COMMAND) + +.PP +\fB\-p\fP, \fB\-\-password\-file\fP="" + read the repository password from a file (default: $RESTIC\_PASSWORD\_FILE) + +.PP +\fB\-q\fP, \fB\-\-quiet\fP[=false] + do not output comprehensive progress report + +.PP +\fB\-r\fP, \fB\-\-repo\fP="" + repository to backup to or restore from (default: $RESTIC\_REPOSITORY) + +.PP +\fB\-\-tls\-client\-cert\fP="" + path to a file containing PEM encoded TLS client certificate and private key + +.PP +\fB\-v\fP, \fB\-\-verbose\fP[=0] + be verbose (specify \-\-verbose multiple times or level \fB\fCn\fR) + + +.SH SEE ALSO +.PP +\fBrestic(1)\fP diff --git a/doc/man/restic-self-update.1 b/doc/man/restic-self-update.1 new file mode 100644 index 000000000..1c221cd70 --- /dev/null +++ b/doc/man/restic-self-update.1 @@ -0,0 +1,102 @@ +.TH "restic backup" "1" "Jan 2017" "generated by `restic generate`" "" +.nh +.ad l + + +.SH NAME +.PP +restic\-self\-update \- Update the restic binary + + +.SH SYNOPSIS +.PP +\fBrestic self\-update [flags]\fP + + +.SH DESCRIPTION +.PP +The command "self\-update" downloads the latest stable release of restic from +GitHub and replaces the currently running binary. After download, the +authenticity of the binary is verified using the GPG signature on the release +files. + + +.SH OPTIONS +.PP +\fB\-h\fP, \fB\-\-help\fP[=false] + help for self\-update + +.PP +\fB\-\-output\fP="" + Save the downloaded file as \fB\fCfilename\fR (default: running binary itself) + + +.SH OPTIONS INHERITED FROM PARENT COMMANDS +.PP +\fB\-\-cacert\fP=[] + \fB\fCfile\fR to load root certificates from (default: use system certificates) + +.PP +\fB\-\-cache\-dir\fP="" + set the cache directory. (default: use system default cache directory) + +.PP +\fB\-\-cleanup\-cache\fP[=false] + auto remove old cache directories + +.PP +\fB\-\-json\fP[=false] + set output mode to JSON for commands that support it + +.PP +\fB\-\-key\-hint\fP="" + key ID of key to try decrypting first (default: $RESTIC\_KEY\_HINT) + +.PP +\fB\-\-limit\-download\fP=0 + limits downloads to a maximum rate in KiB/s. (default: unlimited) + +.PP +\fB\-\-limit\-upload\fP=0 + limits uploads to a maximum rate in KiB/s. 
(default: unlimited) + +.PP +\fB\-\-no\-cache\fP[=false] + do not use a local cache + +.PP +\fB\-\-no\-lock\fP[=false] + do not lock the repo, this allows some operations on read\-only repos + +.PP +\fB\-o\fP, \fB\-\-option\fP=[] + set extended option (\fB\fCkey=value\fR, can be specified multiple times) + +.PP +\fB\-\-password\-command\fP="" + specify a shell command to obtain a password (default: $RESTIC\_PASSWORD\_COMMAND) + +.PP +\fB\-p\fP, \fB\-\-password\-file\fP="" + read the repository password from a file (default: $RESTIC\_PASSWORD\_FILE) + +.PP +\fB\-q\fP, \fB\-\-quiet\fP[=false] + do not output comprehensive progress report + +.PP +\fB\-r\fP, \fB\-\-repo\fP="" + repository to backup to or restore from (default: $RESTIC\_REPOSITORY) + +.PP +\fB\-\-tls\-client\-cert\fP="" + path to a file containing PEM encoded TLS client certificate and private key + +.PP +\fB\-v\fP, \fB\-\-verbose\fP[=0] + be verbose (specify \-\-verbose multiple times or level \fB\fCn\fR) + + +.SH SEE ALSO +.PP +\fBrestic(1)\fP diff --git a/doc/man/restic-snapshots.1 b/doc/man/restic-snapshots.1 new file mode 100644 index 000000000..b43c6eb3b --- /dev/null +++ b/doc/man/restic-snapshots.1 @@ -0,0 +1,115 @@ +.TH "restic backup" "1" "Jan 2017" "generated by `restic generate`" "" +.nh +.ad l + + +.SH NAME +.PP +restic\-snapshots \- List all snapshots + + +.SH SYNOPSIS +.PP +\fBrestic snapshots [snapshotID ...] [flags]\fP + + +.SH DESCRIPTION +.PP +The "snapshots" command lists all snapshots stored in the repository. + + +.SH OPTIONS +.PP +\fB\-c\fP, \fB\-\-compact\fP[=false] + use compact format + +.PP +\fB\-h\fP, \fB\-\-help\fP[=false] + help for snapshots + +.PP +\fB\-H\fP, \fB\-\-host\fP="" + only consider snapshots for this \fB\fChost\fR + +.PP +\fB\-\-last\fP[=false] + only show the last snapshot for each host and path + +.PP +\fB\-\-path\fP=[] + only consider snapshots for this \fB\fCpath\fR (can be specified multiple times) + +.PP +\fB\-\-tag\fP=[] + only consider snapshots which include this \fB\fCtaglist\fR (can be specified multiple times) + + +.SH OPTIONS INHERITED FROM PARENT COMMANDS +.PP +\fB\-\-cacert\fP=[] + \fB\fCfile\fR to load root certificates from (default: use system certificates) + +.PP +\fB\-\-cache\-dir\fP="" + set the cache directory. (default: use system default cache directory) + +.PP +\fB\-\-cleanup\-cache\fP[=false] + auto remove old cache directories + +.PP +\fB\-\-json\fP[=false] + set output mode to JSON for commands that support it + +.PP +\fB\-\-key\-hint\fP="" + key ID of key to try decrypting first (default: $RESTIC\_KEY\_HINT) + +.PP +\fB\-\-limit\-download\fP=0 + limits downloads to a maximum rate in KiB/s. (default: unlimited) + +.PP +\fB\-\-limit\-upload\fP=0 + limits uploads to a maximum rate in KiB/s. 
(default: unlimited) + +.PP +\fB\-\-no\-cache\fP[=false] + do not use a local cache + +.PP +\fB\-\-no\-lock\fP[=false] + do not lock the repo, this allows some operations on read\-only repos + +.PP +\fB\-o\fP, \fB\-\-option\fP=[] + set extended option (\fB\fCkey=value\fR, can be specified multiple times) + +.PP +\fB\-\-password\-command\fP="" + specify a shell command to obtain a password (default: $RESTIC\_PASSWORD\_COMMAND) + +.PP +\fB\-p\fP, \fB\-\-password\-file\fP="" + read the repository password from a file (default: $RESTIC\_PASSWORD\_FILE) + +.PP +\fB\-q\fP, \fB\-\-quiet\fP[=false] + do not output comprehensive progress report + +.PP +\fB\-r\fP, \fB\-\-repo\fP="" + repository to backup to or restore from (default: $RESTIC\_REPOSITORY) + +.PP +\fB\-\-tls\-client\-cert\fP="" + path to a file containing PEM encoded TLS client certificate and private key + +.PP +\fB\-v\fP, \fB\-\-verbose\fP[=0] + be verbose (specify \-\-verbose multiple times or level \fB\fCn\fR) + + +.SH SEE ALSO +.PP +\fBrestic(1)\fP diff --git a/doc/man/restic-stats.1 b/doc/man/restic-stats.1 new file mode 100644 index 000000000..f9578c38b --- /dev/null +++ b/doc/man/restic-stats.1 @@ -0,0 +1,131 @@ +.TH "restic backup" "1" "Jan 2017" "generated by `restic generate`" "" +.nh +.ad l + + +.SH NAME +.PP +restic\-stats \- Scan the repository and show basic statistics + + +.SH SYNOPSIS +.PP +\fBrestic stats [flags] [snapshot\-ID]\fP + + +.SH DESCRIPTION +.PP +The "stats" command walks one or all snapshots in a repository and +accumulates statistics about the data stored therein. It reports on +the number of unique files and their sizes, according to one of +the counting modes as given by the \-\-mode flag. + +.PP +If no snapshot is specified, all snapshots will be considered. Some +modes make more sense over just a single snapshot, while others +are useful across all snapshots, depending on what you are trying +to calculate. + +.PP +The modes are: + +.RS +.IP \(bu 2 +restore\-size: (default) Counts the size of the restored files. +.IP \(bu 2 +files\-by\-contents: Counts total size of files, where a file is +considered unique if it has unique contents. +.IP \(bu 2 +raw\-data: Counts the size of blobs in the repository, regardless of +how many files reference them. +.IP \(bu 2 +blobs\-per\-file: A combination of files\-by\-contents and raw\-data. +.IP \(bu 2 +Refer to the online manual for more details about each mode. + +.RE + + +.SH OPTIONS +.PP +\fB\-h\fP, \fB\-\-help\fP[=false] + help for stats + +.PP +\fB\-H\fP, \fB\-\-host\fP="" + filter latest snapshot by this hostname + +.PP +\fB\-\-mode\fP="restore\-size" + counting mode: restore\-size (default), files\-by\-contents, blobs\-per\-file, or raw\-data + + +.SH OPTIONS INHERITED FROM PARENT COMMANDS +.PP +\fB\-\-cacert\fP=[] + \fB\fCfile\fR to load root certificates from (default: use system certificates) + +.PP +\fB\-\-cache\-dir\fP="" + set the cache directory. (default: use system default cache directory) + +.PP +\fB\-\-cleanup\-cache\fP[=false] + auto remove old cache directories + +.PP +\fB\-\-json\fP[=false] + set output mode to JSON for commands that support it + +.PP +\fB\-\-key\-hint\fP="" + key ID of key to try decrypting first (default: $RESTIC\_KEY\_HINT) + +.PP +\fB\-\-limit\-download\fP=0 + limits downloads to a maximum rate in KiB/s. (default: unlimited) + +.PP +\fB\-\-limit\-upload\fP=0 + limits uploads to a maximum rate in KiB/s. 
(default: unlimited) + +.PP +\fB\-\-no\-cache\fP[=false] + do not use a local cache + +.PP +\fB\-\-no\-lock\fP[=false] + do not lock the repo, this allows some operations on read\-only repos + +.PP +\fB\-o\fP, \fB\-\-option\fP=[] + set extended option (\fB\fCkey=value\fR, can be specified multiple times) + +.PP +\fB\-\-password\-command\fP="" + specify a shell command to obtain a password (default: $RESTIC\_PASSWORD\_COMMAND) + +.PP +\fB\-p\fP, \fB\-\-password\-file\fP="" + read the repository password from a file (default: $RESTIC\_PASSWORD\_FILE) + +.PP +\fB\-q\fP, \fB\-\-quiet\fP[=false] + do not output comprehensive progress report + +.PP +\fB\-r\fP, \fB\-\-repo\fP="" + repository to backup to or restore from (default: $RESTIC\_REPOSITORY) + +.PP +\fB\-\-tls\-client\-cert\fP="" + path to a file containing PEM encoded TLS client certificate and private key + +.PP +\fB\-v\fP, \fB\-\-verbose\fP[=0] + be verbose (specify \-\-verbose multiple times or level \fB\fCn\fR) + + +.SH SEE ALSO +.PP +\fBrestic(1)\fP diff --git a/doc/man/restic-tag.1 b/doc/man/restic-tag.1 new file mode 100644 index 000000000..c7e8ddbf1 --- /dev/null +++ b/doc/man/restic-tag.1 @@ -0,0 +1,126 @@ +.TH "restic backup" "1" "Jan 2017" "generated by `restic generate`" "" +.nh +.ad l + + +.SH NAME +.PP +restic\-tag \- Modify tags on snapshots + + +.SH SYNOPSIS +.PP +\fBrestic tag [flags] [snapshot\-ID ...]\fP + + +.SH DESCRIPTION +.PP +The "tag" command allows you to modify tags on exiting snapshots. + +.PP +You can either set/replace the entire set of tags on a snapshot, or +add tags to/remove tags from the existing set. + +.PP +When no snapshot\-ID is given, all snapshots matching the host, tag and path filter criteria are modified. + + +.SH OPTIONS +.PP +\fB\-\-add\fP=[] + \fB\fCtag\fR which will be added to the existing tags (can be given multiple times) + +.PP +\fB\-h\fP, \fB\-\-help\fP[=false] + help for tag + +.PP +\fB\-H\fP, \fB\-\-host\fP="" + only consider snapshots for this \fB\fChost\fR, when no snapshot ID is given + +.PP +\fB\-\-path\fP=[] + only consider snapshots which include this (absolute) \fB\fCpath\fR, when no snapshot\-ID is given + +.PP +\fB\-\-remove\fP=[] + \fB\fCtag\fR which will be removed from the existing tags (can be given multiple times) + +.PP +\fB\-\-set\fP=[] + \fB\fCtag\fR which will replace the existing tags (can be given multiple times) + +.PP +\fB\-\-tag\fP=[] + only consider snapshots which include this \fB\fCtaglist\fR, when no snapshot\-ID is given + + +.SH OPTIONS INHERITED FROM PARENT COMMANDS +.PP +\fB\-\-cacert\fP=[] + \fB\fCfile\fR to load root certificates from (default: use system certificates) + +.PP +\fB\-\-cache\-dir\fP="" + set the cache directory. (default: use system default cache directory) + +.PP +\fB\-\-cleanup\-cache\fP[=false] + auto remove old cache directories + +.PP +\fB\-\-json\fP[=false] + set output mode to JSON for commands that support it + +.PP +\fB\-\-key\-hint\fP="" + key ID of key to try decrypting first (default: $RESTIC\_KEY\_HINT) + +.PP +\fB\-\-limit\-download\fP=0 + limits downloads to a maximum rate in KiB/s. (default: unlimited) + +.PP +\fB\-\-limit\-upload\fP=0 + limits uploads to a maximum rate in KiB/s. 
(default: unlimited) + +.PP +\fB\-\-no\-cache\fP[=false] + do not use a local cache + +.PP +\fB\-\-no\-lock\fP[=false] + do not lock the repo, this allows some operations on read\-only repos + +.PP +\fB\-o\fP, \fB\-\-option\fP=[] + set extended option (\fB\fCkey=value\fR, can be specified multiple times) + +.PP +\fB\-\-password\-command\fP="" + specify a shell command to obtain a password (default: $RESTIC\_PASSWORD\_COMMAND) + +.PP +\fB\-p\fP, \fB\-\-password\-file\fP="" + read the repository password from a file (default: $RESTIC\_PASSWORD\_FILE) + +.PP +\fB\-q\fP, \fB\-\-quiet\fP[=false] + do not output comprehensive progress report + +.PP +\fB\-r\fP, \fB\-\-repo\fP="" + repository to backup to or restore from (default: $RESTIC\_REPOSITORY) + +.PP +\fB\-\-tls\-client\-cert\fP="" + path to a file containing PEM encoded TLS client certificate and private key + +.PP +\fB\-v\fP, \fB\-\-verbose\fP[=0] + be verbose (specify \-\-verbose multiple times or level \fB\fCn\fR) + + +.SH SEE ALSO +.PP +\fBrestic(1)\fP diff --git a/doc/man/restic-unlock.1 b/doc/man/restic-unlock.1 new file mode 100644 index 000000000..10e3b51ff --- /dev/null +++ b/doc/man/restic-unlock.1 @@ -0,0 +1,99 @@ +.TH "restic backup" "1" "Jan 2017" "generated by `restic generate`" "" +.nh +.ad l + + +.SH NAME +.PP +restic\-unlock \- Remove locks other processes created + + +.SH SYNOPSIS +.PP +\fBrestic unlock [flags]\fP + + +.SH DESCRIPTION +.PP +The "unlock" command removes stale locks that have been created by other restic processes. + + +.SH OPTIONS +.PP +\fB\-h\fP, \fB\-\-help\fP[=false] + help for unlock + +.PP +\fB\-\-remove\-all\fP[=false] + remove all locks, even non\-stale ones + + +.SH OPTIONS INHERITED FROM PARENT COMMANDS +.PP +\fB\-\-cacert\fP=[] + \fB\fCfile\fR to load root certificates from (default: use system certificates) + +.PP +\fB\-\-cache\-dir\fP="" + set the cache directory. (default: use system default cache directory) + +.PP +\fB\-\-cleanup\-cache\fP[=false] + auto remove old cache directories + +.PP +\fB\-\-json\fP[=false] + set output mode to JSON for commands that support it + +.PP +\fB\-\-key\-hint\fP="" + key ID of key to try decrypting first (default: $RESTIC\_KEY\_HINT) + +.PP +\fB\-\-limit\-download\fP=0 + limits downloads to a maximum rate in KiB/s. (default: unlimited) + +.PP +\fB\-\-limit\-upload\fP=0 + limits uploads to a maximum rate in KiB/s. 
(default: unlimited) + +.PP +\fB\-\-no\-cache\fP[=false] + do not use a local cache + +.PP +\fB\-\-no\-lock\fP[=false] + do not lock the repo, this allows some operations on read\-only repos + +.PP +\fB\-o\fP, \fB\-\-option\fP=[] + set extended option (\fB\fCkey=value\fR, can be specified multiple times) + +.PP +\fB\-\-password\-command\fP="" + specify a shell command to obtain a password (default: $RESTIC\_PASSWORD\_COMMAND) + +.PP +\fB\-p\fP, \fB\-\-password\-file\fP="" + read the repository password from a file (default: $RESTIC\_PASSWORD\_FILE) + +.PP +\fB\-q\fP, \fB\-\-quiet\fP[=false] + do not output comprehensive progress report + +.PP +\fB\-r\fP, \fB\-\-repo\fP="" + repository to backup to or restore from (default: $RESTIC\_REPOSITORY) + +.PP +\fB\-\-tls\-client\-cert\fP="" + path to a file containing PEM encoded TLS client certificate and private key + +.PP +\fB\-v\fP, \fB\-\-verbose\fP[=0] + be verbose (specify \-\-verbose multiple times or level \fB\fCn\fR) + + +.SH SEE ALSO +.PP +\fBrestic(1)\fP diff --git a/doc/man/restic-version.1 b/doc/man/restic-version.1 new file mode 100644 index 000000000..d107cf618 --- /dev/null +++ b/doc/man/restic-version.1 @@ -0,0 +1,96 @@ +.TH "restic backup" "1" "Jan 2017" "generated by `restic generate`" "" +.nh +.ad l + + +.SH NAME +.PP +restic\-version \- Print version information + + +.SH SYNOPSIS +.PP +\fBrestic version [flags]\fP + + +.SH DESCRIPTION +.PP +The "version" command prints detailed information about the build environment +and the version of this software. + + +.SH OPTIONS +.PP +\fB\-h\fP, \fB\-\-help\fP[=false] + help for version + + +.SH OPTIONS INHERITED FROM PARENT COMMANDS +.PP +\fB\-\-cacert\fP=[] + \fB\fCfile\fR to load root certificates from (default: use system certificates) + +.PP +\fB\-\-cache\-dir\fP="" + set the cache directory. (default: use system default cache directory) + +.PP +\fB\-\-cleanup\-cache\fP[=false] + auto remove old cache directories + +.PP +\fB\-\-json\fP[=false] + set output mode to JSON for commands that support it + +.PP +\fB\-\-key\-hint\fP="" + key ID of key to try decrypting first (default: $RESTIC\_KEY\_HINT) + +.PP +\fB\-\-limit\-download\fP=0 + limits downloads to a maximum rate in KiB/s. (default: unlimited) + +.PP +\fB\-\-limit\-upload\fP=0 + limits uploads to a maximum rate in KiB/s. 
(default: unlimited) + +.PP +\fB\-\-no\-cache\fP[=false] + do not use a local cache + +.PP +\fB\-\-no\-lock\fP[=false] + do not lock the repo, this allows some operations on read\-only repos + +.PP +\fB\-o\fP, \fB\-\-option\fP=[] + set extended option (\fB\fCkey=value\fR, can be specified multiple times) + +.PP +\fB\-\-password\-command\fP="" + specify a shell command to obtain a password (default: $RESTIC\_PASSWORD\_COMMAND) + +.PP +\fB\-p\fP, \fB\-\-password\-file\fP="" + read the repository password from a file (default: $RESTIC\_PASSWORD\_FILE) + +.PP +\fB\-q\fP, \fB\-\-quiet\fP[=false] + do not output comprehensive progress report + +.PP +\fB\-r\fP, \fB\-\-repo\fP="" + repository to backup to or restore from (default: $RESTIC\_REPOSITORY) + +.PP +\fB\-\-tls\-client\-cert\fP="" + path to a file containing PEM encoded TLS client certificate and private key + +.PP +\fB\-v\fP, \fB\-\-verbose\fP[=0] + be verbose (specify \-\-verbose multiple times or level \fB\fCn\fR) + + +.SH SEE ALSO +.PP +\fBrestic(1)\fP diff --git a/doc/man/restic.1 b/doc/man/restic.1 new file mode 100644 index 000000000..8a483df4f --- /dev/null +++ b/doc/man/restic.1 @@ -0,0 +1,94 @@ +.TH "restic backup" "1" "Jan 2017" "generated by `restic generate`" "" +.nh +.ad l + + +.SH NAME +.PP +restic \- Backup and restore files + + +.SH SYNOPSIS +.PP +\fBrestic [flags]\fP + + +.SH DESCRIPTION +.PP +restic is a backup program which allows saving multiple revisions of files and +directories in an encrypted repository stored on different backends. + + +.SH OPTIONS +.PP +\fB\-\-cacert\fP=[] + \fB\fCfile\fR to load root certificates from (default: use system certificates) + +.PP +\fB\-\-cache\-dir\fP="" + set the cache directory. (default: use system default cache directory) + +.PP +\fB\-\-cleanup\-cache\fP[=false] + auto remove old cache directories + +.PP +\fB\-h\fP, \fB\-\-help\fP[=false] + help for restic + +.PP +\fB\-\-json\fP[=false] + set output mode to JSON for commands that support it + +.PP +\fB\-\-key\-hint\fP="" + key ID of key to try decrypting first (default: $RESTIC\_KEY\_HINT) + +.PP +\fB\-\-limit\-download\fP=0 + limits downloads to a maximum rate in KiB/s. (default: unlimited) + +.PP +\fB\-\-limit\-upload\fP=0 + limits uploads to a maximum rate in KiB/s. 
(default: unlimited) + +.PP +\fB\-\-no\-cache\fP[=false] + do not use a local cache + +.PP +\fB\-\-no\-lock\fP[=false] + do not lock the repo, this allows some operations on read\-only repos + +.PP +\fB\-o\fP, \fB\-\-option\fP=[] + set extended option (\fB\fCkey=value\fR, can be specified multiple times) + +.PP +\fB\-\-password\-command\fP="" + specify a shell command to obtain a password (default: $RESTIC\_PASSWORD\_COMMAND) + +.PP +\fB\-p\fP, \fB\-\-password\-file\fP="" + read the repository password from a file (default: $RESTIC\_PASSWORD\_FILE) + +.PP +\fB\-q\fP, \fB\-\-quiet\fP[=false] + do not output comprehensive progress report + +.PP +\fB\-r\fP, \fB\-\-repo\fP="" + repository to backup to or restore from (default: $RESTIC\_REPOSITORY) + +.PP +\fB\-\-tls\-client\-cert\fP="" + path to a file containing PEM encoded TLS client certificate and private key + +.PP +\fB\-v\fP, \fB\-\-verbose\fP[=0] + be verbose (specify \-\-verbose multiple times or level \fB\fCn\fR) + + +.SH SEE ALSO +.PP +\fBrestic\-backup(1)\fP, \fBrestic\-cache(1)\fP, \fBrestic\-cat(1)\fP, \fBrestic\-check(1)\fP, \fBrestic\-diff(1)\fP, \fBrestic\-dump(1)\fP, \fBrestic\-find(1)\fP, \fBrestic\-forget(1)\fP, \fBrestic\-generate(1)\fP, \fBrestic\-init(1)\fP, \fBrestic\-key(1)\fP, \fBrestic\-list(1)\fP, \fBrestic\-ls(1)\fP, \fBrestic\-migrate(1)\fP, \fBrestic\-mount(1)\fP, \fBrestic\-prune(1)\fP, \fBrestic\-rebuild\-index(1)\fP, \fBrestic\-recover(1)\fP, \fBrestic\-restore(1)\fP, \fBrestic\-self\-update(1)\fP, \fBrestic\-snapshots(1)\fP, \fBrestic\-stats(1)\fP, \fBrestic\-tag(1)\fP, \fBrestic\-unlock(1)\fP, \fBrestic\-version(1)\fP diff --git a/doc/manual_rest.rst b/doc/manual_rest.rst new file mode 100644 index 000000000..e904ad581 --- /dev/null +++ b/doc/manual_rest.rst @@ -0,0 +1,391 @@ +Manual +====== + +Usage help +---------- + +Usage help is available: + +.. code-block:: console + + $ ./restic --help + restic is a backup program which allows saving multiple revisions of files and + directories in an encrypted repository stored on different backends. + + Usage: + restic [command] + + Available Commands: + backup Create a new backup of files and/or directories + cache Operate on local cache directories + cat Print internal objects to stdout + check Check the repository for errors + diff Show differences between two snapshots + dump Print a backed-up file to stdout + find Find a file or directory + forget Remove snapshots from the repository + generate Generate manual pages and auto-completion files (bash, zsh) + help Help about any command + init Initialize a new repository + key Manage keys (passwords) + list List objects in the repository + ls List files in a snapshot + migrate Apply migrations + mount Mount the repository + prune Remove unneeded data from the repository + rebuild-index Build a new index file + restore Extract the data from a snapshot + snapshots List all snapshots + stats Count up sizes and show information about repository data + tag Modify tags on snapshots + unlock Remove locks other processes created + version Print version information + + Flags: + --cacert file file to load root certificates from (default: use system certificates) + --cache-dir string set the cache directory. 
(default: use system default cache directory) + --cleanup-cache auto remove old cache directories + -h, --help help for restic + --json set output mode to JSON for commands that support it + --key-hint string key ID of key to try decrypting first (default: $RESTIC_KEY_HINT) + --limit-download int limits downloads to a maximum rate in KiB/s. (default: unlimited) + --limit-upload int limits uploads to a maximum rate in KiB/s. (default: unlimited) + --no-cache do not use a local cache + --no-lock do not lock the repo, this allows some operations on read-only repos + -o, --option key=value set extended option (key=value, can be specified multiple times) + -p, --password-file string read the repository password from a file (default: $RESTIC_PASSWORD_FILE) + -q, --quiet do not output comprehensive progress report + -r, --repo string repository to backup to or restore from (default: $RESTIC_REPOSITORY) + --tls-client-cert string path to a file containing PEM encoded TLS client certificate and private key + -v, --verbose n[=-1] be verbose (specify --verbose multiple times or level n) + + Use "restic [command] --help" for more information about a command. + +Similar to programs such as ``git``, restic has a number of +sub-commands. You can see these commands in the listing above. Each +sub-command may have own command-line options, and there is a help +option for each command which lists them, e.g. for the ``backup`` +command: + +.. code-block:: console + + $ ./restic backup --help + The "backup" command creates a new snapshot and saves the files and directories + given as the arguments. + + Usage: + restic backup [flags] FILE/DIR [FILE/DIR] ... + + Flags: + -e, --exclude pattern exclude a pattern (can be specified multiple times) + --exclude-caches excludes cache directories that are marked with a CACHEDIR.TAG file + --exclude-file file read exclude patterns from a file (can be specified multiple times) + --exclude-if-present stringArray takes filename[:header], exclude contents of directories containing filename (except filename itself) if header of that file is as provided (can be specified multiple times) + --files-from string read the files to backup from file (can be combined with file args/can be specified multiple times) + -f, --force force re-reading the target files/directories (overrides the "parent" flag) + -h, --help help for backup + --hostname hostname set the hostname for the snapshot manually. To prevent an expensive rescan use the "parent" flag + -x, --one-file-system exclude other file systems + --parent string use this parent snapshot (default: last snapshot in the repo that has the same target files/directories) + --stdin read backup from stdin + --stdin-filename string file name to use when reading from stdin (default "stdin") + --tag tag add a tag for the new snapshot (can be specified multiple times) + --time string time of the backup (ex. '2012-11-01 22:08:41') (default: now) + --with-atime store the atime for all files and directories + + Global Flags: + --cacert file file to load root certificates from (default: use system certificates) + --cache-dir string set the cache directory. (default: use system default cache directory) + --cleanup-cache auto remove old cache directories + --json set output mode to JSON for commands that support it + --key-hint string key ID of key to try decrypting first (default: $RESTIC_KEY_HINT) + --limit-download int limits downloads to a maximum rate in KiB/s. 
(default: unlimited) + --limit-upload int limits uploads to a maximum rate in KiB/s. (default: unlimited) + --no-cache do not use a local cache + --no-lock do not lock the repo, this allows some operations on read-only repos + -o, --option key=value set extended option (key=value, can be specified multiple times) + -p, --password-file string read the repository password from a file (default: $RESTIC_PASSWORD_FILE) + -q, --quiet do not output comprehensive progress report + -r, --repo string repository to backup to or restore from (default: $RESTIC_REPOSITORY) + --tls-client-cert string path to a file containing PEM encoded TLS client certificate and private key + -v, --verbose n[=-1] be verbose (specify --verbose multiple times or level n) + +Subcommand that support showing progress information such as ``backup``, +``check`` and ``prune`` will do so unless the quiet flag ``-q`` or +``--quiet`` is set. When running from a non-interactive console progress +reporting will be limited to once every 10 seconds to not fill your +logs. Use ``backup`` with the quiet flag ``-q`` or ``--quiet`` to skip +the initial scan of the source directory, this may shorten the backup +time needed for large directories. + +Additionally on Unix systems if ``restic`` receives a SIGUSR1 signal the +current progress will be written to the standard output so you can check up +on the status at will. + +Manage tags +----------- + +Managing tags on snapshots is done with the ``tag`` command. The +existing set of tags can be replaced completely, tags can be added or +removed. The result is directly visible in the ``snapshots`` command. + +Let's say we want to tag snapshot ``590c8fc8`` with the tags ``NL`` and +``CH`` and remove all other tags that may be present, the following +command does that: + +.. code-block:: console + + $ restic -r /srv/restic-repo tag --set NL --set CH 590c8fc8 + create exclusive lock for repository + modified tags on 1 snapshots + +Note the snapshot ID has changed, so between each change we need to look +up the new ID of the snapshot. But there is an even better way, the +``tag`` command accepts ``--tag`` for a filter, so we can filter +snapshots based on the tag we just added. + +So we can add and remove tags incrementally like this: + +.. code-block:: console + + $ restic -r /srv/restic-repo tag --tag NL --remove CH + create exclusive lock for repository + modified tags on 1 snapshots + + $ restic -r /srv/restic-repo tag --tag NL --add UK + create exclusive lock for repository + modified tags on 1 snapshots + + $ restic -r /srv/restic-repo tag --tag NL --remove NL + create exclusive lock for repository + modified tags on 1 snapshots + + $ restic -r /srv/restic-repo tag --tag NL --add SOMETHING + no snapshots were modified + +Under the hood +-------------- + +Browse repository objects +~~~~~~~~~~~~~~~~~~~~~~~~~ + +Internally, a repository stores data of several different types +described in the `design +documentation `__. +You can ``list`` objects such as blobs, packs, index, snapshots, keys or +locks with the following command: + +.. code-block:: console + + $ restic -r /srv/restic-repo list snapshots + d369ccc7d126594950bf74f0a348d5d98d9e99f3215082eb69bf02dc9b3e464c + +The ``find`` command searches for a given +`pattern `__ in the +repository. + +.. 
code-block:: console + + $ restic -r backup find test.txt + debug log file restic.log + debug enabled + enter password for repository: + found 1 matching entries in snapshot 196bc5760c909a7681647949e80e5448e276521489558525680acf1bd428af36 + -rw-r--r-- 501 20 5 2015-08-26 14:09:57 +0200 CEST path/to/test.txt + +The ``cat`` command allows you to display the JSON representation of the +objects or their raw content. + +.. code-block:: console + + $ restic -r /srv/restic-repo cat snapshot d369ccc7d126594950bf74f0a348d5d98d9e99f3215082eb69bf02dc9b3e464c + enter password for repository: + { + "time": "2015-08-12T12:52:44.091448856+02:00", + "tree": "05cec17e8d3349f402576d02576a2971fc0d9f9776ce2f441c7010849c4ff5af", + "paths": [ + "/home/user/work" + ], + "hostname": "kasimir", + "username": "username", + "uid": 501, + "gid": 20 + } + +Metadata handling +~~~~~~~~~~~~~~~~~ + +Restic saves and restores most default attributes, including extended attributes like ACLs. +Sparse files are not handled in a special way yet, and aren't restored. + +The following metadata is handled by restic: + +- Name +- Type +- Mode +- ModTime +- AccessTime +- ChangeTime +- UID +- GID +- User +- Group +- Inode +- Size +- Links +- LinkTarget +- Device +- Content +- Subtree +- ExtendedAttributes + + +Getting information about repository data +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Use the ``stats`` command to count up stats about the data in the repository. +There are different counting modes available using the ``--mode`` flag, +depending on what you want to calculate. The default is the restore size, or +the size required to restore the files: + +- ``restore-size`` (default) counts the size of the restored files. +- ``files-by-contents`` counts the total size of unique files as given by their + contents. This can be useful since a file is considered unique only if it has + unique contents. Keep in mind that a small change to a large file (even when the + file name/path hasn't changed) will cause them to look like different files, thus + essentially causing the whole size of the file to be counted twice. +- ``raw-data`` counts the size of the blobs in the repository, regardless of how many + files reference them. This tells you how much restic has reduced all your original + data down to (either for a single snapshot or across all your backups), and compared + to the size given by the restore-size mode, can tell you how much deduplication is + helping you. +- ``blobs-per-file`` is kind of a mix between files-by-contents and raw-data modes; + it is useful for knowing how much value your backup is providing you in terms of unique + data stored by file. Like files-by-contents, it is resilient to file renames/moves. + Unlike files-by-contents, it does not balloon to high values when large files have + small edits, as long as the file path stayed the same. Unlike raw-data, this mode + DOES consider how many files point to each blob such that the more files a blob is + referenced by, the more it counts toward the size. + +For example, to calculate how much space would be +required to restore the latest snapshot (from any host that made it): + +.. code-block:: console + + $ restic stats latest + password is correct + Total File Count: 10538 + Total Size: 37.824 GiB + +If multiple hosts are backing up to the repository, the latest snapshot may not +be the one you want. You can specify the latest snapshot from only a specific +host by using the ``--host`` flag: + +.. 
code-block:: console + + $ restic stats --host myserver latest + password is correct + Total File Count: 21766 + Total Size: 481.783 GiB + +There we see that it would take 482 GiB of disk space to restore the latest +snapshot from "myserver". + +But how much space does that snapshot take on disk? In other words, how much +has restic's deduplication helped? We can check: + +.. code-block:: console + + $ restic stats --host myserver --mode raw-data latest + password is correct + Total Blob Count: 340847 + Total Size: 458.663 GiB + +Comparing this size to the previous command, we see that restic has saved +about 23 GiB of space with deduplication. + +Which mode you use depends on your exact use case. Some modes are more useful +across all snapshots, while others make more sense on just a single snapshot, +depending on what you're trying to calculate. + + +Scripting +--------- + +Restic supports the output of some commands in JSON format, the JSON +data can then be processed by other programs (e.g. +`jq `__). The following example +lists all snapshots as JSON and uses ``jq`` to pretty-print the result: + +.. code-block:: console + + $ restic -r /srv/restic-repo snapshots --json | jq . + [ + { + "time": "2017-03-11T09:57:43.26630619+01:00", + "tree": "bf25241679533df554fc0fd0ae6dbb9dcf1859a13f2bc9dd4543c354eff6c464", + "paths": [ + "/home/work/doc" + ], + "hostname": "kasimir", + "username": "fd0", + "uid": 1000, + "gid": 100, + "id": "bbeed6d28159aa384d1ccc6fa0b540644b1b9599b162d2972acda86b1b80f89e" + }, + { + "time": "2017-03-11T09:58:57.541446938+01:00", + "tree": "7f8c95d3420baaac28dc51609796ae0e0ecfb4862b609a9f38ffaf7ae2d758da", + "paths": [ + "/home/user/shared" + ], + "hostname": "kasimir", + "username": "fd0", + "uid": 1000, + "gid": 100, + "id": "b157d91c16f0ba56801ece3a708dfc53791fe2a97e827090d6ed9a69a6ebdca0" + } + ] + +Temporary files +--------------- + +During some operations (e.g. ``backup`` and ``prune``) restic uses +temporary files to store data. These files will, by default, be saved to +the system's temporary directory, on Linux this is usually located in +``/tmp/``. The environment variable ``TMPDIR`` can be used to specify a +different directory, e.g. to use the directory ``/var/tmp/restic-tmp`` +instead of the default, set the environment variable like this: + +.. code-block:: console + + $ export TMPDIR=/var/tmp/restic-tmp + $ restic -r /srv/restic-repo backup ~/work + + + +Caching +------- + +Restic keeps a cache with some files from the repository on the local machine. +This allows faster operations, since meta data does not need to be loaded from +a remote repository. The cache is automatically created, usually in an +OS-specific cache folder: + + * Linux/other: ``~/.cache/restic`` (or ``$XDG_CACHE_HOME/restic``) + * macOS: ``~/Library/Caches/restic`` + * Windows: ``%LOCALAPPDATA%/restic`` + +The command line parameter ``--cache-dir`` can each be used to override the +default cache location. The parameter ``--no-cache`` disables the cache +entirely. In this case, all data is loaded from the repo. + +The cache is ephemeral: When a file cannot be read from the cache, it is loaded +from the repository. + +Within the cache directory, there's a sub directory for each repository the +cache was used with. Restic updates the timestamps of a repo directory each +time it is used, so by looking at the timestamps of the sub directories of the +cache directory it can decide which sub directories are old and probably not +needed any more. 
You can either remove these directories manually, or run a +restic command with the ``--cleanup-cache`` flag. diff --git a/doc/requirements.txt b/doc/requirements.txt new file mode 100644 index 000000000..82133027c --- /dev/null +++ b/doc/requirements.txt @@ -0,0 +1,2 @@ +sphinx +sphinx_rtd_theme diff --git a/doc/test_irreducibility.gap b/doc/test_irreducibility.gap new file mode 100644 index 000000000..22b2ae3a4 --- /dev/null +++ b/doc/test_irreducibility.gap @@ -0,0 +1,25 @@ +# This file is a script for GAP and tests a list of polynomials in hexadecimal +# for irreducibility over F_2 + +# create x over F_2 = GF(2) +x := Indeterminate(GF(2), "x"); + +# test if polynomial is irreducible, i.e. the number of factors is one +IrredPoly := function (poly) + return (Length(Factors(poly)) = 1); +end;; + +# create a polynomial in x from the hexadecimal representation of the +# coefficients +Hex2Poly := function (s) + return ValuePol(CoefficientsQadic(IntHexString(s), 2), x); +end;; + +# list of candidates, in hex +candidates := [ "3DA3358B4DC173" ]; + +# create real polynomials +L := List(candidates, Hex2Poly); + +# filter and display the list of irreducible polynomials contained in L +Display(Filtered(L, x -> (IrredPoly(x)))); diff --git a/doc/zsh-completion.zsh b/doc/zsh-completion.zsh new file mode 100644 index 000000000..7595387d2 --- /dev/null +++ b/doc/zsh-completion.zsh @@ -0,0 +1,20 @@ +#compdef restic + +_arguments \ + '1: :->level1' \ + '2: :_files' +case $state in + level1) + case $words[1] in + restic) + _arguments '1: :(backup cache cat check diff dump find forget generate help init key list ls migrate mount options prune rebuild-index recover restore self-update snapshots stats tag unlock version)' + ;; + *) + _arguments '*: :_files' + ;; + esac + ;; + *) + _arguments '*: :_files' + ;; +esac diff --git a/docker/Dockerfile b/docker/Dockerfile new file mode 100644 index 000000000..60890891a --- /dev/null +++ b/docker/Dockerfile @@ -0,0 +1,7 @@ +FROM alpine:latest + +COPY restic /usr/bin + +RUN apk add --update --no-cache ca-certificates fuse openssh-client + +ENTRYPOINT ["/usr/bin/restic"] diff --git a/docker/README.md b/docker/README.md new file mode 100644 index 000000000..1c2c9205c --- /dev/null +++ b/docker/README.md @@ -0,0 +1,24 @@ +# Docker image + +## Build + +From the root of this repository run: + +``` +./docker/build.sh +``` + +image name will be `restic/restic:latest` + +## Run + +Set environment variable `RESTIC_REPOSITORY` and map volume to directories and +files like: + +``` +docker run --rm -ti \ + -v $HOME/.restic/passfile:/pass \ + -v $HOME/importantdirectory:/data \ + -e RESTIC_REPOSITORY=rest:https://user:pass@hostname/ \ + restic/restic -p /pass backup /data +``` diff --git a/docker/build.sh b/docker/build.sh new file mode 100755 index 000000000..54c0b1bf7 --- /dev/null +++ b/docker/build.sh @@ -0,0 +1,11 @@ +#!/bin/sh + +set -e + +echo "Build binary using golang docker image" +docker run --rm -ti \ + -v "`pwd`":/go/src/github.com/restic/restic \ + -w /go/src/github.com/restic/restic golang:1.11.1-alpine go run build.go + +echo "Build docker image restic/restic:latest" +docker build --rm -t restic/restic:latest -f docker/Dockerfile . 
diff --git a/go.mod b/go.mod new file mode 100644 index 000000000..936375e55 --- /dev/null +++ b/go.mod @@ -0,0 +1,55 @@ +module github.com/restic/restic + +require ( + bazil.org/fuse v0.0.0-20180421153158-65cc252bf669 + cloud.google.com/go v0.27.0 // indirect + github.com/Azure/azure-sdk-for-go v20.1.0+incompatible + github.com/Azure/go-autorest v10.15.3+incompatible // indirect + github.com/cenkalti/backoff v2.0.0+incompatible + github.com/cpuguy83/go-md2man v1.0.8 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/dgrijalva/jwt-go v3.2.0+incompatible // indirect + github.com/dnaeon/go-vcr v0.0.0-20180814043457-aafff18a5cc2 // indirect + github.com/elithrar/simple-scrypt v1.3.0 + github.com/go-ini/ini v1.38.2 // indirect + github.com/golang/protobuf v1.2.0 // indirect + github.com/google/go-cmp v0.2.0 + github.com/gopherjs/gopherjs v0.0.0-20180825215210-0210a2f0f73c // indirect + github.com/hashicorp/golang-lru v0.5.0 + github.com/inconshreveable/mousetrap v1.0.0 // indirect + github.com/jtolds/gls v4.2.1+incompatible // indirect + github.com/juju/ratelimit v1.0.1 + github.com/kr/fs v0.1.0 // indirect + github.com/kr/pretty v0.1.0 // indirect + github.com/kurin/blazer v0.5.1 + github.com/marstr/guid v1.1.0 // indirect + github.com/mattn/go-isatty v0.0.4 + github.com/minio/minio-go v6.0.7+incompatible + github.com/mitchellh/go-homedir v1.0.0 // indirect + github.com/ncw/swift v1.0.41 + github.com/pkg/errors v0.8.0 + github.com/pkg/profile v1.2.1 + github.com/pkg/sftp v1.8.2 + github.com/pkg/xattr v0.3.1 + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/restic/chunker v0.2.0 + github.com/russross/blackfriday v1.5.1 // indirect + github.com/satori/go.uuid v1.2.0 // indirect + github.com/smartystreets/assertions v0.0.0-20180820201707-7c9eb446e3cf // indirect + github.com/smartystreets/goconvey v0.0.0-20180222194500-ef6db91d284a // indirect + github.com/spf13/cobra v0.0.3 + github.com/spf13/pflag v1.0.2 + github.com/stretchr/testify v1.2.2 // indirect + golang.org/x/crypto v0.0.0-20180904163835-0709b304e793 + golang.org/x/net v0.0.0-20180906233101-161cd47e91fd + golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be + golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f + golang.org/x/sys v0.0.0-20180907202204-917fdcba135d + golang.org/x/text v0.3.0 + google.golang.org/api v0.0.0-20180907210053-b609d5e6b7ab + google.golang.org/appengine v1.1.0 // indirect + gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect + gopkg.in/ini.v1 v1.38.2 // indirect + gopkg.in/tomb.v2 v2.0.0-20161208151619-d5d1b5820637 + gopkg.in/yaml.v2 v2.2.1 // indirect +) diff --git a/go.sum b/go.sum new file mode 100644 index 000000000..e49fc4ca5 --- /dev/null +++ b/go.sum @@ -0,0 +1,108 @@ +bazil.org/fuse v0.0.0-20180421153158-65cc252bf669 h1:FNCRpXiquG1aoyqcIWVFmpTSKVcx2bQD38uZZeGtdlw= +bazil.org/fuse v0.0.0-20180421153158-65cc252bf669/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8= +cloud.google.com/go v0.27.0 h1:Xa8ZWro6QYKOwDKtxfKsiE0ea2jD39nx32RxtF5RjYE= +cloud.google.com/go v0.27.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/Azure/azure-sdk-for-go v20.1.0+incompatible h1:b8OWFQuH5MPi2LYyAR2Ga+7KVH9ipwiSSSMga04/Urc= +github.com/Azure/azure-sdk-for-go v20.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/go-autorest v10.15.3+incompatible h1:nhKI/bvazIs3C3TFGoSqKY6hZ8f5od5mb5/UcS6HVIY= +github.com/Azure/go-autorest v10.15.3+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= 
+github.com/cenkalti/backoff v2.0.0+incompatible h1:5IIPUHhlnUZbcHQsQou5k1Tn58nJkeJL9U+ig5CHJbY= +github.com/cenkalti/backoff v2.0.0+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/cpuguy83/go-md2man v1.0.8 h1:DwoNytLphI8hzS2Af4D0dfaEaiSq2bN05mEm4R6vf8M= +github.com/cpuguy83/go-md2man v1.0.8/go.mod h1:N6JayAiVKtlHSnuTCeuLSQVs75hb8q+dYQLjr7cDsKY= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dnaeon/go-vcr v0.0.0-20180814043457-aafff18a5cc2 h1:G9/PqfhOrt8JXnw0DGTfVoOkKHDhOlEZqhE/cu+NvQM= +github.com/dnaeon/go-vcr v0.0.0-20180814043457-aafff18a5cc2/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= +github.com/elithrar/simple-scrypt v1.3.0 h1:KIlOlxdoQf9JWKl5lMAJ28SY2URB0XTRDn2TckyzAZg= +github.com/elithrar/simple-scrypt v1.3.0/go.mod h1:U2XQRI95XHY0St410VE3UjT7vuKb1qPwrl/EJwEqnZo= +github.com/go-ini/ini v1.38.2 h1:6Hl/z3p3iFkA0dlDfzYxuFuUGD+kaweypF6btsR2/Q4= +github.com/go-ini/ini v1.38.2/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= +github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/gopherjs/gopherjs v0.0.0-20180825215210-0210a2f0f73c h1:16eHWuMGvCjSfgRJKqIzapE78onvvTbdi1rMkU00lZw= +github.com/gopherjs/gopherjs v0.0.0-20180825215210-0210a2f0f73c/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/jtolds/gls v4.2.1+incompatible h1:fSuqC+Gmlu6l/ZYAoZzx2pyucC8Xza35fpRVWLVmUEE= +github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/juju/ratelimit v1.0.1 h1:+7AIFJVQ0EQgq/K9+0Krm7m530Du7tIz0METWzN0RgY= +github.com/juju/ratelimit v1.0.1/go.mod h1:qapgC/Gy+xNh9UxzV13HGGl/6UXNN+ct+vwSgWNm/qk= +github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kurin/blazer v0.5.1 h1:mBc4i1uhHJEqU0KvzOgpMHhkwf+EcXvxjWEUS7HG+eY= +github.com/kurin/blazer v0.5.1/go.mod h1:4FCXMUWo9DllR2Do4TtBd377ezyAJ51vB5uTBjt0pGU= +github.com/marstr/guid v1.1.0 h1:/M4H/1G4avsieL6BbUwCOBzulmoeKVP5ux/3mQNnbyI= +github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho= +github.com/mattn/go-isatty 
v0.0.4 h1:bnP0vzxcAdeI1zdubAl5PjU6zsERjGZb7raWodagDYs= +github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/minio/minio-go v6.0.7+incompatible h1:nWABqotkiT/3aLgFnG30doQiwFkDMM9xnGGQnS+Ao6M= +github.com/minio/minio-go v6.0.7+incompatible/go.mod h1:7guKYtitv8dktvNUGrhzmNlA5wrAABTQXCoesZdFQO8= +github.com/mitchellh/go-homedir v1.0.0 h1:vKb8ShqSby24Yrqr/yDYkuFz8d0WUjys40rvnGC8aR0= +github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/ncw/swift v1.0.41 h1:kfoTVQKt1A4n0m1Q3YWku9OoXfpo06biqVfi73yseBs= +github.com/ncw/swift v1.0.41/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= +github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/profile v1.2.1 h1:F++O52m40owAmADcojzM+9gyjmMOY/T4oYJkgFDH8RE= +github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= +github.com/pkg/sftp v1.8.2 h1:3upwlsK5/USEeM5gzIe9eWdzU4sV+kG3gKKg3RLBuWE= +github.com/pkg/sftp v1.8.2/go.mod h1:NxmoDg/QLVWluQDUYG7XBZTLUpKeFa8e3aMf1BfjyHk= +github.com/pkg/xattr v0.3.1 h1:6ceg5jxT3cH4lM5n8S2PmiNeOv61MK08yvvYJwyrPH0= +github.com/pkg/xattr v0.3.1/go.mod h1:CBdxFOf0VLbaj6HKuP2ITOVV7NY6ycPKgIgnSx2ZNVs= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/restic/chunker v0.2.0 h1:GjvmvFuv2mx0iekZs+iAlrioo2UtgsGSSplvoXaVHDU= +github.com/restic/chunker v0.2.0/go.mod h1:VdjruEj+7BU1ZZTW8Qqi1exxRx2Omf2JH0NsUEkQ29s= +github.com/russross/blackfriday v1.5.1 h1:B8ZN6pD4PVofmlDCDUdELeYrbsVIDM/bpjW3v3zgcRc= +github.com/russross/blackfriday v1.5.1/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww= +github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/smartystreets/assertions v0.0.0-20180820201707-7c9eb446e3cf h1:6V1qxN6Usn4jy8unvggSJz/NC790tefw8Zdy6OZS5co= +github.com/smartystreets/assertions v0.0.0-20180820201707-7c9eb446e3cf/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v0.0.0-20180222194500-ef6db91d284a h1:JSvGDIbmil4Ui/dDdFBExb7/cmkNjyX5F97oglmvCDo= +github.com/smartystreets/goconvey v0.0.0-20180222194500-ef6db91d284a/go.mod h1:XDJAKZRPZ1CvBcN2aX5YOUTYGHki24fSF0Iv48Ibg0s= +github.com/spf13/cobra v0.0.3 h1:ZlrZ4XsMRm04Fr5pSFxBgfND2EBVa1nLpiy1stUsX/8= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/pflag v1.0.2 h1:Fy0orTDgHdbnzHcsOgfCN4LtHf0ec3wwtiwJqwvf3Gc= +github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793 h1:u+LnwYTOOW7Ukr/fppxEb1Nwz0AtPflrblfvUudpo+I= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd h1:nTDtHvHSdCn1m6ITfMRqtOd/9+7a3s8RBNOZ3eYZzJA= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be 
h1:vEDujvNQGv4jgYKudGeI/+DAX4Jffq6hpD55MmoEvKs= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180525142821-c11f84a56e43/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180907202204-917fdcba135d h1:kWn1hlsqeUrk6JsLJO0ZFyz9bMg8u85voZlIuc68ZU4= +golang.org/x/sys v0.0.0-20180907202204-917fdcba135d/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +google.golang.org/api v0.0.0-20180907210053-b609d5e6b7ab h1:qNpJa8m9WofZ7RLj+7o15Ppapwm30+RweyIDSNpw8ps= +google.golang.org/api v0.0.0-20180907210053-b609d5e6b7ab/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/appengine v1.1.0 h1:igQkv0AAhEIvTEpD5LIpAfav2eeVO9HBTjvKHVJPRSs= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/ini.v1 v1.38.2 h1:dGcbywv4RufeGeiMycPT/plKB5FtmLKLnWKwBiLhUA4= +gopkg.in/ini.v1 v1.38.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/tomb.v2 v2.0.0-20161208151619-d5d1b5820637 h1:yiW+nvdHb9LVqSHQBXfZCieqV4fzYhNBql77zY0ykqs= +gopkg.in/tomb.v2 v2.0.0-20161208151619-d5d1b5820637/go.mod h1:BHsqpu/nsuzkT5BpiH1EMZPLyqSMM8JbIavyFACoFNk= +gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/helpers/.gitignore b/helpers/.gitignore new file mode 100644 index 000000000..299320af1 --- /dev/null +++ b/helpers/.gitignore @@ -0,0 +1,2 @@ +build-release-binaries/build-release-binaries +prepare-release/prepare-release diff --git a/helpers/build-release-binaries/main.go b/helpers/build-release-binaries/main.go new file mode 100644 index 000000000..f9e25e447 --- /dev/null +++ b/helpers/build-release-binaries/main.go @@ -0,0 +1,269 @@ +package main + +import ( + "fmt" + "os" + "os/exec" + "path/filepath" + "runtime" + "strings" + "time" + + "github.com/spf13/pflag" + "golang.org/x/sync/errgroup" +) + +var opts = struct { + Verbose bool + SourceDir string + OutputDir string + Version string +}{} + +func init() { + pflag.BoolVarP(&opts.Verbose, "verbose", "v", false, "be verbose") + pflag.StringVarP(&opts.SourceDir, "source", "s", "/restic", "path to the source code `directory`") + pflag.StringVarP(&opts.OutputDir, "output", "o", "/output", "path to the output `directory`") + pflag.StringVar(&opts.Version, "version", "", "use `x.y.z` as the version for output files") + pflag.Parse() +} + +func die(f string, args ...interface{}) { + if !strings.HasSuffix(f, "\n") { + f += "\n" + } + f = "\x1b[31m" + f + "\x1b[0m" + fmt.Fprintf(os.Stderr, f, args...) 
+ os.Exit(1) +} + +func msg(f string, args ...interface{}) { + if !strings.HasSuffix(f, "\n") { + f += "\n" + } + f = "\x1b[32m" + f + "\x1b[0m" + fmt.Printf(f, args...) +} + +func verbose(f string, args ...interface{}) { + if !opts.Verbose { + return + } + if !strings.HasSuffix(f, "\n") { + f += "\n" + } + f = "\x1b[32m" + f + "\x1b[0m" + fmt.Printf(f, args...) +} + +func run(cmd string, args ...string) { + c := exec.Command(cmd, args...) + c.Stdout = os.Stdout + c.Stderr = os.Stderr + err := c.Run() + if err != nil { + die("error running %s %s: %v", cmd, args, err) + } +} + +func rm(file string) { + err := os.Remove(file) + + if os.IsNotExist(err) { + err = nil + } + + if err != nil { + die("error removing %v: %v", file, err) + } +} + +func rmdir(dir string) { + err := os.RemoveAll(dir) + if err != nil { + die("error removing %v: %v", dir, err) + } +} + +func mkdir(dir string) { + err := os.MkdirAll(dir, 0755) + if err != nil { + die("mkdir %v: %v", dir, err) + } +} + +func getwd() string { + pwd, err := os.Getwd() + if err != nil { + die("Getwd(): %v", err) + } + return pwd +} + +func abs(dir string) string { + absDir, err := filepath.Abs(dir) + if err != nil { + die("unable to find absolute path for %v: %v", dir, err) + } + return absDir +} + +func build(sourceDir, outputDir, goos, goarch string) (filename string) { + filename = fmt.Sprintf("%v_%v_%v", "restic", goos, goarch) + + if opts.Version != "" { + filename = fmt.Sprintf("%v_%v_%v_%v", "restic", opts.Version, goos, goarch) + } + + if goos == "windows" { + filename += ".exe" + } + outputFile := filepath.Join(outputDir, filename) + + c := exec.Command("go", "build", + "-mod=vendor", + "-o", outputFile, + "-ldflags", "-s -w", + "-tags", "selfupdate", + "./cmd/restic", + ) + c.Stdout = os.Stdout + c.Stderr = os.Stderr + c.Dir = sourceDir + + verbose("run %v %v in %v", "go", c.Args, c.Dir) + + c.Dir = sourceDir + c.Env = append(os.Environ(), + "CGO_ENABLED=0", + "GOPROXY=off", + "GOOS="+goos, + "GOARCH="+goarch, + ) + + err := c.Run() + if err != nil { + die("error building %v/%v: %v", goos, goarch, err) + } + + return filename +} + +func modTime(file string) time.Time { + fi, err := os.Lstat(file) + if err != nil { + die("unable to get modtime of %v: %v", file, err) + } + + return fi.ModTime() +} + +func touch(file string, t time.Time) { + err := os.Chtimes(file, t, t) + if err != nil { + die("unable to update timestamps for %v: %v", file, err) + } +} + +func chmod(file string, mode os.FileMode) { + err := os.Chmod(file, mode) + if err != nil { + die("unable to chmod %v to %s: %v", file, mode, err) + } +} + +func compress(goos, inputDir, filename string) (outputFile string) { + var c *exec.Cmd + switch goos { + case "windows": + outputFile = strings.TrimSuffix(filename, ".exe") + ".zip" + c = exec.Command("zip", "-q", "-X", outputFile, filename) + c.Dir = inputDir + default: + outputFile = filename + ".bz2" + c = exec.Command("bzip2", filename) + c.Dir = inputDir + } + + rm(filepath.Join(inputDir, outputFile)) + + c.Stdout = os.Stdout + c.Stderr = os.Stderr + c.Dir = inputDir + + verbose("run %v %v in %v", "go", c.Args, c.Dir) + + err := c.Run() + if err != nil { + die("error compressing: %v", err) + } + + rm(filepath.Join(inputDir, filename)) + + return outputFile +} + +func buildForTarget(sourceDir, outputDir, goos, goarch string) (filename string) { + mtime := modTime(filepath.Join(sourceDir, "VERSION")) + + filename = build(sourceDir, outputDir, goos, goarch) + touch(filepath.Join(outputDir, filename), mtime) + 
chmod(filepath.Join(outputDir, filename), 0755) + filename = compress(goos, outputDir, filename) + return filename +} + +func buildTargets(sourceDir, outputDir string, targets map[string][]string) { + start := time.Now() + msg("building with %d workers", runtime.NumCPU()) + + type Job struct{ GOOS, GOARCH string } + + var wg errgroup.Group + ch := make(chan Job) + + for i := 0; i < runtime.NumCPU(); i++ { + wg.Go(func() error { + for job := range ch { + start := time.Now() + verbose("build %v/%v", job.GOOS, job.GOARCH) + buildForTarget(sourceDir, outputDir, job.GOOS, job.GOARCH) + msg("built %v/%v in %.3fs", job.GOOS, job.GOARCH, time.Since(start).Seconds()) + } + return nil + }) + } + + wg.Go(func() error { + for goos, archs := range targets { + for _, goarch := range archs { + ch <- Job{goos, goarch} + } + } + close(ch) + return nil + }) + + _ = wg.Wait() + msg("build finished in %.3fs", time.Since(start).Seconds()) +} + +var defaultBuildTargets = map[string][]string{ + "darwin": []string{"386", "amd64"}, + "freebsd": []string{"386", "amd64", "arm"}, + "linux": []string{"386", "amd64", "arm", "arm64"}, + "openbsd": []string{"386", "amd64"}, + "windows": []string{"386", "amd64"}, +} + +func main() { + if len(pflag.Args()) != 0 { + die("USAGE: build-release-binaries [OPTIONS]") + } + + sourceDir := abs(opts.SourceDir) + outputDir := abs(opts.OutputDir) + mkdir(outputDir) + + buildTargets(sourceDir, outputDir, defaultBuildTargets) +} diff --git a/helpers/prepare-release/main.go b/helpers/prepare-release/main.go new file mode 100644 index 000000000..e42e0e8e6 --- /dev/null +++ b/helpers/prepare-release/main.go @@ -0,0 +1,461 @@ +package main + +import ( + "bufio" + "bytes" + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "regexp" + "strings" + + "github.com/spf13/pflag" +) + +var opts = struct { + Version string + + IgnoreBranchName bool + IgnoreUncommittedChanges bool + IgnoreChangelogVersion bool + IgnoreChangelogReleaseDate bool + IgnoreChangelogCurrent bool + IgnoreDockerBuildGoVersion bool + + OutputDir string +}{} + +var versionRegex = regexp.MustCompile(`^\d+\.\d+\.\d+$`) + +func init() { + pflag.BoolVar(&opts.IgnoreBranchName, "ignore-branch-name", false, "allow releasing from other branches as 'master'") + pflag.BoolVar(&opts.IgnoreUncommittedChanges, "ignore-uncommitted-changes", false, "allow uncommitted changes") + pflag.BoolVar(&opts.IgnoreChangelogVersion, "ignore-changelog-version", false, "ignore missing entry in CHANGELOG.md") + pflag.BoolVar(&opts.IgnoreChangelogReleaseDate, "ignore-changelog-release-date", false, "ignore missing subdir with date in changelog/") + pflag.BoolVar(&opts.IgnoreChangelogCurrent, "ignore-changelog-current", false, "ignore check if CHANGELOG.md is up to date") + pflag.BoolVar(&opts.IgnoreDockerBuildGoVersion, "ignore-docker-build-go-version", false, "ignore check if docker builder go version is up to date") + + pflag.StringVar(&opts.OutputDir, "output-dir", "", "use `dir` as output directory") + + pflag.Parse() +} + +func die(f string, args ...interface{}) { + if !strings.HasSuffix(f, "\n") { + f += "\n" + } + f = "\x1b[31m" + f + "\x1b[0m" + fmt.Fprintf(os.Stderr, f, args...) + os.Exit(1) +} + +func msg(f string, args ...interface{}) { + if !strings.HasSuffix(f, "\n") { + f += "\n" + } + f = "\x1b[32m" + f + "\x1b[0m" + fmt.Printf(f, args...) +} + +func run(cmd string, args ...string) { + c := exec.Command(cmd, args...) 
+ c.Stdout = os.Stdout + c.Stderr = os.Stderr + err := c.Run() + if err != nil { + die("error running %s %s: %v", cmd, args, err) + } +} + +func replace(filename, from, to string) { + reg := regexp.MustCompile(from) + + buf, err := ioutil.ReadFile(filename) + if err != nil { + die("error reading file %v: %v", filename, err) + } + + buf = reg.ReplaceAll(buf, []byte(to)) + err = ioutil.WriteFile(filename, buf, 0644) + if err != nil { + die("error writing file %v: %v", filename, err) + } +} + +func rm(file string) { + err := os.Remove(file) + if err != nil { + die("error removing %v: %v", file, err) + } +} + +func rmdir(dir string) { + err := os.RemoveAll(dir) + if err != nil { + die("error removing %v: %v", dir, err) + } +} + +func mkdir(dir string) { + err := os.Mkdir(dir, 0755) + if err != nil { + die("mkdir %v: %v", dir, err) + } +} + +func getwd() string { + pwd, err := os.Getwd() + if err != nil { + die("Getwd(): %v", err) + } + return pwd +} + +func uncommittedChanges(dirs ...string) string { + args := []string{"status", "--porcelain", "--untracked-files=no"} + if len(dirs) > 0 { + args = append(args, dirs...) + } + + changes, err := exec.Command("git", args...).Output() + if err != nil { + die("unable to run command: %v", err) + } + + return string(changes) +} + +func preCheckBranchMaster() { + if opts.IgnoreBranchName { + return + } + + branch, err := exec.Command("git", "rev-parse", "--abbrev-ref", "HEAD").Output() + if err != nil { + die("error running 'git': %v", err) + } + + if strings.TrimSpace(string(branch)) != "master" { + die("wrong branch: %s", branch) + } +} + +func preCheckUncommittedChanges() { + if opts.IgnoreUncommittedChanges { + return + } + + changes := uncommittedChanges() + if len(changes) > 0 { + die("uncommitted changes found:\n%s\n", changes) + } +} + +func preCheckVersionExists() { + buf, err := exec.Command("git", "tag", "-l").Output() + if err != nil { + die("error running 'git tag -l': %v", err) + } + + sc := bufio.NewScanner(bytes.NewReader(buf)) + for sc.Scan() { + if sc.Err() != nil { + die("error scanning version tags: %v", sc.Err()) + } + + if strings.TrimSpace(sc.Text()) == "v"+opts.Version { + die("tag v%v already exists", opts.Version) + } + } +} + +func preCheckChangelogCurrent() { + if opts.IgnoreChangelogCurrent { + return + } + + // regenerate changelog + run("calens", "--output", "CHANGELOG.md") + + // check for uncommitted changes in changelog + if len(uncommittedChanges("CHANGELOG.md")) > 0 { + msg("committing file CHANGELOG.md") + run("git", "commit", "-m", fmt.Sprintf("Generate CHANGELOG.md for %v", opts.Version), "CHANGELOG.md") + } +} + +func preCheckChangelogRelease() { + if opts.IgnoreChangelogReleaseDate { + return + } + + d, err := os.Open("changelog") + if err != nil { + die("error opening dir: %v", err) + } + + names, err := d.Readdirnames(-1) + if err != nil { + _ = d.Close() + die("error listing dir: %v", err) + } + + err = d.Close() + if err != nil { + die("error closing dir: %v", err) + } + + for _, name := range names { + if strings.HasPrefix(name, opts.Version+"_") { + return + } + } + + die("unable to find subdir with date for version %v in changelog", opts.Version) +} + +func preCheckChangelogVersion() { + if opts.IgnoreChangelogVersion { + return + } + + f, err := os.Open("CHANGELOG.md") + if err != nil { + die("unable to open CHANGELOG.md: %v", err) + } + defer f.Close() + + sc := bufio.NewScanner(f) + for sc.Scan() { + if sc.Err() != nil { + die("error scanning: %v", sc.Err()) + } + + if 
strings.Contains(strings.TrimSpace(sc.Text()), fmt.Sprintf("Changelog for restic %v", opts.Version)) { + return + } + } + + die("CHANGELOG.md does not contain version %v", opts.Version) +} + +func preCheckDockerBuilderGoVersion() { + if opts.IgnoreDockerBuildGoVersion { + return + } + + buf, err := exec.Command("go", "version").Output() + if err != nil { + die("unable to check local Go version: %v", err) + } + localVersion := strings.TrimSpace(string(buf)) + + msg("update docker container restic/builder") + run("docker", "pull", "restic/builder") + buf, err = exec.Command("docker", "run", "--rm", "restic/builder", "go", "version").Output() + if err != nil { + die("unable to check Go version in docker image: %v", err) + } + containerVersion := strings.TrimSpace(string(buf)) + + if localVersion != containerVersion { + die("version in docker container restic/builder is different:\n local: %v\n container: %v\n", + localVersion, containerVersion) + } +} + +func generateFiles() { + msg("generate files") + run("go", "run", "build.go", "-o", "restic-generate.temp") + + mandir := filepath.Join("doc", "man") + rmdir(mandir) + mkdir(mandir) + run("./restic-generate.temp", "generate", + "--man", "doc/man", + "--zsh-completion", "doc/zsh-completion.zsh", + "--bash-completion", "doc/bash-completion.sh") + rm("restic-generate.temp") + + run("git", "add", "doc") + changes := uncommittedChanges("doc") + if len(changes) > 0 { + msg("committing manpages and auto-completion") + run("git", "commit", "-m", "Update manpages and auto-completion", "doc") + } +} + +var versionPattern = `var version = ".*"` + +const versionCodeFile = "cmd/restic/global.go" + +func updateVersion() { + err := ioutil.WriteFile("VERSION", []byte(opts.Version+"\n"), 0644) + if err != nil { + die("unable to write version to file: %v", err) + } + + newVersion := fmt.Sprintf("var version = %q", opts.Version) + replace(versionCodeFile, versionPattern, newVersion) + + if len(uncommittedChanges("VERSION")) > 0 || len(uncommittedChanges(versionCodeFile)) > 0 { + msg("committing version files") + run("git", "commit", "-m", fmt.Sprintf("Add version for %v", opts.Version), "VERSION", versionCodeFile) + } +} + +func updateVersionDev() { + newVersion := fmt.Sprintf(`var version = "%s-dev (compiled manually)"`, opts.Version) + replace(versionCodeFile, versionPattern, newVersion) + + msg("committing cmd/restic/global.go with dev version") + run("git", "commit", "-m", fmt.Sprintf("Set development version for %v", opts.Version), "VERSION", versionCodeFile) +} + +func addTag() { + tagname := "v" + opts.Version + msg("add tag %v", tagname) + run("git", "tag", "-a", "-s", "-m", tagname, tagname) +} + +func exportTar(version, tarFilename string) { + cmd := fmt.Sprintf("git archive --format=tar --prefix=restic-%s/ v%s | gzip -n > %s", + version, version, tarFilename) + run("sh", "-c", cmd) + msg("build restic-%s.tar.gz", version) +} + +func extractTar(filename, outputDir string) { + msg("extract tar into %v", outputDir) + c := exec.Command("tar", "xz", "--strip-components=1", "-f", filename) + c.Stdout = os.Stdout + c.Stderr = os.Stderr + c.Dir = outputDir + err := c.Run() + if err != nil { + die("error extracting tar: %v", err) + } +} + +func runBuild(sourceDir, outputDir, version string) { + msg("building binaries...") + run("docker", "run", "--rm", + "--volume", sourceDir+":/restic", + "--volume", outputDir+":/output", + "restic/builder", + "go", "run", "-mod=vendor", "helpers/build-release-binaries/main.go", + "--version", version) +} + +func readdir(dir 
string) []string { + fis, err := ioutil.ReadDir(dir) + if err != nil { + die("readdir %v failed: %v", dir, err) + } + + filenames := make([]string, 0, len(fis)) + for _, fi := range fis { + filenames = append(filenames, fi.Name()) + } + return filenames +} + +func sha256sums(inputDir, outputFile string) { + msg("runnnig sha256sum in %v", inputDir) + + filenames := readdir(inputDir) + + f, err := os.Create(outputFile) + if err != nil { + die("unable to create %v: %v", outputFile, err) + } + + c := exec.Command("sha256sum", filenames...) + c.Stdout = f + c.Stderr = os.Stderr + c.Dir = inputDir + + err = c.Run() + if err != nil { + die("error running sha256sums: %v", err) + } + + err = f.Close() + if err != nil { + die("close %v: %v", outputFile, err) + } +} + +func signFiles(filenames ...string) { + for _, filename := range filenames { + run("gpg", "--armor", "--detach-sign", filename) + } +} + +func updateDocker(outputDir, version string) { + cmd := fmt.Sprintf("bzcat %s/restic_%s_linux_amd64.bz2 > restic", outputDir, version) + run("sh", "-c", cmd) + run("chmod", "+x", "restic") + run("docker", "pull", "alpine:latest") + run("docker", "build", "--rm", "--tag", "restic/restic:latest", "-f", "docker/Dockerfile", ".") + run("docker", "tag", "restic/restic:latest", "restic/restic:"+version) +} + +func tempdir(prefix string) string { + dir, err := ioutil.TempDir(getwd(), prefix) + if err != nil { + die("unable to create temp dir %q: %v", prefix, err) + } + return dir +} + +func main() { + if len(pflag.Args()) == 0 { + die("USAGE: release-version [OPTIONS] VERSION") + } + + opts.Version = pflag.Args()[0] + if !versionRegex.MatchString(opts.Version) { + die("invalid new version") + } + + preCheckBranchMaster() + preCheckUncommittedChanges() + preCheckVersionExists() + preCheckDockerBuilderGoVersion() + preCheckChangelogRelease() + preCheckChangelogCurrent() + preCheckChangelogVersion() + + if opts.OutputDir == "" { + opts.OutputDir = tempdir("build-output-") + } + sourceDir := tempdir("source-") + + msg("using output dir %v", opts.OutputDir) + msg("using source dir %v", sourceDir) + + generateFiles() + updateVersion() + addTag() + updateVersionDev() + + tarFilename := filepath.Join(opts.OutputDir, fmt.Sprintf("restic-%s.tar.gz", opts.Version)) + exportTar(opts.Version, tarFilename) + + extractTar(tarFilename, sourceDir) + runBuild(sourceDir, opts.OutputDir, opts.Version) + rmdir(sourceDir) + + sha256sums(opts.OutputDir, filepath.Join(opts.OutputDir, "SHA256SUMS")) + + signFiles(filepath.Join(opts.OutputDir, "SHA256SUMS"), tarFilename) + + updateDocker(opts.OutputDir, opts.Version) + + msg("done, output dir is %v", opts.OutputDir) + + msg("now run:\n\ngit push --tags origin master\ndocker push restic/restic\n") +} diff --git a/internal/archiver/archiver.go b/internal/archiver/archiver.go new file mode 100644 index 000000000..4ce9ef597 --- /dev/null +++ b/internal/archiver/archiver.go @@ -0,0 +1,824 @@ +package archiver + +import ( + "context" + "encoding/json" + "os" + "path" + "runtime" + "sort" + "time" + + "github.com/restic/restic/internal/debug" + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/fs" + "github.com/restic/restic/internal/restic" + tomb "gopkg.in/tomb.v2" +) + +// SelectByNameFunc returns true for all items that should be included (files and +// dirs). If false is returned, files are ignored and dirs are not even walked. 
+type SelectByNameFunc func(item string) bool + +// SelectFunc returns true for all items that should be included (files and +// dirs). If false is returned, files are ignored and dirs are not even walked. +type SelectFunc func(item string, fi os.FileInfo) bool + +// ErrorFunc is called when an error during archiving occurs. When nil is +// returned, the archiver continues, otherwise it aborts and passes the error +// up the call stack. +type ErrorFunc func(file string, fi os.FileInfo, err error) error + +// ItemStats collects some statistics about a particular file or directory. +type ItemStats struct { + DataBlobs int // number of new data blobs added for this item + DataSize uint64 // sum of the sizes of all new data blobs + TreeBlobs int // number of new tree blobs added for this item + TreeSize uint64 // sum of the sizes of all new tree blobs +} + +// Add adds other to the current ItemStats. +func (s *ItemStats) Add(other ItemStats) { + s.DataBlobs += other.DataBlobs + s.DataSize += other.DataSize + s.TreeBlobs += other.TreeBlobs + s.TreeSize += other.TreeSize +} + +// Archiver saves a directory structure to the repo. +type Archiver struct { + Repo restic.Repository + SelectByName SelectByNameFunc + Select SelectFunc + FS fs.FS + Options Options + + blobSaver *BlobSaver + fileSaver *FileSaver + treeSaver *TreeSaver + + // Error is called for all errors that occur during backup. + Error ErrorFunc + + // CompleteItem is called for all files and dirs once they have been + // processed successfully. The parameter item contains the path as it will + // be in the snapshot after saving. s contains some statistics about this + // particular file/dir. + // + // CompleteItem may be called asynchronously from several different + // goroutines! + CompleteItem func(item string, previous, current *restic.Node, s ItemStats, d time.Duration) + + // StartFile is called when a file is being processed by a worker. + StartFile func(filename string) + + // CompleteBlob is called for all saved blobs for files. + CompleteBlob func(filename string, bytes uint64) + + // WithAtime configures if the access time for files and directories should + // be saved. Enabling it may result in much metadata, so it's off by + // default. + WithAtime bool +} + +// Options is used to configure the archiver. +type Options struct { + // FileReadConcurrency sets how many files are read in concurrently. If + // it's set to zero, at most two files are read in concurrently (which + // turned out to be a good default for most situations). + FileReadConcurrency uint + + // SaveBlobConcurrency sets how many blobs are hashed and saved + // concurrently. If it's set to zero, the default is the number of CPUs + // available in the system. + SaveBlobConcurrency uint + + // SaveTreeConcurrency sets how many trees are marshalled and saved to the + // repo concurrently. + SaveTreeConcurrency uint +} + +// ApplyDefaults returns a copy of o with the default options set for all unset +// fields. +func (o Options) ApplyDefaults() Options { + if o.FileReadConcurrency == 0 { + // two is a sweet spot for almost all situations. 
We've done some + // experiments documented here: + // https://github.com/borgbackup/borg/issues/3500 + o.FileReadConcurrency = 2 + } + + if o.SaveBlobConcurrency == 0 { + o.SaveBlobConcurrency = uint(runtime.NumCPU()) + } + + if o.SaveTreeConcurrency == 0 { + // use a relatively high concurrency here, having multiple SaveTree + // workers is cheap + o.SaveTreeConcurrency = o.SaveBlobConcurrency * 20 + } + + return o +} + +// New initializes a new archiver. +func New(repo restic.Repository, fs fs.FS, opts Options) *Archiver { + arch := &Archiver{ + Repo: repo, + SelectByName: func(item string) bool { return true }, + Select: func(item string, fi os.FileInfo) bool { return true }, + FS: fs, + Options: opts.ApplyDefaults(), + + CompleteItem: func(string, *restic.Node, *restic.Node, ItemStats, time.Duration) {}, + StartFile: func(string) {}, + CompleteBlob: func(string, uint64) {}, + } + + return arch +} + +// error calls arch.Error if it is set and the error is different from context.Canceled. +func (arch *Archiver) error(item string, fi os.FileInfo, err error) error { + if arch.Error == nil || err == nil { + return err + } + + if err == context.Canceled { + return err + } + + errf := arch.Error(item, fi, err) + if err != errf { + debug.Log("item %v: error was filtered by handler, before: %q, after: %v", item, err, errf) + } + return errf +} + +// saveTree stores a tree in the repo. It checks the index and the known blobs +// before saving anything. +func (arch *Archiver) saveTree(ctx context.Context, t *restic.Tree) (restic.ID, ItemStats, error) { + var s ItemStats + buf, err := json.Marshal(t) + if err != nil { + return restic.ID{}, s, errors.Wrap(err, "MarshalJSON") + } + + // append a newline so that the data is always consistent (json.Encoder + // adds a newline after each object) + buf = append(buf, '\n') + + b := &Buffer{Data: buf} + res := arch.blobSaver.Save(ctx, restic.TreeBlob, b) + + res.Wait(ctx) + if !res.Known() { + s.TreeBlobs++ + s.TreeSize += uint64(len(buf)) + } + return res.ID(), s, nil +} + +// nodeFromFileInfo returns the restic node from a os.FileInfo. +func (arch *Archiver) nodeFromFileInfo(filename string, fi os.FileInfo) (*restic.Node, error) { + node, err := restic.NodeFromFileInfo(filename, fi) + if !arch.WithAtime { + node.AccessTime = node.ModTime + } + return node, errors.Wrap(err, "NodeFromFileInfo") +} + +// loadSubtree tries to load the subtree referenced by node. In case of an error, nil is returned. +func (arch *Archiver) loadSubtree(ctx context.Context, node *restic.Node) *restic.Tree { + if node == nil || node.Type != "dir" || node.Subtree == nil { + return nil + } + + tree, err := arch.Repo.LoadTree(ctx, *node.Subtree) + if err != nil { + debug.Log("unable to load tree %v: %v", node.Subtree.Str(), err) + // TODO: handle error + return nil + } + + return tree +} + +// SaveDir stores a directory in the repo and returns the node. snPath is the +// path within the current snapshot. 
+func (arch *Archiver) SaveDir(ctx context.Context, snPath string, fi os.FileInfo, dir string, previous *restic.Tree) (d FutureTree, err error) { + debug.Log("%v %v", snPath, dir) + + treeNode, err := arch.nodeFromFileInfo(dir, fi) + if err != nil { + return FutureTree{}, err + } + + names, err := readdirnames(arch.FS, dir) + if err != nil { + return FutureTree{}, err + } + + nodes := make([]FutureNode, 0, len(names)) + + for _, name := range names { + // test if context has been cancelled + if ctx.Err() != nil { + debug.Log("context has been cancelled, aborting") + return FutureTree{}, ctx.Err() + } + + pathname := arch.FS.Join(dir, name) + oldNode := previous.Find(name) + snItem := join(snPath, name) + fn, excluded, err := arch.Save(ctx, snItem, pathname, oldNode) + + // return error early if possible + if err != nil { + err = arch.error(pathname, fi, err) + if err == nil { + // ignore error + continue + } + + return FutureTree{}, err + } + + if excluded { + continue + } + + nodes = append(nodes, fn) + } + + ft := arch.treeSaver.Save(ctx, snPath, treeNode, nodes) + + return ft, nil +} + +// FutureNode holds a reference to a node, FutureFile, or FutureTree. +type FutureNode struct { + snPath, target string + + // kept to call the error callback function + absTarget string + fi os.FileInfo + + node *restic.Node + stats ItemStats + err error + + isFile bool + file FutureFile + isTree bool + tree FutureTree +} + +func (fn *FutureNode) wait(ctx context.Context) { + switch { + case fn.isFile: + // wait for and collect the data for the file + fn.file.Wait(ctx) + fn.node = fn.file.Node() + fn.err = fn.file.Err() + fn.stats = fn.file.Stats() + + // ensure the other stuff can be garbage-collected + fn.file = FutureFile{} + fn.isFile = false + + case fn.isTree: + // wait for and collect the data for the dir + fn.tree.Wait(ctx) + fn.node = fn.tree.Node() + fn.stats = fn.tree.Stats() + + // ensure the other stuff can be garbage-collected + fn.tree = FutureTree{} + fn.isTree = false + } +} + +// Save saves a target (file or directory) to the repo. If the item is +// excluded, this function returns a nil node and error, with excluded set to +// true. +// +// Errors and completion needs to be handled by the caller. +// +// snPath is the path within the current snapshot. 
+func (arch *Archiver) Save(ctx context.Context, snPath, target string, previous *restic.Node) (fn FutureNode, excluded bool, err error) { + start := time.Now() + + fn = FutureNode{ + snPath: snPath, + target: target, + } + + debug.Log("%v target %q, previous %v", snPath, target, previous) + abstarget, err := arch.FS.Abs(target) + if err != nil { + return FutureNode{}, false, err + } + + fn.absTarget = abstarget + + // exclude files by path before running Lstat to reduce number of lstat calls + if !arch.SelectByName(abstarget) { + debug.Log("%v is excluded by path", target) + return FutureNode{}, true, nil + } + + // get file info and run remaining select functions that require file information + fi, err := arch.FS.Lstat(target) + if !arch.Select(abstarget, fi) { + debug.Log("%v is excluded", target) + return FutureNode{}, true, nil + } + + if err != nil { + debug.Log("lstat() for %v returned error: %v", target, err) + err = arch.error(abstarget, fi, err) + if err != nil { + return FutureNode{}, false, errors.Wrap(err, "Lstat") + } + return FutureNode{}, true, nil + } + + switch { + case fs.IsRegularFile(fi): + debug.Log(" %v regular file", target) + start := time.Now() + + // reopen file and do an fstat() on the open file to check it is still + // a file (and has not been exchanged for e.g. a symlink) + file, err := arch.FS.OpenFile(target, fs.O_RDONLY|fs.O_NOFOLLOW, 0) + if err != nil { + debug.Log("Openfile() for %v returned error: %v", target, err) + err = arch.error(abstarget, fi, err) + if err != nil { + return FutureNode{}, false, errors.Wrap(err, "Lstat") + } + return FutureNode{}, true, nil + } + + fi, err = file.Stat() + if err != nil { + debug.Log("stat() on opened file %v returned error: %v", target, err) + _ = file.Close() + err = arch.error(abstarget, fi, err) + if err != nil { + return FutureNode{}, false, errors.Wrap(err, "Lstat") + } + return FutureNode{}, true, nil + } + + // make sure it's still a file + if !fs.IsRegularFile(fi) { + err = errors.Errorf("file %v changed type, refusing to archive") + err = arch.error(abstarget, fi, err) + if err != nil { + return FutureNode{}, false, err + } + return FutureNode{}, true, nil + } + + // use previous node if the file hasn't changed + if previous != nil && !fileChanged(fi, previous) { + debug.Log("%v hasn't changed, returning old node", target) + arch.CompleteItem(snPath, previous, previous, ItemStats{}, time.Since(start)) + arch.CompleteBlob(snPath, previous.Size) + fn.node = previous + _ = file.Close() + return fn, false, nil + } + + fn.isFile = true + // Save will close the file, we don't need to do that + fn.file = arch.fileSaver.Save(ctx, snPath, file, fi, func() { + arch.StartFile(snPath) + }, func(node *restic.Node, stats ItemStats) { + arch.CompleteItem(snPath, previous, node, stats, time.Since(start)) + }) + + case fi.IsDir(): + debug.Log(" %v dir", target) + + snItem := snPath + "/" + start := time.Now() + oldSubtree := arch.loadSubtree(ctx, previous) + + fn.isTree = true + fn.tree, err = arch.SaveDir(ctx, snPath, fi, target, oldSubtree) + if err == nil { + arch.CompleteItem(snItem, previous, fn.node, fn.stats, time.Since(start)) + } else { + debug.Log("SaveDir for %v returned error: %v", snPath, err) + return FutureNode{}, false, err + } + + case fi.Mode()&os.ModeSocket > 0: + debug.Log(" %v is a socket, ignoring", target) + return FutureNode{}, true, nil + + default: + debug.Log(" %v other", target) + + fn.node, err = arch.nodeFromFileInfo(target, fi) + if err != nil { + return FutureNode{}, false, err + } + } + + 
debug.Log("return after %.3f", time.Since(start).Seconds()) + + return fn, false, nil +} + +// fileChanged returns true if the file's content has changed since the node +// was created. +func fileChanged(fi os.FileInfo, node *restic.Node) bool { + if node == nil { + return true + } + + // check type change + if node.Type != "file" { + return true + } + + // check modification timestamp + if !fi.ModTime().Equal(node.ModTime) { + return true + } + + // check size + extFI := fs.ExtendedStat(fi) + if uint64(fi.Size()) != node.Size || uint64(extFI.Size) != node.Size { + return true + } + + // check inode + if node.Inode != extFI.Inode { + return true + } + + return false +} + +// join returns all elements separated with a forward slash. +func join(elem ...string) string { + return path.Join(elem...) +} + +// statDir returns the file info for the directory. Symbolic links are +// resolved. If the target directory is not a directory, an error is returned. +func (arch *Archiver) statDir(dir string) (os.FileInfo, error) { + fi, err := arch.FS.Stat(dir) + if err != nil { + return nil, errors.Wrap(err, "Lstat") + } + + tpe := fi.Mode() & (os.ModeType | os.ModeCharDevice) + if tpe != os.ModeDir { + return fi, errors.Errorf("path is not a directory: %v", dir) + } + + return fi, nil +} + +// SaveTree stores a Tree in the repo, returned is the tree. snPath is the path +// within the current snapshot. +func (arch *Archiver) SaveTree(ctx context.Context, snPath string, atree *Tree, previous *restic.Tree) (*restic.Tree, error) { + debug.Log("%v (%v nodes), parent %v", snPath, len(atree.Nodes), previous) + + tree := restic.NewTree() + + futureNodes := make(map[string]FutureNode) + + // iterate over the nodes of atree in lexicographic (=deterministic) order + names := make([]string, 0, len(atree.Nodes)) + for name := range atree.Nodes { + names = append(names, name) + } + sort.Stable(sort.StringSlice(names)) + + for _, name := range names { + subatree := atree.Nodes[name] + + // test if context has been cancelled + if ctx.Err() != nil { + return nil, ctx.Err() + } + + // this is a leaf node + if subatree.Path != "" { + fn, excluded, err := arch.Save(ctx, join(snPath, name), subatree.Path, previous.Find(name)) + + if err != nil { + err = arch.error(subatree.Path, fn.fi, err) + if err == nil { + // ignore error + continue + } + return nil, err + } + + if err != nil { + return nil, err + } + + if !excluded { + futureNodes[name] = fn + } + continue + } + + snItem := join(snPath, name) + "/" + start := time.Now() + + oldNode := previous.Find(name) + oldSubtree := arch.loadSubtree(ctx, oldNode) + + // not a leaf node, archive subtree + subtree, err := arch.SaveTree(ctx, join(snPath, name), &subatree, oldSubtree) + if err != nil { + return nil, err + } + + id, nodeStats, err := arch.saveTree(ctx, subtree) + if err != nil { + return nil, err + } + + if subatree.FileInfoPath == "" { + return nil, errors.Errorf("FileInfoPath for %v/%v is empty", snPath, name) + } + + debug.Log("%v, saved subtree %v as %v", snPath, subtree, id.Str()) + + fi, err := arch.statDir(subatree.FileInfoPath) + if err != nil { + return nil, err + } + + debug.Log("%v, dir node data loaded from %v", snPath, subatree.FileInfoPath) + + node, err := arch.nodeFromFileInfo(subatree.FileInfoPath, fi) + if err != nil { + return nil, err + } + + node.Name = name + node.Subtree = &id + + err = tree.Insert(node) + if err != nil { + return nil, err + } + + arch.CompleteItem(snItem, oldNode, node, nodeStats, time.Since(start)) + } + + debug.Log("waiting on %d 
nodes", len(futureNodes)) + + // process all futures + for name, fn := range futureNodes { + fn.wait(ctx) + + // return the error, or ignore it + if fn.err != nil { + fn.err = arch.error(fn.target, fn.fi, fn.err) + if fn.err == nil { + // ignore error + continue + } + + return nil, fn.err + } + + // when the error is ignored, the node could not be saved, so ignore it + if fn.node == nil { + debug.Log("%v excluded: %v", fn.snPath, fn.target) + continue + } + + fn.node.Name = name + + err := tree.Insert(fn.node) + if err != nil { + return nil, err + } + } + + return tree, nil +} + +type fileInfoSlice []os.FileInfo + +func (fi fileInfoSlice) Len() int { + return len(fi) +} + +func (fi fileInfoSlice) Swap(i, j int) { + fi[i], fi[j] = fi[j], fi[i] +} + +func (fi fileInfoSlice) Less(i, j int) bool { + return fi[i].Name() < fi[j].Name() +} + +func readdir(filesystem fs.FS, dir string) ([]os.FileInfo, error) { + f, err := filesystem.OpenFile(dir, fs.O_RDONLY|fs.O_NOFOLLOW, 0) + if err != nil { + return nil, errors.Wrap(err, "Open") + } + + entries, err := f.Readdir(-1) + if err != nil { + _ = f.Close() + return nil, errors.Wrapf(err, "Readdir %v failed", dir) + } + + err = f.Close() + if err != nil { + return nil, err + } + + sort.Sort(fileInfoSlice(entries)) + return entries, nil +} + +func readdirnames(filesystem fs.FS, dir string) ([]string, error) { + f, err := filesystem.OpenFile(dir, fs.O_RDONLY|fs.O_NOFOLLOW, 0) + if err != nil { + return nil, errors.Wrap(err, "Open") + } + + entries, err := f.Readdirnames(-1) + if err != nil { + _ = f.Close() + return nil, errors.Wrapf(err, "Readdirnames %v failed", dir) + } + + err = f.Close() + if err != nil { + return nil, err + } + + sort.Sort(sort.StringSlice(entries)) + return entries, nil +} + +// resolveRelativeTargets replaces targets that only contain relative +// directories ("." or "../../") with the contents of the directory. Each +// element of target is processed with fs.Clean(). +func resolveRelativeTargets(fs fs.FS, targets []string) ([]string, error) { + debug.Log("targets before resolving: %v", targets) + result := make([]string, 0, len(targets)) + for _, target := range targets { + target = fs.Clean(target) + pc, _ := pathComponents(fs, target, false) + if len(pc) > 0 { + result = append(result, target) + continue + } + + debug.Log("replacing %q with readdir(%q)", target, target) + entries, err := readdirnames(fs, target) + if err != nil { + return nil, err + } + + for _, name := range entries { + result = append(result, fs.Join(target, name)) + } + } + + debug.Log("targets after resolving: %v", result) + return result, nil +} + +// SnapshotOptions collect attributes for a new snapshot. +type SnapshotOptions struct { + Tags []string + Hostname string + Excludes []string + Time time.Time + ParentSnapshot restic.ID +} + +// loadParentTree loads a tree referenced by snapshot id. If id is null, nil is returned. 
+func (arch *Archiver) loadParentTree(ctx context.Context, snapshotID restic.ID) *restic.Tree { + if snapshotID.IsNull() { + return nil + } + + debug.Log("load parent snapshot %v", snapshotID) + sn, err := restic.LoadSnapshot(ctx, arch.Repo, snapshotID) + if err != nil { + debug.Log("unable to load snapshot %v: %v", snapshotID, err) + return nil + } + + if sn.Tree == nil { + debug.Log("snapshot %v has empty tree %v", snapshotID) + return nil + } + + debug.Log("load parent tree %v", *sn.Tree) + tree, err := arch.Repo.LoadTree(ctx, *sn.Tree) + if err != nil { + debug.Log("unable to load tree %v: %v", *sn.Tree, err) + return nil + } + return tree +} + +// runWorkers starts the worker pools, which are stopped when the context is cancelled. +func (arch *Archiver) runWorkers(ctx context.Context, t *tomb.Tomb) { + arch.blobSaver = NewBlobSaver(ctx, t, arch.Repo, arch.Options.SaveBlobConcurrency) + + arch.fileSaver = NewFileSaver(ctx, t, + arch.FS, + arch.blobSaver.Save, + arch.Repo.Config().ChunkerPolynomial, + arch.Options.FileReadConcurrency, arch.Options.SaveBlobConcurrency) + arch.fileSaver.CompleteBlob = arch.CompleteBlob + arch.fileSaver.NodeFromFileInfo = arch.nodeFromFileInfo + + arch.treeSaver = NewTreeSaver(ctx, t, arch.Options.SaveTreeConcurrency, arch.saveTree, arch.Error) +} + +// Snapshot saves several targets and returns a snapshot. +func (arch *Archiver) Snapshot(ctx context.Context, targets []string, opts SnapshotOptions) (*restic.Snapshot, restic.ID, error) { + cleanTargets, err := resolveRelativeTargets(arch.FS, targets) + if err != nil { + return nil, restic.ID{}, err + } + + atree, err := NewTree(arch.FS, cleanTargets) + if err != nil { + return nil, restic.ID{}, err + } + + var t tomb.Tomb + wctx := t.Context(ctx) + + arch.runWorkers(wctx, &t) + + start := time.Now() + + debug.Log("starting snapshot") + rootTreeID, stats, err := func() (restic.ID, ItemStats, error) { + tree, err := arch.SaveTree(wctx, "/", atree, arch.loadParentTree(wctx, opts.ParentSnapshot)) + if err != nil { + return restic.ID{}, ItemStats{}, err + } + + if len(tree.Nodes) == 0 { + return restic.ID{}, ItemStats{}, errors.New("snapshot is empty") + } + + return arch.saveTree(wctx, tree) + }() + debug.Log("saved tree, error: %v", err) + + t.Kill(nil) + werr := t.Wait() + debug.Log("err is %v, werr is %v", err, werr) + if err == nil || errors.Cause(err) == context.Canceled { + err = werr + } + + if err != nil { + debug.Log("error while saving tree: %v", err) + return nil, restic.ID{}, err + } + + arch.CompleteItem("/", nil, nil, stats, time.Since(start)) + + err = arch.Repo.Flush(ctx) + if err != nil { + return nil, restic.ID{}, err + } + + err = arch.Repo.SaveIndex(ctx) + if err != nil { + return nil, restic.ID{}, err + } + + sn, err := restic.NewSnapshot(targets, opts.Tags, opts.Hostname, opts.Time) + sn.Excludes = opts.Excludes + if !opts.ParentSnapshot.IsNull() { + id := opts.ParentSnapshot + sn.Parent = &id + } + sn.Tree = &rootTreeID + + id, err := arch.Repo.SaveJSONUnpacked(ctx, restic.SnapshotFile, sn) + if err != nil { + return nil, restic.ID{}, err + } + + return sn, id, nil +} diff --git a/internal/archiver/archiver_test.go b/internal/archiver/archiver_test.go new file mode 100644 index 000000000..e7cda551a --- /dev/null +++ b/internal/archiver/archiver_test.go @@ -0,0 +1,1902 @@ +package archiver + +import ( + "context" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "strings" + "sync" + "sync/atomic" + "syscall" + "testing" + "time" + + "github.com/restic/restic/internal/checker" + 
"github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/fs" + "github.com/restic/restic/internal/repository" + "github.com/restic/restic/internal/restic" + restictest "github.com/restic/restic/internal/test" + tomb "gopkg.in/tomb.v2" +) + +func prepareTempdirRepoSrc(t testing.TB, src TestDir) (tempdir string, repo restic.Repository, cleanup func()) { + tempdir, removeTempdir := restictest.TempDir(t) + repo, removeRepository := repository.TestRepository(t) + + TestCreateFiles(t, tempdir, src) + + cleanup = func() { + removeRepository() + removeTempdir() + } + + return tempdir, repo, cleanup +} + +func saveFile(t testing.TB, repo restic.Repository, filename string, filesystem fs.FS) (*restic.Node, ItemStats) { + var tmb tomb.Tomb + ctx := tmb.Context(context.Background()) + + arch := New(repo, filesystem, Options{}) + arch.runWorkers(ctx, &tmb) + + arch.Error = func(item string, fi os.FileInfo, err error) error { + t.Errorf("archiver error for %v: %v", item, err) + return err + } + + var ( + completeCallbackNode *restic.Node + completeCallbackStats ItemStats + completeCallback bool + + startCallback bool + ) + + complete := func(node *restic.Node, stats ItemStats) { + completeCallback = true + completeCallbackNode = node + completeCallbackStats = stats + } + + start := func() { + startCallback = true + } + + file, err := arch.FS.OpenFile(filename, fs.O_RDONLY|fs.O_NOFOLLOW, 0) + if err != nil { + t.Fatal(err) + } + + fi, err := file.Stat() + if err != nil { + t.Fatal(err) + } + + res := arch.fileSaver.Save(ctx, "/", file, fi, start, complete) + + res.Wait(ctx) + if res.Err() != nil { + t.Fatal(res.Err()) + } + + tmb.Kill(nil) + err = tmb.Wait() + if err != nil { + t.Fatal(err) + } + + err = repo.Flush(ctx) + if err != nil { + t.Fatal(err) + } + + err = repo.SaveIndex(ctx) + if err != nil { + t.Fatal(err) + } + + if !startCallback { + t.Errorf("start callback did not happen") + } + + if !completeCallback { + t.Errorf("complete callback did not happen") + } + + if completeCallbackNode == nil { + t.Errorf("no node returned for complete callback") + } + + if completeCallbackNode != nil && !res.Node().Equals(*completeCallbackNode) { + t.Errorf("different node returned for complete callback") + } + + if completeCallbackStats != res.Stats() { + t.Errorf("different stats return for complete callback, want:\n %v\ngot:\n %v", res.Stats(), completeCallbackStats) + } + + return res.Node(), res.Stats() +} + +func TestArchiverSaveFile(t *testing.T) { + var tests = []TestFile{ + TestFile{Content: ""}, + TestFile{Content: "foo"}, + TestFile{Content: string(restictest.Random(23, 12*1024*1024+1287898))}, + } + + for _, testfile := range tests { + t.Run("", func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + tempdir, repo, cleanup := prepareTempdirRepoSrc(t, TestDir{"file": testfile}) + defer cleanup() + + node, stats := saveFile(t, repo, filepath.Join(tempdir, "file"), fs.Track{FS: fs.Local{}}) + + TestEnsureFileContent(ctx, t, repo, "file", node, testfile) + if stats.DataSize != uint64(len(testfile.Content)) { + t.Errorf("wrong stats returned in DataSize, want %d, got %d", len(testfile.Content), stats.DataSize) + } + if stats.DataBlobs <= 0 && len(testfile.Content) > 0 { + t.Errorf("wrong stats returned in DataBlobs, want > 0, got %d", stats.DataBlobs) + } + if stats.TreeSize != 0 { + t.Errorf("wrong stats returned in TreeSize, want 0, got %d", stats.TreeSize) + } + if stats.TreeBlobs != 0 { + t.Errorf("wrong stats returned in DataBlobs, 
want 0, got %d", stats.DataBlobs) + } + }) + } +} + +func TestArchiverSaveFileReaderFS(t *testing.T) { + var tests = []struct { + Data string + }{ + {Data: ""}, + {Data: "foo"}, + {Data: string(restictest.Random(23, 12*1024*1024+1287898))}, + } + + for _, test := range tests { + t.Run("", func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + repo, cleanup := repository.TestRepository(t) + defer cleanup() + + ts := time.Now() + filename := "xx" + readerFs := &fs.Reader{ + ModTime: ts, + Mode: 0123, + Name: filename, + ReadCloser: ioutil.NopCloser(strings.NewReader(test.Data)), + } + + node, stats := saveFile(t, repo, filename, readerFs) + + TestEnsureFileContent(ctx, t, repo, "file", node, TestFile{Content: test.Data}) + if stats.DataSize != uint64(len(test.Data)) { + t.Errorf("wrong stats returned in DataSize, want %d, got %d", len(test.Data), stats.DataSize) + } + if stats.DataBlobs <= 0 && len(test.Data) > 0 { + t.Errorf("wrong stats returned in DataBlobs, want > 0, got %d", stats.DataBlobs) + } + if stats.TreeSize != 0 { + t.Errorf("wrong stats returned in TreeSize, want 0, got %d", stats.TreeSize) + } + if stats.TreeBlobs != 0 { + t.Errorf("wrong stats returned in DataBlobs, want 0, got %d", stats.DataBlobs) + } + }) + } +} + +func TestArchiverSave(t *testing.T) { + var tests = []TestFile{ + TestFile{Content: ""}, + TestFile{Content: "foo"}, + TestFile{Content: string(restictest.Random(23, 12*1024*1024+1287898))}, + } + + for _, testfile := range tests { + t.Run("", func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + tempdir, repo, cleanup := prepareTempdirRepoSrc(t, TestDir{"file": testfile}) + defer cleanup() + + var tmb tomb.Tomb + + arch := New(repo, fs.Track{FS: fs.Local{}}, Options{}) + arch.Error = func(item string, fi os.FileInfo, err error) error { + t.Errorf("archiver error for %v: %v", item, err) + return err + } + arch.runWorkers(tmb.Context(ctx), &tmb) + + node, excluded, err := arch.Save(ctx, "/", filepath.Join(tempdir, "file"), nil) + if err != nil { + t.Fatal(err) + } + + if excluded { + t.Errorf("Save() excluded the node, that's unexpected") + } + + node.wait(ctx) + if node.err != nil { + t.Fatal(node.err) + } + + if node.node == nil { + t.Fatalf("returned node is nil") + } + + stats := node.stats + + err = repo.Flush(ctx) + if err != nil { + t.Fatal(err) + } + + TestEnsureFileContent(ctx, t, repo, "file", node.node, testfile) + if stats.DataSize != uint64(len(testfile.Content)) { + t.Errorf("wrong stats returned in DataSize, want %d, got %d", len(testfile.Content), stats.DataSize) + } + if stats.DataBlobs <= 0 && len(testfile.Content) > 0 { + t.Errorf("wrong stats returned in DataBlobs, want > 0, got %d", stats.DataBlobs) + } + if stats.TreeSize != 0 { + t.Errorf("wrong stats returned in TreeSize, want 0, got %d", stats.TreeSize) + } + if stats.TreeBlobs != 0 { + t.Errorf("wrong stats returned in DataBlobs, want 0, got %d", stats.DataBlobs) + } + }) + } +} + +func TestArchiverSaveReaderFS(t *testing.T) { + var tests = []struct { + Data string + }{ + {Data: ""}, + {Data: "foo"}, + {Data: string(restictest.Random(23, 12*1024*1024+1287898))}, + } + + for _, test := range tests { + t.Run("", func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + repo, cleanup := repository.TestRepository(t) + defer cleanup() + + ts := time.Now() + filename := "xx" + readerFs := &fs.Reader{ + ModTime: ts, + Mode: 0123, + Name: filename, + ReadCloser: 
ioutil.NopCloser(strings.NewReader(test.Data)), + } + + var tmb tomb.Tomb + + arch := New(repo, readerFs, Options{}) + arch.Error = func(item string, fi os.FileInfo, err error) error { + t.Errorf("archiver error for %v: %v", item, err) + return err + } + arch.runWorkers(tmb.Context(ctx), &tmb) + + node, excluded, err := arch.Save(ctx, "/", filename, nil) + t.Logf("Save returned %v %v", node, err) + if err != nil { + t.Fatal(err) + } + + if excluded { + t.Errorf("Save() excluded the node, that's unexpected") + } + + node.wait(ctx) + if node.err != nil { + t.Fatal(node.err) + } + + if node.node == nil { + t.Fatalf("returned node is nil") + } + + stats := node.stats + + err = repo.Flush(ctx) + if err != nil { + t.Fatal(err) + } + + TestEnsureFileContent(ctx, t, repo, "file", node.node, TestFile{Content: test.Data}) + if stats.DataSize != uint64(len(test.Data)) { + t.Errorf("wrong stats returned in DataSize, want %d, got %d", len(test.Data), stats.DataSize) + } + if stats.DataBlobs <= 0 && len(test.Data) > 0 { + t.Errorf("wrong stats returned in DataBlobs, want > 0, got %d", stats.DataBlobs) + } + if stats.TreeSize != 0 { + t.Errorf("wrong stats returned in TreeSize, want 0, got %d", stats.TreeSize) + } + if stats.TreeBlobs != 0 { + t.Errorf("wrong stats returned in DataBlobs, want 0, got %d", stats.DataBlobs) + } + }) + } +} + +func BenchmarkArchiverSaveFileSmall(b *testing.B) { + const fileSize = 4 * 1024 + d := TestDir{"file": TestFile{ + Content: string(restictest.Random(23, fileSize)), + }} + + b.SetBytes(fileSize) + + for i := 0; i < b.N; i++ { + b.StopTimer() + tempdir, repo, cleanup := prepareTempdirRepoSrc(b, d) + b.StartTimer() + + _, stats := saveFile(b, repo, filepath.Join(tempdir, "file"), fs.Track{FS: fs.Local{}}) + + b.StopTimer() + if stats.DataSize != fileSize { + b.Errorf("wrong stats returned in DataSize, want %d, got %d", fileSize, stats.DataSize) + } + if stats.DataBlobs <= 0 { + b.Errorf("wrong stats returned in DataBlobs, want > 0, got %d", stats.DataBlobs) + } + if stats.TreeSize != 0 { + b.Errorf("wrong stats returned in TreeSize, want 0, got %d", stats.TreeSize) + } + if stats.TreeBlobs != 0 { + b.Errorf("wrong stats returned in DataBlobs, want 0, got %d", stats.DataBlobs) + } + cleanup() + b.StartTimer() + } +} + +func BenchmarkArchiverSaveFileLarge(b *testing.B) { + const fileSize = 40*1024*1024 + 1287898 + d := TestDir{"file": TestFile{ + Content: string(restictest.Random(23, fileSize)), + }} + + b.SetBytes(fileSize) + + for i := 0; i < b.N; i++ { + b.StopTimer() + tempdir, repo, cleanup := prepareTempdirRepoSrc(b, d) + b.StartTimer() + + _, stats := saveFile(b, repo, filepath.Join(tempdir, "file"), fs.Track{FS: fs.Local{}}) + + b.StopTimer() + if stats.DataSize != fileSize { + b.Errorf("wrong stats returned in DataSize, want %d, got %d", fileSize, stats.DataSize) + } + if stats.DataBlobs <= 0 { + b.Errorf("wrong stats returned in DataBlobs, want > 0, got %d", stats.DataBlobs) + } + if stats.TreeSize != 0 { + b.Errorf("wrong stats returned in TreeSize, want 0, got %d", stats.TreeSize) + } + if stats.TreeBlobs != 0 { + b.Errorf("wrong stats returned in DataBlobs, want 0, got %d", stats.DataBlobs) + } + cleanup() + b.StartTimer() + } +} + +type blobCountingRepo struct { + restic.Repository + + m sync.Mutex + saved map[restic.BlobHandle]uint +} + +func (repo *blobCountingRepo) SaveBlob(ctx context.Context, t restic.BlobType, buf []byte, id restic.ID) (restic.ID, error) { + id, err := repo.Repository.SaveBlob(ctx, t, buf, id) + h := restic.BlobHandle{ID: id, Type: t} + 
repo.m.Lock() + repo.saved[h]++ + repo.m.Unlock() + return id, err +} + +func (repo *blobCountingRepo) SaveTree(ctx context.Context, t *restic.Tree) (restic.ID, error) { + id, err := repo.Repository.SaveTree(ctx, t) + h := restic.BlobHandle{ID: id, Type: restic.TreeBlob} + repo.m.Lock() + repo.saved[h]++ + repo.m.Unlock() + return id, err +} + +func appendToFile(t testing.TB, filename string, data []byte) { + f, err := os.OpenFile(filename, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0644) + if err != nil { + t.Fatal(err) + } + + _, err = f.Write(data) + if err != nil { + _ = f.Close() + t.Fatal(err) + } + + err = f.Close() + if err != nil { + t.Fatal(err) + } +} + +func TestArchiverSaveFileIncremental(t *testing.T) { + tempdir, removeTempdir := restictest.TempDir(t) + defer removeTempdir() + + testRepo, removeRepository := repository.TestRepository(t) + defer removeRepository() + + repo := &blobCountingRepo{ + Repository: testRepo, + saved: make(map[restic.BlobHandle]uint), + } + + data := restictest.Random(23, 512*1024+887898) + testfile := filepath.Join(tempdir, "testfile") + + for i := 0; i < 3; i++ { + appendToFile(t, testfile, data) + node, _ := saveFile(t, repo, testfile, fs.Track{FS: fs.Local{}}) + + t.Logf("node blobs: %v", node.Content) + + for h, n := range repo.saved { + if n > 1 { + t.Errorf("iteration %v: blob %v saved more than once (%d times)", i, h, n) + } + } + } +} + +func save(t testing.TB, filename string, data []byte) { + f, err := os.Create(filename) + if err != nil { + t.Fatal(err) + } + + _, err = f.Write(data) + if err != nil { + t.Fatal(err) + } + + err = f.Sync() + if err != nil { + t.Fatal(err) + } + + err = f.Close() + if err != nil { + t.Fatal(err) + } +} + +func lstat(t testing.TB, name string) os.FileInfo { + fi, err := os.Lstat(name) + if err != nil { + t.Fatal(err) + } + + return fi +} + +func setTimestamp(t testing.TB, filename string, atime, mtime time.Time) { + var utimes = [...]syscall.Timespec{ + syscall.NsecToTimespec(atime.UnixNano()), + syscall.NsecToTimespec(mtime.UnixNano()), + } + + err := syscall.UtimesNano(filename, utimes[:]) + if err != nil { + t.Fatal(err) + } +} + +func remove(t testing.TB, filename string) { + err := os.Remove(filename) + if err != nil { + t.Fatal(err) + } +} + +func nodeFromFI(t testing.TB, filename string, fi os.FileInfo) *restic.Node { + node, err := restic.NodeFromFileInfo(filename, fi) + if err != nil { + t.Fatal(err) + } + + return node +} + +func TestFileChanged(t *testing.T) { + var defaultContent = []byte("foobar") + + var d = 50 * time.Millisecond + if runtime.GOOS == "darwin" { + // on older darwin instances the file system only supports one second + // granularity + d = time.Second + } + + sleep := func() { + time.Sleep(d) + } + + var tests = []struct { + Name string + Content []byte + Modify func(t testing.TB, filename string) + }{ + { + Name: "same-content-new-file", + Modify: func(t testing.TB, filename string) { + remove(t, filename) + sleep() + save(t, filename, defaultContent) + }, + }, + { + Name: "same-content-new-timestamp", + Modify: func(t testing.TB, filename string) { + sleep() + save(t, filename, defaultContent) + }, + }, + { + Name: "other-content", + Modify: func(t testing.TB, filename string) { + remove(t, filename) + sleep() + save(t, filename, []byte("xxxxxx")) + }, + }, + { + Name: "longer-content", + Modify: func(t testing.TB, filename string) { + save(t, filename, []byte("xxxxxxxxxxxxxxxxxxxxxx")) + }, + }, + { + Name: "new-file", + Modify: func(t testing.TB, filename string) { + remove(t, 
filename) + sleep() + save(t, filename, defaultContent) + }, + }, + } + + for _, test := range tests { + t.Run(test.Name, func(t *testing.T) { + tempdir, cleanup := restictest.TempDir(t) + defer cleanup() + + filename := filepath.Join(tempdir, "file") + content := defaultContent + if test.Content != nil { + content = test.Content + } + save(t, filename, content) + + fiBefore := lstat(t, filename) + node := nodeFromFI(t, filename, fiBefore) + + if fileChanged(fiBefore, node) { + t.Fatalf("unchanged file detected as changed") + } + + test.Modify(t, filename) + + fiAfter := lstat(t, filename) + if !fileChanged(fiAfter, node) { + t.Fatalf("modified file detected as unchanged") + } + }) + } +} + +func TestFilChangedSpecialCases(t *testing.T) { + tempdir, cleanup := restictest.TempDir(t) + defer cleanup() + + filename := filepath.Join(tempdir, "file") + content := []byte("foobar") + save(t, filename, content) + + t.Run("nil-node", func(t *testing.T) { + fi := lstat(t, filename) + if !fileChanged(fi, nil) { + t.Fatal("nil node detected as unchanged") + } + }) + + t.Run("type-change", func(t *testing.T) { + fi := lstat(t, filename) + node := nodeFromFI(t, filename, fi) + node.Type = "symlink" + if !fileChanged(fi, node) { + t.Fatal("node with changed type detected as unchanged") + } + }) +} + +func TestArchiverSaveDir(t *testing.T) { + const targetNodeName = "targetdir" + + var tests = []struct { + src TestDir + chdir string + target string + want TestDir + }{ + { + src: TestDir{ + "targetfile": TestFile{Content: string(restictest.Random(888, 2*1024*1024+5000))}, + }, + target: ".", + want: TestDir{ + "targetdir": TestDir{ + "targetfile": TestFile{Content: string(restictest.Random(888, 2*1024*1024+5000))}, + }, + }, + }, + { + src: TestDir{ + "targetdir": TestDir{ + "foo": TestFile{Content: "foo"}, + "emptyfile": TestFile{Content: ""}, + "bar": TestFile{Content: "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"}, + "largefile": TestFile{Content: string(restictest.Random(888, 2*1024*1024+5000))}, + "largerfile": TestFile{Content: string(restictest.Random(234, 5*1024*1024+5000))}, + }, + }, + target: "targetdir", + }, + { + src: TestDir{ + "foo": TestFile{Content: "foo"}, + "emptyfile": TestFile{Content: ""}, + "bar": TestFile{Content: "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"}, + }, + target: ".", + want: TestDir{ + "targetdir": TestDir{ + "foo": TestFile{Content: "foo"}, + "emptyfile": TestFile{Content: ""}, + "bar": TestFile{Content: "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"}, + }, + }, + }, + { + src: TestDir{ + "foo": TestDir{ + "subdir": TestDir{ + "x": TestFile{Content: "xxx"}, + "y": TestFile{Content: "yyyyyyyyyyyyyyyy"}, + "z": TestFile{Content: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"}, + }, + "file": TestFile{Content: "just a test"}, + }, + }, + chdir: "foo/subdir", + target: "../../", + want: TestDir{ + "targetdir": TestDir{ + "foo": TestDir{ + "subdir": TestDir{ + "x": TestFile{Content: "xxx"}, + "y": TestFile{Content: "yyyyyyyyyyyyyyyy"}, + "z": TestFile{Content: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"}, + }, + "file": TestFile{Content: "just a test"}, + }, + }, + }, + }, + { + src: TestDir{ + "foo": TestDir{ + "file": TestFile{Content: "just a test"}, + "file2": TestFile{Content: "again"}, + }, + }, + target: "./foo", + want: TestDir{ + "targetdir": TestDir{ + "file": TestFile{Content: "just a test"}, + "file2": TestFile{Content: "again"}, + }, + }, + }, + } + + for _, test := range tests { + t.Run("", func(t *testing.T) { + var tmb tomb.Tomb + ctx := tmb.Context(context.Background()) + + tempdir, repo, cleanup := 
prepareTempdirRepoSrc(t, test.src) + defer cleanup() + + arch := New(repo, fs.Track{FS: fs.Local{}}, Options{}) + arch.runWorkers(ctx, &tmb) + + chdir := tempdir + if test.chdir != "" { + chdir = filepath.Join(chdir, test.chdir) + } + + back := fs.TestChdir(t, chdir) + defer back() + + fi, err := fs.Lstat(test.target) + if err != nil { + t.Fatal(err) + } + + ft, err := arch.SaveDir(ctx, "/", fi, test.target, nil) + if err != nil { + t.Fatal(err) + } + + ft.Wait(ctx) + node, stats := ft.Node(), ft.Stats() + + tmb.Kill(nil) + err = tmb.Wait() + if err != nil { + t.Fatal(err) + } + + t.Logf("stats: %v", stats) + if stats.DataSize != 0 { + t.Errorf("wrong stats returned in DataSize, want 0, got %d", stats.DataSize) + } + if stats.DataBlobs != 0 { + t.Errorf("wrong stats returned in DataBlobs, want 0, got %d", stats.DataBlobs) + } + if stats.TreeSize <= 0 { + t.Errorf("wrong stats returned in TreeSize, want > 0, got %d", stats.TreeSize) + } + if stats.TreeBlobs <= 0 { + t.Errorf("wrong stats returned in TreeBlobs, want > 0, got %d", stats.TreeBlobs) + } + + node.Name = targetNodeName + tree := &restic.Tree{Nodes: []*restic.Node{node}} + treeID, err := repo.SaveTree(ctx, tree) + if err != nil { + t.Fatal(err) + } + + err = repo.Flush(ctx) + if err != nil { + t.Fatal(err) + } + + err = repo.SaveIndex(ctx) + if err != nil { + t.Fatal(err) + } + + want := test.want + if want == nil { + want = test.src + } + TestEnsureTree(ctx, t, "/", repo, treeID, want) + }) + } +} + +func TestArchiverSaveDirIncremental(t *testing.T) { + tempdir, removeTempdir := restictest.TempDir(t) + defer removeTempdir() + + testRepo, removeRepository := repository.TestRepository(t) + defer removeRepository() + + repo := &blobCountingRepo{ + Repository: testRepo, + saved: make(map[restic.BlobHandle]uint), + } + + appendToFile(t, filepath.Join(tempdir, "testfile"), []byte("foobar")) + + // save the empty directory several times in a row, then have a look if the + // archiver did save the same tree several times + for i := 0; i < 5; i++ { + var tmb tomb.Tomb + ctx := tmb.Context(context.Background()) + + arch := New(repo, fs.Track{FS: fs.Local{}}, Options{}) + arch.runWorkers(ctx, &tmb) + + fi, err := fs.Lstat(tempdir) + if err != nil { + t.Fatal(err) + } + + ft, err := arch.SaveDir(ctx, "/", fi, tempdir, nil) + if err != nil { + t.Fatal(err) + } + + ft.Wait(ctx) + node, stats := ft.Node(), ft.Stats() + + tmb.Kill(nil) + err = tmb.Wait() + if err != nil { + t.Fatal(err) + } + + if i == 0 { + // operation must have added new tree data + if stats.DataSize != 0 { + t.Errorf("wrong stats returned in DataSize, want 0, got %d", stats.DataSize) + } + if stats.DataBlobs != 0 { + t.Errorf("wrong stats returned in DataBlobs, want 0, got %d", stats.DataBlobs) + } + if stats.TreeSize <= 0 { + t.Errorf("wrong stats returned in TreeSize, want > 0, got %d", stats.TreeSize) + } + if stats.TreeBlobs <= 0 { + t.Errorf("wrong stats returned in TreeBlobs, want > 0, got %d", stats.TreeBlobs) + } + } else { + // operation must not have added any new data + if stats.DataSize != 0 { + t.Errorf("wrong stats returned in DataSize, want 0, got %d", stats.DataSize) + } + if stats.DataBlobs != 0 { + t.Errorf("wrong stats returned in DataBlobs, want 0, got %d", stats.DataBlobs) + } + if stats.TreeSize != 0 { + t.Errorf("wrong stats returned in TreeSize, want 0, got %d", stats.TreeSize) + } + if stats.TreeBlobs != 0 { + t.Errorf("wrong stats returned in TreeBlobs, want 0, got %d", stats.TreeBlobs) + } + } + + t.Logf("node subtree %v", node.Subtree) + + err = 
repo.Flush(ctx) + if err != nil { + t.Fatal(err) + } + + err = repo.SaveIndex(ctx) + if err != nil { + t.Fatal(err) + } + + for h, n := range repo.saved { + if n > 1 { + t.Errorf("iteration %v: blob %v saved more than once (%d times)", i, h, n) + } + } + } +} + +func TestArchiverSaveTree(t *testing.T) { + symlink := func(from, to string) func(t testing.TB) { + return func(t testing.TB) { + err := os.Symlink(from, to) + if err != nil { + t.Fatal(err) + } + } + } + + var tests = []struct { + src TestDir + prepare func(t testing.TB) + targets []string + want TestDir + }{ + { + src: TestDir{ + "targetfile": TestFile{Content: string("foobar")}, + }, + targets: []string{"targetfile"}, + want: TestDir{ + "targetfile": TestFile{Content: string("foobar")}, + }, + }, + { + src: TestDir{ + "targetfile": TestFile{Content: string("foobar")}, + }, + prepare: symlink("targetfile", "filesymlink"), + targets: []string{"targetfile", "filesymlink"}, + want: TestDir{ + "targetfile": TestFile{Content: string("foobar")}, + "filesymlink": TestSymlink{Target: "targetfile"}, + }, + }, + { + src: TestDir{ + "dir": TestDir{ + "subdir": TestDir{ + "subsubdir": TestDir{ + "targetfile": TestFile{Content: string("foobar")}, + }, + }, + "otherfile": TestFile{Content: string("xxx")}, + }, + }, + prepare: symlink("subdir", filepath.FromSlash("dir/symlink")), + targets: []string{filepath.FromSlash("dir/symlink")}, + want: TestDir{ + "dir": TestDir{ + "symlink": TestSymlink{Target: "subdir"}, + }, + }, + }, + { + src: TestDir{ + "dir": TestDir{ + "subdir": TestDir{ + "subsubdir": TestDir{ + "targetfile": TestFile{Content: string("foobar")}, + }, + }, + "otherfile": TestFile{Content: string("xxx")}, + }, + }, + prepare: symlink("subdir", filepath.FromSlash("dir/symlink")), + targets: []string{filepath.FromSlash("dir/symlink/subsubdir")}, + want: TestDir{ + "dir": TestDir{ + "symlink": TestDir{ + "subsubdir": TestDir{ + "targetfile": TestFile{Content: string("foobar")}, + }, + }, + }, + }, + }, + } + + for _, test := range tests { + t.Run("", func(t *testing.T) { + var tmb tomb.Tomb + ctx := tmb.Context(context.Background()) + + tempdir, repo, cleanup := prepareTempdirRepoSrc(t, test.src) + defer cleanup() + + testFS := fs.Track{FS: fs.Local{}} + + arch := New(repo, testFS, Options{}) + arch.runWorkers(ctx, &tmb) + + back := fs.TestChdir(t, tempdir) + defer back() + + if test.prepare != nil { + test.prepare(t) + } + + atree, err := NewTree(testFS, test.targets) + if err != nil { + t.Fatal(err) + } + + tree, err := arch.SaveTree(ctx, "/", atree, nil) + if err != nil { + t.Fatal(err) + } + + treeID, err := repo.SaveTree(ctx, tree) + if err != nil { + t.Fatal(err) + } + + tmb.Kill(nil) + err = tmb.Wait() + if err != nil { + t.Fatal(err) + } + + err = repo.Flush(ctx) + if err != nil { + t.Fatal(err) + } + + err = repo.SaveIndex(ctx) + if err != nil { + t.Fatal(err) + } + + want := test.want + if want == nil { + want = test.src + } + TestEnsureTree(ctx, t, "/", repo, treeID, want) + }) + } +} + +func TestArchiverSnapshot(t *testing.T) { + var tests = []struct { + name string + src TestDir + want TestDir + chdir string + targets []string + }{ + { + name: "single-file", + src: TestDir{ + "foo": TestFile{Content: "foo"}, + }, + targets: []string{"foo"}, + }, + { + name: "file-current-dir", + src: TestDir{ + "foo": TestFile{Content: "foo"}, + }, + targets: []string{"./foo"}, + }, + { + name: "dir", + src: TestDir{ + "target": TestDir{ + "foo": TestFile{Content: "foo"}, + }, + }, + targets: []string{"target"}, + }, + { + name: 
"dir-current-dir", + src: TestDir{ + "target": TestDir{ + "foo": TestFile{Content: "foo"}, + }, + }, + targets: []string{"./target"}, + }, + { + name: "content-dir-current-dir", + src: TestDir{ + "target": TestDir{ + "foo": TestFile{Content: "foo"}, + }, + }, + targets: []string{"./target/."}, + }, + { + name: "current-dir", + src: TestDir{ + "target": TestDir{ + "foo": TestFile{Content: "foo"}, + }, + }, + targets: []string{"."}, + }, + { + name: "subdir", + src: TestDir{ + "subdir": TestDir{ + "foo": TestFile{Content: "foo"}, + "subsubdir": TestDir{ + "foo": TestFile{Content: "foo in subsubdir"}, + }, + }, + "other": TestFile{Content: "another file"}, + }, + targets: []string{"subdir"}, + want: TestDir{ + "subdir": TestDir{ + "foo": TestFile{Content: "foo"}, + "subsubdir": TestDir{ + "foo": TestFile{Content: "foo in subsubdir"}, + }, + }, + }, + }, + { + name: "subsubdir", + src: TestDir{ + "subdir": TestDir{ + "foo": TestFile{Content: "foo"}, + "subsubdir": TestDir{ + "foo": TestFile{Content: "foo in subsubdir"}, + }, + }, + "other": TestFile{Content: "another file"}, + }, + targets: []string{"subdir/subsubdir"}, + want: TestDir{ + "subdir": TestDir{ + "subsubdir": TestDir{ + "foo": TestFile{Content: "foo in subsubdir"}, + }, + }, + }, + }, + { + name: "parent-dir", + src: TestDir{ + "subdir": TestDir{ + "foo": TestFile{Content: "foo"}, + }, + "other": TestFile{Content: "another file"}, + }, + chdir: "subdir", + targets: []string{".."}, + }, + { + name: "parent-parent-dir", + src: TestDir{ + "subdir": TestDir{ + "foo": TestFile{Content: "foo"}, + "subsubdir": TestDir{ + "empty": TestFile{Content: ""}, + }, + }, + "other": TestFile{Content: "another file"}, + }, + chdir: "subdir/subsubdir", + targets: []string{"../.."}, + }, + { + name: "parent-parent-dir-slash", + src: TestDir{ + "subdir": TestDir{ + "subsubdir": TestDir{ + "foo": TestFile{Content: "foo"}, + }, + }, + "other": TestFile{Content: "another file"}, + }, + chdir: "subdir/subsubdir", + targets: []string{"../../"}, + want: TestDir{ + "subdir": TestDir{ + "subsubdir": TestDir{ + "foo": TestFile{Content: "foo"}, + }, + }, + "other": TestFile{Content: "another file"}, + }, + }, + { + name: "parent-subdir", + src: TestDir{ + "subdir": TestDir{ + "foo": TestFile{Content: "foo"}, + }, + "other": TestFile{Content: "another file"}, + }, + chdir: "subdir", + targets: []string{"../subdir"}, + want: TestDir{ + "subdir": TestDir{ + "foo": TestFile{Content: "foo"}, + }, + }, + }, + { + name: "parent-parent-dir-subdir", + src: TestDir{ + "subdir": TestDir{ + "subsubdir": TestDir{ + "foo": TestFile{Content: "foo"}, + }, + }, + "other": TestFile{Content: "another file"}, + }, + chdir: "subdir/subsubdir", + targets: []string{"../../subdir/subsubdir"}, + want: TestDir{ + "subdir": TestDir{ + "subsubdir": TestDir{ + "foo": TestFile{Content: "foo"}, + }, + }, + }, + }, + { + name: "included-multiple1", + src: TestDir{ + "subdir": TestDir{ + "subsubdir": TestDir{ + "foo": TestFile{Content: "foo"}, + }, + "other": TestFile{Content: "another file"}, + }, + }, + targets: []string{"subdir", "subdir/subsubdir"}, + }, + { + name: "included-multiple2", + src: TestDir{ + "subdir": TestDir{ + "subsubdir": TestDir{ + "foo": TestFile{Content: "foo"}, + }, + "other": TestFile{Content: "another file"}, + }, + }, + targets: []string{"subdir/subsubdir", "subdir"}, + }, + { + name: "collision", + src: TestDir{ + "subdir": TestDir{ + "foo": TestFile{Content: "foo in subdir"}, + "subsubdir": TestDir{ + "foo": TestFile{Content: "foo in subsubdir"}, + }, + }, + 
"foo": TestFile{Content: "another file"}, + }, + chdir: "subdir", + targets: []string{".", "../foo"}, + want: TestDir{ + + "foo": TestFile{Content: "foo in subdir"}, + "subsubdir": TestDir{ + "foo": TestFile{Content: "foo in subsubdir"}, + }, + "foo-1": TestFile{Content: "another file"}, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + tempdir, repo, cleanup := prepareTempdirRepoSrc(t, test.src) + defer cleanup() + + arch := New(repo, fs.Track{FS: fs.Local{}}, Options{}) + + chdir := tempdir + if test.chdir != "" { + chdir = filepath.Join(chdir, filepath.FromSlash(test.chdir)) + } + + back := fs.TestChdir(t, chdir) + defer back() + + var targets []string + for _, target := range test.targets { + targets = append(targets, os.ExpandEnv(target)) + } + + t.Logf("targets: %v", targets) + sn, snapshotID, err := arch.Snapshot(ctx, targets, SnapshotOptions{Time: time.Now()}) + if err != nil { + t.Fatal(err) + } + + t.Logf("saved as %v", snapshotID.Str()) + + want := test.want + if want == nil { + want = test.src + } + TestEnsureSnapshot(t, repo, snapshotID, want) + + checker.TestCheckRepo(t, repo) + + // check that the snapshot contains the targets with absolute paths + for i, target := range sn.Paths { + atarget, err := filepath.Abs(test.targets[i]) + if err != nil { + t.Fatal(err) + } + + if target != atarget { + t.Errorf("wrong path in snapshot: want %v, got %v", atarget, target) + } + } + }) + } +} + +func TestArchiverSnapshotSelect(t *testing.T) { + var tests = []struct { + name string + src TestDir + want TestDir + selFn SelectFunc + err string + }{ + { + name: "include-all", + src: TestDir{ + "work": TestDir{ + "foo": TestFile{Content: "foo"}, + "foo.txt": TestFile{Content: "foo text file"}, + "subdir": TestDir{ + "other": TestFile{Content: "other in subdir"}, + "bar.txt": TestFile{Content: "bar.txt in subdir"}, + }, + }, + "other": TestFile{Content: "another file"}, + }, + selFn: func(item string, fi os.FileInfo) bool { + return true + }, + }, + { + name: "exclude-all", + src: TestDir{ + "work": TestDir{ + "foo": TestFile{Content: "foo"}, + "foo.txt": TestFile{Content: "foo text file"}, + "subdir": TestDir{ + "other": TestFile{Content: "other in subdir"}, + "bar.txt": TestFile{Content: "bar.txt in subdir"}, + }, + }, + "other": TestFile{Content: "another file"}, + }, + selFn: func(item string, fi os.FileInfo) bool { + return false + }, + err: "snapshot is empty", + }, + { + name: "exclude-txt-files", + src: TestDir{ + "work": TestDir{ + "foo": TestFile{Content: "foo"}, + "foo.txt": TestFile{Content: "foo text file"}, + "subdir": TestDir{ + "other": TestFile{Content: "other in subdir"}, + "bar.txt": TestFile{Content: "bar.txt in subdir"}, + }, + }, + "other": TestFile{Content: "another file"}, + }, + want: TestDir{ + "work": TestDir{ + "foo": TestFile{Content: "foo"}, + "subdir": TestDir{ + "other": TestFile{Content: "other in subdir"}, + }, + }, + "other": TestFile{Content: "another file"}, + }, + selFn: func(item string, fi os.FileInfo) bool { + if filepath.Ext(item) == ".txt" { + return false + } + return true + }, + }, + { + name: "exclude-dir", + src: TestDir{ + "work": TestDir{ + "foo": TestFile{Content: "foo"}, + "foo.txt": TestFile{Content: "foo text file"}, + "subdir": TestDir{ + "other": TestFile{Content: "other in subdir"}, + "bar.txt": TestFile{Content: "bar.txt in subdir"}, + }, + }, + "other": TestFile{Content: "another file"}, + }, + want: TestDir{ + "work": 
TestDir{ + "foo": TestFile{Content: "foo"}, + "foo.txt": TestFile{Content: "foo text file"}, + }, + "other": TestFile{Content: "another file"}, + }, + selFn: func(item string, fi os.FileInfo) bool { + if filepath.Base(item) == "subdir" { + return false + } + return true + }, + }, + { + name: "select-absolute-paths", + src: TestDir{ + "foo": TestFile{Content: "foo"}, + }, + selFn: func(item string, fi os.FileInfo) bool { + return filepath.IsAbs(item) + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + tempdir, repo, cleanup := prepareTempdirRepoSrc(t, test.src) + defer cleanup() + + arch := New(repo, fs.Track{FS: fs.Local{}}, Options{}) + arch.Select = test.selFn + + back := fs.TestChdir(t, tempdir) + defer back() + + targets := []string{"."} + _, snapshotID, err := arch.Snapshot(ctx, targets, SnapshotOptions{Time: time.Now()}) + if test.err != "" { + if err == nil { + t.Fatalf("expected error not found, got %v, wanted %q", err, test.err) + } + + if err.Error() != test.err { + t.Fatalf("unexpected error, want %q, got %q", test.err, err) + } + + return + } + + if err != nil { + t.Fatal(err) + } + + t.Logf("saved as %v", snapshotID.Str()) + + want := test.want + if want == nil { + want = test.src + } + TestEnsureSnapshot(t, repo, snapshotID, want) + + checker.TestCheckRepo(t, repo) + }) + } +} + +// MockFS keeps track which files are read. +type MockFS struct { + fs.FS + + m sync.Mutex + bytesRead map[string]int // tracks bytes read from all opened files +} + +func (m *MockFS) Open(name string) (fs.File, error) { + f, err := m.FS.Open(name) + if err != nil { + return f, err + } + + return MockFile{File: f, fs: m, filename: name}, nil +} + +func (m *MockFS) OpenFile(name string, flag int, perm os.FileMode) (fs.File, error) { + f, err := m.FS.OpenFile(name, flag, perm) + if err != nil { + return f, err + } + + return MockFile{File: f, fs: m, filename: name}, nil +} + +type MockFile struct { + fs.File + filename string + + fs *MockFS +} + +func (f MockFile) Read(p []byte) (int, error) { + n, err := f.File.Read(p) + if n > 0 { + f.fs.m.Lock() + f.fs.bytesRead[f.filename] += n + f.fs.m.Unlock() + } + return n, err +} + +func TestArchiverParent(t *testing.T) { + var tests = []struct { + src TestDir + read map[string]int // tracks number of times a file must have been read + }{ + { + src: TestDir{ + "targetfile": TestFile{Content: string(restictest.Random(888, 2*1024*1024+5000))}, + }, + read: map[string]int{ + "targetfile": 1, + }, + }, + } + + for _, test := range tests { + t.Run("", func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + tempdir, repo, cleanup := prepareTempdirRepoSrc(t, test.src) + defer cleanup() + + testFS := &MockFS{ + FS: fs.Track{FS: fs.Local{}}, + bytesRead: make(map[string]int), + } + + arch := New(repo, testFS, Options{}) + + back := fs.TestChdir(t, tempdir) + defer back() + + _, firstSnapshotID, err := arch.Snapshot(ctx, []string{"."}, SnapshotOptions{Time: time.Now()}) + if err != nil { + t.Fatal(err) + } + + t.Logf("first backup saved as %v", firstSnapshotID.Str()) + t.Logf("testfs: %v", testFS) + + // check that all files have been read exactly once + TestWalkFiles(t, ".", test.src, func(filename string, item interface{}) error { + file, ok := item.(TestFile) + if !ok { + return nil + } + + n, ok := testFS.bytesRead[filename] + if !ok { + t.Fatalf("file %v was not read at all", filename) + } + + if n != 
len(file.Content) {
+					t.Fatalf("file %v: read %v bytes, wanted %v bytes", filename, n, len(file.Content))
+				}
+				return nil
+			})
+
+			opts := SnapshotOptions{
+				Time:           time.Now(),
+				ParentSnapshot: firstSnapshotID,
+			}
+			_, secondSnapshotID, err := arch.Snapshot(ctx, []string{"."}, opts)
+			if err != nil {
+				t.Fatal(err)
+			}
+
+			// check that all files have still been read exactly once
+			TestWalkFiles(t, ".", test.src, func(filename string, item interface{}) error {
+				file, ok := item.(TestFile)
+				if !ok {
+					return nil
+				}
+
+				n, ok := testFS.bytesRead[filename]
+				if !ok {
+					t.Fatalf("file %v was not read at all", filename)
+				}
+
+				if n != len(file.Content) {
+					t.Fatalf("file %v: read %v bytes, wanted %v bytes", filename, n, len(file.Content))
+				}
+				return nil
+			})
+
+			t.Logf("second backup saved as %v", secondSnapshotID.Str())
+			t.Logf("testfs: %v", testFS)
+
+			checker.TestCheckRepo(t, repo)
+		})
+	}
+}
+
+func TestArchiverErrorReporting(t *testing.T) {
+	ignoreErrorForBasename := func(basename string) ErrorFunc {
+		return func(item string, fi os.FileInfo, err error) error {
+			if filepath.Base(item) == basename {
+				t.Logf("ignoring error for %v: %v", basename, err)
+				return nil
+			}
+
+			t.Errorf("error handler called for unexpected file %v: %v", item, err)
+			return err
+		}
+	}
+
+	chmodUnreadable := func(filename string) func(testing.TB) {
+		return func(t testing.TB) {
+			if runtime.GOOS == "windows" {
+				t.Skip("Skipping this test for windows")
+			}
+
+			err := os.Chmod(filepath.FromSlash(filename), 0004)
+			if err != nil {
+				t.Fatal(err)
+			}
+		}
+	}
+
+	var tests = []struct {
+		name      string
+		src       TestDir
+		want      TestDir
+		prepare   func(t testing.TB)
+		errFn     ErrorFunc
+		mustError bool
+	}{
+		{
+			name: "no-error",
+			src: TestDir{
+				"targetfile": TestFile{Content: "foobar"},
+			},
+		},
+		{
+			name: "file-unreadable",
+			src: TestDir{
+				"targetfile": TestFile{Content: "foobar"},
+			},
+			prepare:   chmodUnreadable("targetfile"),
+			mustError: true,
+		},
+		{
+			name: "file-unreadable-ignore-error",
+			src: TestDir{
+				"targetfile": TestFile{Content: "foobar"},
+				"other":      TestFile{Content: "xxx"},
+			},
+			want: TestDir{
+				"other": TestFile{Content: "xxx"},
+			},
+			prepare: chmodUnreadable("targetfile"),
+			errFn:   ignoreErrorForBasename("targetfile"),
+		},
+		{
+			name: "file-subdir-unreadable",
+			src: TestDir{
+				"subdir": TestDir{
+					"targetfile": TestFile{Content: "foobar"},
+				},
+			},
+			prepare:   chmodUnreadable("subdir/targetfile"),
+			mustError: true,
+		},
+		{
+			name: "file-subdir-unreadable-ignore-error",
+			src: TestDir{
+				"subdir": TestDir{
+					"targetfile": TestFile{Content: "foobar"},
+					"other":      TestFile{Content: "xxx"},
+				},
+			},
+			want: TestDir{
+				"subdir": TestDir{
+					"other": TestFile{Content: "xxx"},
+				},
+			},
+			prepare: chmodUnreadable("subdir/targetfile"),
+			errFn:   ignoreErrorForBasename("targetfile"),
+		},
+	}
+
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			ctx, cancel := context.WithCancel(context.Background())
+			defer cancel()
+
+			tempdir, repo, cleanup := prepareTempdirRepoSrc(t, test.src)
+			defer cleanup()
+
+			back := fs.TestChdir(t, tempdir)
+			defer back()
+
+			if test.prepare != nil {
+				test.prepare(t)
+			}
+
+			arch := New(repo, fs.Track{FS: fs.Local{}}, Options{})
+			arch.Error = test.errFn
+
+			_, snapshotID, err := arch.Snapshot(ctx, []string{"."}, SnapshotOptions{Time: time.Now()})
+			if test.mustError {
+				if err != nil {
+					t.Logf("found expected error (%v), skipping further checks", err)
+					return
+				}
+
+				t.Fatalf("expected error not returned by archiver")
+				return
+			}
+
+			if err != 
nil { + t.Fatalf("unexpected error of type %T found: %v", err, err) + } + + t.Logf("saved as %v", snapshotID.Str()) + + want := test.want + if want == nil { + want = test.src + } + TestEnsureSnapshot(t, repo, snapshotID, want) + + checker.TestCheckRepo(t, repo) + }) + } +} + +// TrackFS keeps track which files are opened. For some files, an error is injected. +type TrackFS struct { + fs.FS + + errorOn map[string]error + + opened map[string]uint + m sync.Mutex +} + +func (m *TrackFS) Open(name string) (fs.File, error) { + m.m.Lock() + m.opened[name]++ + m.m.Unlock() + + return m.FS.Open(name) +} + +func (m *TrackFS) OpenFile(name string, flag int, perm os.FileMode) (fs.File, error) { + m.m.Lock() + m.opened[name]++ + m.m.Unlock() + + return m.FS.OpenFile(name, flag, perm) +} + +type failSaveRepo struct { + restic.Repository + failAfter int32 + cnt int32 + err error +} + +func (f *failSaveRepo) SaveBlob(ctx context.Context, t restic.BlobType, buf []byte, id restic.ID) (restic.ID, error) { + val := atomic.AddInt32(&f.cnt, 1) + if val >= f.failAfter { + return restic.ID{}, f.err + } + + return f.Repository.SaveBlob(ctx, t, buf, id) +} + +func TestArchiverAbortEarlyOnError(t *testing.T) { + var testErr = errors.New("test error") + + var tests = []struct { + src TestDir + wantOpen map[string]uint + failAfter uint // error after so many files have been saved to the repo + err error + }{ + { + src: TestDir{ + "dir": TestDir{ + "bar": TestFile{Content: "foobar"}, + "baz": TestFile{Content: "foobar"}, + "foo": TestFile{Content: "foobar"}, + }, + }, + wantOpen: map[string]uint{ + filepath.FromSlash("dir/bar"): 1, + filepath.FromSlash("dir/baz"): 1, + filepath.FromSlash("dir/foo"): 1, + }, + }, + { + src: TestDir{ + "dir": TestDir{ + "file1": TestFile{Content: string(restictest.Random(3, 4*1024*1024))}, + "file2": TestFile{Content: string(restictest.Random(3, 4*1024*1024))}, + "file3": TestFile{Content: string(restictest.Random(3, 4*1024*1024))}, + "file4": TestFile{Content: string(restictest.Random(3, 4*1024*1024))}, + "file5": TestFile{Content: string(restictest.Random(3, 4*1024*1024))}, + "file6": TestFile{Content: string(restictest.Random(3, 4*1024*1024))}, + "file7": TestFile{Content: string(restictest.Random(3, 4*1024*1024))}, + "file8": TestFile{Content: string(restictest.Random(3, 4*1024*1024))}, + "file9": TestFile{Content: string(restictest.Random(3, 4*1024*1024))}, + }, + }, + wantOpen: map[string]uint{ + filepath.FromSlash("dir/file1"): 1, + filepath.FromSlash("dir/file2"): 1, + filepath.FromSlash("dir/file3"): 1, + filepath.FromSlash("dir/file7"): 0, + filepath.FromSlash("dir/file8"): 0, + filepath.FromSlash("dir/file9"): 0, + }, + failAfter: 5, + err: testErr, + }, + } + + for _, test := range tests { + t.Run("", func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + tempdir, repo, cleanup := prepareTempdirRepoSrc(t, test.src) + defer cleanup() + + back := fs.TestChdir(t, tempdir) + defer back() + + testFS := &TrackFS{ + FS: fs.Track{FS: fs.Local{}}, + opened: make(map[string]uint), + } + + if testFS.errorOn == nil { + testFS.errorOn = make(map[string]error) + } + + testRepo := &failSaveRepo{ + Repository: repo, + failAfter: int32(test.failAfter), + err: test.err, + } + + arch := New(testRepo, testFS, Options{}) + + _, _, err := arch.Snapshot(ctx, []string{"."}, SnapshotOptions{Time: time.Now()}) + if errors.Cause(err) != test.err { + t.Errorf("expected error (%v) not found, got %v", test.err, errors.Cause(err)) + } + + t.Logf("Snapshot return 
error: %v", err) + + t.Logf("track fs: %v", testFS.opened) + + for k, v := range test.wantOpen { + if testFS.opened[k] != v { + t.Errorf("opened %v %d times, want %d", k, testFS.opened[k], v) + } + } + }) + } +} diff --git a/internal/archiver/blob_saver.go b/internal/archiver/blob_saver.go new file mode 100644 index 000000000..c20b96919 --- /dev/null +++ b/internal/archiver/blob_saver.go @@ -0,0 +1,176 @@ +package archiver + +import ( + "context" + "sync" + + "github.com/restic/restic/internal/debug" + "github.com/restic/restic/internal/restic" + tomb "gopkg.in/tomb.v2" +) + +// Saver allows saving a blob. +type Saver interface { + SaveBlob(ctx context.Context, t restic.BlobType, data []byte, id restic.ID) (restic.ID, error) + Index() restic.Index +} + +// BlobSaver concurrently saves incoming blobs to the repo. +type BlobSaver struct { + repo Saver + + m sync.Mutex + knownBlobs restic.BlobSet + + ch chan<- saveBlobJob + done <-chan struct{} +} + +// NewBlobSaver returns a new blob. A worker pool is started, it is stopped +// when ctx is cancelled. +func NewBlobSaver(ctx context.Context, t *tomb.Tomb, repo Saver, workers uint) *BlobSaver { + ch := make(chan saveBlobJob) + s := &BlobSaver{ + repo: repo, + knownBlobs: restic.NewBlobSet(), + ch: ch, + done: t.Dying(), + } + + for i := uint(0); i < workers; i++ { + t.Go(func() error { + return s.worker(t.Context(ctx), ch) + }) + } + + return s +} + +// Save stores a blob in the repo. It checks the index and the known blobs +// before saving anything. The second return parameter is true if the blob was +// previously unknown. +func (s *BlobSaver) Save(ctx context.Context, t restic.BlobType, buf *Buffer) FutureBlob { + ch := make(chan saveBlobResponse, 1) + select { + case s.ch <- saveBlobJob{BlobType: t, buf: buf, ch: ch}: + case <-s.done: + debug.Log("not sending job, BlobSaver is done") + close(ch) + return FutureBlob{ch: ch} + case <-ctx.Done(): + debug.Log("not sending job, context is cancelled") + close(ch) + return FutureBlob{ch: ch} + } + + return FutureBlob{ch: ch, length: len(buf.Data)} +} + +// FutureBlob is returned by SaveBlob and will return the data once it has been processed. +type FutureBlob struct { + ch <-chan saveBlobResponse + length int + res saveBlobResponse +} + +// Wait blocks until the result is available or the context is cancelled. +func (s *FutureBlob) Wait(ctx context.Context) { + select { + case <-ctx.Done(): + return + case res, ok := <-s.ch: + if ok { + s.res = res + } + } +} + +// ID returns the ID of the blob after it has been saved. +func (s *FutureBlob) ID() restic.ID { + return s.res.id +} + +// Known returns whether or not the blob was already known. +func (s *FutureBlob) Known() bool { + return s.res.known +} + +// Length returns the length of the blob. 
+func (s *FutureBlob) Length() int { + return s.length +} + +type saveBlobJob struct { + restic.BlobType + buf *Buffer + ch chan<- saveBlobResponse +} + +type saveBlobResponse struct { + id restic.ID + known bool +} + +func (s *BlobSaver) saveBlob(ctx context.Context, t restic.BlobType, buf []byte) (saveBlobResponse, error) { + id := restic.Hash(buf) + h := restic.BlobHandle{ID: id, Type: t} + + // check if another goroutine has already saved this blob + known := false + s.m.Lock() + if s.knownBlobs.Has(h) { + known = true + } else { + s.knownBlobs.Insert(h) + known = false + } + s.m.Unlock() + + // blob is already known, nothing to do + if known { + return saveBlobResponse{ + id: id, + known: true, + }, nil + } + + // check if the repo knows this blob + if s.repo.Index().Has(id, t) { + return saveBlobResponse{ + id: id, + known: true, + }, nil + } + + // otherwise we're responsible for saving it + _, err := s.repo.SaveBlob(ctx, t, buf, id) + if err != nil { + return saveBlobResponse{}, err + } + + return saveBlobResponse{ + id: id, + known: false, + }, nil +} + +func (s *BlobSaver) worker(ctx context.Context, jobs <-chan saveBlobJob) error { + for { + var job saveBlobJob + select { + case <-ctx.Done(): + return nil + case job = <-jobs: + } + + res, err := s.saveBlob(ctx, job.BlobType, job.buf.Data) + if err != nil { + debug.Log("saveBlob returned error, exiting: %v", err) + close(job.ch) + return err + } + job.ch <- res + close(job.ch) + job.buf.Release() + } +} diff --git a/internal/archiver/blob_saver_test.go b/internal/archiver/blob_saver_test.go new file mode 100644 index 000000000..6d7af3fed --- /dev/null +++ b/internal/archiver/blob_saver_test.go @@ -0,0 +1,115 @@ +package archiver + +import ( + "context" + "fmt" + "runtime" + "sync/atomic" + "testing" + + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/repository" + "github.com/restic/restic/internal/restic" + tomb "gopkg.in/tomb.v2" +) + +var errTest = errors.New("test error") + +type saveFail struct { + idx restic.Index + cnt int32 + failAt int32 +} + +func (b *saveFail) SaveBlob(ctx context.Context, t restic.BlobType, buf []byte, id restic.ID) (restic.ID, error) { + val := atomic.AddInt32(&b.cnt, 1) + if val == b.failAt { + return restic.ID{}, errTest + } + + return id, nil +} + +func (b *saveFail) Index() restic.Index { + return b.idx +} + +func TestBlobSaver(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + var tmb tomb.Tomb + saver := &saveFail{ + idx: repository.NewIndex(), + } + + b := NewBlobSaver(ctx, &tmb, saver, uint(runtime.NumCPU())) + + var results []FutureBlob + + for i := 0; i < 20; i++ { + buf := &Buffer{Data: []byte(fmt.Sprintf("foo%d", i))} + fb := b.Save(ctx, restic.DataBlob, buf) + results = append(results, fb) + } + + for i, blob := range results { + blob.Wait(ctx) + if blob.Known() { + t.Errorf("blob %v is known, that should not be the case", i) + } + } + + tmb.Kill(nil) + + err := tmb.Wait() + if err != nil { + t.Fatal(err) + } +} + +func TestBlobSaverError(t *testing.T) { + var tests = []struct { + blobs int + failAt int + }{ + {20, 2}, + {20, 5}, + {20, 15}, + {200, 150}, + } + + for _, test := range tests { + t.Run("", func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + var tmb tomb.Tomb + saver := &saveFail{ + idx: repository.NewIndex(), + failAt: int32(test.failAt), + } + + b := NewBlobSaver(ctx, &tmb, saver, uint(runtime.NumCPU())) + + var results []FutureBlob + + for i := 0; i 
< test.blobs; i++ { + buf := &Buffer{Data: []byte(fmt.Sprintf("foo%d", i))} + fb := b.Save(ctx, restic.DataBlob, buf) + results = append(results, fb) + } + + tmb.Kill(nil) + + err := tmb.Wait() + if err == nil { + t.Errorf("expected error not found") + } + + if err != errTest { + t.Fatalf("unexpected error found: %v", err) + } + }) + } +} diff --git a/internal/archiver/buffer.go b/internal/archiver/buffer.go new file mode 100644 index 000000000..ef7131322 --- /dev/null +++ b/internal/archiver/buffer.go @@ -0,0 +1,89 @@ +package archiver + +import ( + "context" + "sync" +) + +// Buffer is a reusable buffer. After the buffer has been used, Release should +// be called so the underlying slice is put back into the pool. +type Buffer struct { + Data []byte + Put func(*Buffer) +} + +// Release puts the buffer back into the pool it came from. +func (b *Buffer) Release() { + if b.Put != nil { + b.Put(b) + } +} + +// BufferPool implements a limited set of reusable buffers. +type BufferPool struct { + ch chan *Buffer + chM sync.Mutex + defaultSize int + clearOnce sync.Once +} + +// NewBufferPool initializes a new buffer pool. When the context is cancelled, +// all buffers are released. The pool stores at most max items. New buffers are +// created with defaultSize, buffers that are larger are released and not put +// back. +func NewBufferPool(ctx context.Context, max int, defaultSize int) *BufferPool { + b := &BufferPool{ + ch: make(chan *Buffer, max), + defaultSize: defaultSize, + } + go func() { + <-ctx.Done() + b.clear() + }() + return b +} + +// Get returns a new buffer, either from the pool or newly allocated. +func (pool *BufferPool) Get() *Buffer { + pool.chM.Lock() + defer pool.chM.Unlock() + select { + case buf := <-pool.ch: + return buf + default: + } + + b := &Buffer{ + Put: pool.Put, + Data: make([]byte, pool.defaultSize), + } + + return b +} + +// Put returns a buffer to the pool for reuse. +func (pool *BufferPool) Put(b *Buffer) { + if cap(b.Data) > pool.defaultSize { + return + } + + pool.chM.Lock() + defer pool.chM.Unlock() + select { + case pool.ch <- b: + default: + } +} + +// clear empties the buffer so that all items can be garbage collected. +func (pool *BufferPool) clear() { + pool.clearOnce.Do(func() { + ch := pool.ch + pool.chM.Lock() + pool.ch = nil + pool.chM.Unlock() + close(ch) + for range ch { + } + }) +} diff --git a/internal/archiver/doc.go b/internal/archiver/doc.go new file mode 100644 index 000000000..928145aa2 --- /dev/null +++ b/internal/archiver/doc.go @@ -0,0 +1,12 @@ +// Package archiver contains the code which reads files, splits them into +// chunks and saves the data to the repository. +// +// An Archiver has a number of worker goroutines handling saving the different +// data structures to the repository, the details are implemented by the +// FileSaver, BlobSaver, and TreeSaver types. +// +// The main goroutine (the one calling Snapshot()) traverses the directory tree +// and delegates all work to these worker pools. They return a type +// (FutureFile, FutureBlob, and FutureTree) which can be resolved later, by +// calling Wait() on it. 
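+//
+// As a rough editorial sketch of that flow (the names fileSaver, f, fi,
+// start, and complete are hypothetical):
+//
+//	ff := fileSaver.Save(ctx, "/snapshot/path", f, fi, start, complete)
+//	// continue traversing while the workers chunk and store the file
+//	ff.Wait(ctx)
+//	node := ff.Node() // nil if saving failed; see ff.Err()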
+package archiver diff --git a/internal/archiver/file_saver.go b/internal/archiver/file_saver.go new file mode 100644 index 000000000..66defe358 --- /dev/null +++ b/internal/archiver/file_saver.go @@ -0,0 +1,251 @@ +package archiver + +import ( + "context" + "io" + "os" + + "github.com/restic/chunker" + "github.com/restic/restic/internal/debug" + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/fs" + "github.com/restic/restic/internal/restic" + tomb "gopkg.in/tomb.v2" +) + +// FutureFile is returned by Save and will return the data once it +// has been processed. +type FutureFile struct { + ch <-chan saveFileResponse + res saveFileResponse +} + +// Wait blocks until the result of the save operation is received or ctx is +// cancelled. +func (s *FutureFile) Wait(ctx context.Context) { + select { + case res, ok := <-s.ch: + if ok { + s.res = res + } + case <-ctx.Done(): + return + } +} + +// Node returns the node once it is available. +func (s *FutureFile) Node() *restic.Node { + return s.res.node +} + +// Stats returns the stats for the file once they are available. +func (s *FutureFile) Stats() ItemStats { + return s.res.stats +} + +// Err returns the error in case an error occurred. +func (s *FutureFile) Err() error { + return s.res.err +} + +// SaveBlobFn saves a blob to a repo. +type SaveBlobFn func(context.Context, restic.BlobType, *Buffer) FutureBlob + +// FileSaver concurrently saves incoming files to the repo. +type FileSaver struct { + fs fs.FS + saveFilePool *BufferPool + saveBlob SaveBlobFn + + pol chunker.Pol + + ch chan<- saveFileJob + done <-chan struct{} + + CompleteBlob func(filename string, bytes uint64) + + NodeFromFileInfo func(filename string, fi os.FileInfo) (*restic.Node, error) +} + +// NewFileSaver returns a new file saver. A worker pool with fileWorkers is +// started, it is stopped when ctx is cancelled. +func NewFileSaver(ctx context.Context, t *tomb.Tomb, fs fs.FS, save SaveBlobFn, pol chunker.Pol, fileWorkers, blobWorkers uint) *FileSaver { + ch := make(chan saveFileJob) + + debug.Log("new file saver with %v file workers and %v blob workers", fileWorkers, blobWorkers) + + poolSize := fileWorkers + blobWorkers + + s := &FileSaver{ + fs: fs, + saveBlob: save, + saveFilePool: NewBufferPool(ctx, int(poolSize), chunker.MaxSize), + pol: pol, + ch: ch, + done: t.Dying(), + + CompleteBlob: func(string, uint64) {}, + } + + for i := uint(0); i < fileWorkers; i++ { + t.Go(func() error { + s.worker(t.Context(ctx), ch) + return nil + }) + } + + return s +} + +// CompleteFunc is called when the file has been saved. +type CompleteFunc func(*restic.Node, ItemStats) + +// Save stores the file f and returns the data once it has been completed. The +// file is closed by Save. 
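+// Because the file is handed over to a worker, callers must not use it after
+// Save returns; results are observed only through the returned FutureFile.
+// An editorial sketch (f and fi come from an earlier Open/Stat and are
+// hypothetical; start must be non-nil, complete may be nil):
+//
+//	ff := s.Save(ctx, snPath, f, fi, func() {}, nil)
+//	ff.Wait(ctx)
+//	if err := ff.Err(); err != nil {
+//		// handle the error; f has already been closed by Save
+//	}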
+func (s *FileSaver) Save(ctx context.Context, snPath string, file fs.File, fi os.FileInfo, start func(), complete CompleteFunc) FutureFile { + ch := make(chan saveFileResponse, 1) + job := saveFileJob{ + snPath: snPath, + file: file, + fi: fi, + start: start, + complete: complete, + ch: ch, + } + + select { + case s.ch <- job: + case <-s.done: + debug.Log("not sending job, FileSaver is done") + _ = file.Close() + close(ch) + return FutureFile{ch: ch} + case <-ctx.Done(): + debug.Log("not sending job, context is cancelled: %v", ctx.Err()) + _ = file.Close() + close(ch) + return FutureFile{ch: ch} + } + + return FutureFile{ch: ch} +} + +type saveFileJob struct { + snPath string + file fs.File + fi os.FileInfo + ch chan<- saveFileResponse + complete CompleteFunc + start func() +} + +type saveFileResponse struct { + node *restic.Node + stats ItemStats + err error +} + +// saveFile stores the file f in the repo, then closes it. +func (s *FileSaver) saveFile(ctx context.Context, chnker *chunker.Chunker, snPath string, f fs.File, fi os.FileInfo, start func()) saveFileResponse { + start() + + stats := ItemStats{} + + debug.Log("%v", snPath) + + node, err := s.NodeFromFileInfo(f.Name(), fi) + if err != nil { + _ = f.Close() + return saveFileResponse{err: err} + } + + if node.Type != "file" { + _ = f.Close() + return saveFileResponse{err: errors.Errorf("node type %q is wrong", node.Type)} + } + + // reuse the chunker + chnker.Reset(f, s.pol) + + var results []FutureBlob + + node.Content = []restic.ID{} + var size uint64 + for { + buf := s.saveFilePool.Get() + chunk, err := chnker.Next(buf.Data) + if errors.Cause(err) == io.EOF { + buf.Release() + break + } + + buf.Data = chunk.Data + + size += uint64(chunk.Length) + + if err != nil { + _ = f.Close() + return saveFileResponse{err: err} + } + + // test if the context has been cancelled, return the error + if ctx.Err() != nil { + _ = f.Close() + return saveFileResponse{err: ctx.Err()} + } + + res := s.saveBlob(ctx, restic.DataBlob, buf) + results = append(results, res) + + // test if the context has been cancelled, return the error + if ctx.Err() != nil { + _ = f.Close() + return saveFileResponse{err: ctx.Err()} + } + + s.CompleteBlob(f.Name(), uint64(len(chunk.Data))) + } + + err = f.Close() + if err != nil { + return saveFileResponse{err: err} + } + + for _, res := range results { + res.Wait(ctx) + if !res.Known() { + stats.DataBlobs++ + stats.DataSize += uint64(res.Length()) + } + + node.Content = append(node.Content, res.ID()) + } + + node.Size = size + + return saveFileResponse{ + node: node, + stats: stats, + } +} + +func (s *FileSaver) worker(ctx context.Context, jobs <-chan saveFileJob) { + // a worker has one chunker which is reused for each file (because it contains a rather large buffer) + chnker := chunker.New(nil, s.pol) + + for { + var job saveFileJob + select { + case <-ctx.Done(): + return + case job = <-jobs: + } + + res := s.saveFile(ctx, chnker, job.snPath, job.file, job.fi, job.start) + if job.complete != nil { + job.complete(res.node, res.stats) + } + job.ch <- res + close(job.ch) + } +} diff --git a/internal/archiver/file_saver_test.go b/internal/archiver/file_saver_test.go new file mode 100644 index 000000000..c8cf58735 --- /dev/null +++ b/internal/archiver/file_saver_test.go @@ -0,0 +1,97 @@ +package archiver + +import ( + "context" + "fmt" + "io/ioutil" + "path/filepath" + "runtime" + "testing" + + "github.com/restic/chunker" + "github.com/restic/restic/internal/fs" + "github.com/restic/restic/internal/restic" + 
"github.com/restic/restic/internal/test" + tomb "gopkg.in/tomb.v2" +) + +func createTestFiles(t testing.TB, num int) (files []string, cleanup func()) { + tempdir, cleanup := test.TempDir(t) + + for i := 0; i < 15; i++ { + filename := fmt.Sprintf("testfile-%d", i) + err := ioutil.WriteFile(filepath.Join(tempdir, filename), []byte(filename), 0600) + if err != nil { + t.Fatal(err) + } + files = append(files, filepath.Join(tempdir, filename)) + } + + return files, cleanup +} + +func startFileSaver(ctx context.Context, t testing.TB, fs fs.FS) (*FileSaver, *tomb.Tomb) { + var tmb tomb.Tomb + + saveBlob := func(ctx context.Context, tpe restic.BlobType, buf *Buffer) FutureBlob { + ch := make(chan saveBlobResponse) + close(ch) + return FutureBlob{ch: ch} + } + + workers := uint(runtime.NumCPU()) + pol, err := chunker.RandomPolynomial() + if err != nil { + t.Fatal(err) + } + + s := NewFileSaver(ctx, &tmb, fs, saveBlob, pol, workers, workers) + s.NodeFromFileInfo = restic.NodeFromFileInfo + + return s, &tmb +} + +func TestFileSaver(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + files, cleanup := createTestFiles(t, 15) + defer cleanup() + + startFn := func() {} + completeFn := func(*restic.Node, ItemStats) {} + + testFs := fs.Local{} + s, tmb := startFileSaver(ctx, t, testFs) + + var results []FutureFile + + for _, filename := range files { + f, err := testFs.Open(filename) + if err != nil { + t.Fatal(err) + } + + fi, err := f.Stat() + if err != nil { + t.Fatal(err) + } + + ff := s.Save(ctx, filename, f, fi, startFn, completeFn) + results = append(results, ff) + } + + for _, file := range results { + file.Wait(ctx) + if file.Err() != nil { + t.Errorf("unable to save file: %v", file.Err()) + } + } + + tmb.Kill(nil) + + err := tmb.Wait() + if err != nil { + t.Fatal(err) + } +} diff --git a/internal/archiver/index_uploader.go b/internal/archiver/index_uploader.go new file mode 100644 index 000000000..c6edb7a01 --- /dev/null +++ b/internal/archiver/index_uploader.go @@ -0,0 +1,53 @@ +package archiver + +import ( + "context" + "time" + + "github.com/restic/restic/internal/debug" + "github.com/restic/restic/internal/repository" + "github.com/restic/restic/internal/restic" +) + +// IndexUploader polls the repo for full indexes and uploads them. +type IndexUploader struct { + restic.Repository + + // Start is called when an index is to be uploaded. + Start func() + + // Complete is called when uploading an index has finished. + Complete func(id restic.ID) +} + +// Upload periodically uploads full indexes to the repo. When shutdown is +// cancelled, the last index upload will finish and then Upload returns. 
+func (u IndexUploader) Upload(ctx, shutdown context.Context, interval time.Duration) error {
+	ticker := time.NewTicker(interval)
+	defer ticker.Stop()
+
+	for {
+		select {
+		case <-ctx.Done():
+			return nil
+		case <-shutdown.Done():
+			return nil
+		case <-ticker.C:
+			full := u.Repository.Index().(*repository.MasterIndex).FullIndexes()
+			for _, idx := range full {
+				if u.Start != nil {
+					u.Start()
+				}
+
+				id, err := repository.SaveIndex(ctx, u.Repository, idx)
+				if err != nil {
+					debug.Log("save indexes returned an error: %v", err)
+					return err
+				}
+				if u.Complete != nil {
+					u.Complete(id)
+				}
+			}
+		}
+	}
+}
diff --git a/internal/archiver/scanner.go b/internal/archiver/scanner.go
new file mode 100644
index 000000000..bd789893c
--- /dev/null
+++ b/internal/archiver/scanner.go
@@ -0,0 +1,107 @@
+package archiver
+
+import (
+	"context"
+	"os"
+	"path/filepath"
+
+	"github.com/restic/restic/internal/fs"
+)
+
+// Scanner traverses the targets and calls the function Result with cumulative
+// statistics about the files and directories found. Select is used to decide
+// which items should be included. Error is called when an error occurs.
+type Scanner struct {
+	FS           fs.FS
+	SelectByName SelectByNameFunc
+	Select       SelectFunc
+	Error        ErrorFunc
+	Result       func(item string, s ScanStats)
+}
+
+// NewScanner initializes a new Scanner.
+func NewScanner(fs fs.FS) *Scanner {
+	return &Scanner{
+		FS:           fs,
+		SelectByName: func(item string) bool { return true },
+		Select:       func(item string, fi os.FileInfo) bool { return true },
+		Error:        func(item string, fi os.FileInfo, err error) error { return err },
+		Result:       func(item string, s ScanStats) {},
+	}
+}
+
+// ScanStats collects statistics about the files, directories, and other items
+// seen during a scan.
+type ScanStats struct {
+	Files, Dirs, Others uint
+	Bytes               uint64
+}
+
+// Scan traverses the targets. The function Result is called for each new item
+// found; after all targets have been traversed, Result is called one final
+// time with an empty item name and the cumulative statistics. Scan itself
+// only returns an error. 
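+// An editorial usage sketch (the path and the print statement are
+// hypothetical):
+//
+//	sc := NewScanner(fs.Local{})
+//	sc.Result = func(item string, s ScanStats) {
+//		if item == "" { // final, cumulative result
+//			fmt.Printf("%d files, %d dirs, %d bytes\n", s.Files, s.Dirs, s.Bytes)
+//		}
+//	}
+//	err := sc.Scan(ctx, []string{"/home/user"})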
+func (s *Scanner) Scan(ctx context.Context, targets []string) error { + var stats ScanStats + for _, target := range targets { + abstarget, err := s.FS.Abs(target) + if err != nil { + return err + } + + stats, err = s.scan(ctx, stats, abstarget) + if err != nil { + return err + } + + if ctx.Err() != nil { + return nil + } + } + + s.Result("", stats) + return nil +} + +func (s *Scanner) scan(ctx context.Context, stats ScanStats, target string) (ScanStats, error) { + if ctx.Err() != nil { + return stats, nil + } + + // exclude files by path before running stat to reduce number of lstat calls + if !s.SelectByName(target) { + return stats, nil + } + + // get file information + fi, err := s.FS.Lstat(target) + if err != nil { + return stats, s.Error(target, fi, err) + } + + // run remaining select functions that require file information + if !s.Select(target, fi) { + return stats, nil + } + + switch { + case fi.Mode().IsRegular(): + stats.Files++ + stats.Bytes += uint64(fi.Size()) + case fi.Mode().IsDir(): + names, err := readdirnames(s.FS, target) + if err != nil { + return stats, s.Error(target, fi, err) + } + + for _, name := range names { + stats, err = s.scan(ctx, stats, filepath.Join(target, name)) + if err != nil { + return stats, err + } + } + stats.Dirs++ + default: + stats.Others++ + } + + s.Result(target, stats) + return stats, nil +} diff --git a/internal/archiver/scanner_test.go b/internal/archiver/scanner_test.go new file mode 100644 index 000000000..a171df5f6 --- /dev/null +++ b/internal/archiver/scanner_test.go @@ -0,0 +1,329 @@ +package archiver + +import ( + "context" + "os" + "path/filepath" + "runtime" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/restic/restic/internal/fs" + restictest "github.com/restic/restic/internal/test" +) + +func TestScanner(t *testing.T) { + var tests = []struct { + name string + src TestDir + want map[string]ScanStats + selFn SelectFunc + }{ + { + name: "include-all", + src: TestDir{ + "other": TestFile{Content: "another file"}, + "work": TestDir{ + "foo": TestFile{Content: "foo"}, + "foo.txt": TestFile{Content: "foo text file"}, + "subdir": TestDir{ + "other": TestFile{Content: "other in subdir"}, + "bar.txt": TestFile{Content: "bar.txt in subdir"}, + }, + }, + }, + want: map[string]ScanStats{ + filepath.FromSlash("other"): ScanStats{Files: 1, Bytes: 12}, + filepath.FromSlash("work/foo"): ScanStats{Files: 2, Bytes: 15}, + filepath.FromSlash("work/foo.txt"): ScanStats{Files: 3, Bytes: 28}, + filepath.FromSlash("work/subdir/bar.txt"): ScanStats{Files: 4, Bytes: 45}, + filepath.FromSlash("work/subdir/other"): ScanStats{Files: 5, Bytes: 60}, + filepath.FromSlash("work/subdir"): ScanStats{Files: 5, Dirs: 1, Bytes: 60}, + filepath.FromSlash("work"): ScanStats{Files: 5, Dirs: 2, Bytes: 60}, + filepath.FromSlash("."): ScanStats{Files: 5, Dirs: 3, Bytes: 60}, + filepath.FromSlash(""): ScanStats{Files: 5, Dirs: 3, Bytes: 60}, + }, + }, + { + name: "select-txt", + src: TestDir{ + "other": TestFile{Content: "another file"}, + "work": TestDir{ + "foo": TestFile{Content: "foo"}, + "foo.txt": TestFile{Content: "foo text file"}, + "subdir": TestDir{ + "other": TestFile{Content: "other in subdir"}, + "bar.txt": TestFile{Content: "bar.txt in subdir"}, + }, + }, + }, + selFn: func(item string, fi os.FileInfo) bool { + if fi.IsDir() { + return true + } + + if filepath.Ext(item) == ".txt" { + return true + } + return false + }, + want: map[string]ScanStats{ + filepath.FromSlash("work/foo.txt"): ScanStats{Files: 1, Bytes: 13}, + 
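+				// Stats are running totals: the entry above counts
+				// "foo text file" (13 bytes); the entry below adds
+				// "bar.txt in subdir" (17 bytes), giving 2 files, 30 bytes.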
filepath.FromSlash("work/subdir/bar.txt"): ScanStats{Files: 2, Bytes: 30}, + filepath.FromSlash("work/subdir"): ScanStats{Files: 2, Dirs: 1, Bytes: 30}, + filepath.FromSlash("work"): ScanStats{Files: 2, Dirs: 2, Bytes: 30}, + filepath.FromSlash("."): ScanStats{Files: 2, Dirs: 3, Bytes: 30}, + filepath.FromSlash(""): ScanStats{Files: 2, Dirs: 3, Bytes: 30}, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + tempdir, cleanup := restictest.TempDir(t) + defer cleanup() + + TestCreateFiles(t, tempdir, test.src) + + back := fs.TestChdir(t, tempdir) + defer back() + + cur, err := os.Getwd() + if err != nil { + t.Fatal(err) + } + + sc := NewScanner(fs.Track{FS: fs.Local{}}) + if test.selFn != nil { + sc.Select = test.selFn + } + + results := make(map[string]ScanStats) + sc.Result = func(item string, s ScanStats) { + var p string + var err error + + if item != "" { + p, err = filepath.Rel(cur, item) + if err != nil { + panic(err) + } + } + + results[p] = s + } + + err = sc.Scan(ctx, []string{"."}) + if err != nil { + t.Fatal(err) + } + + if !cmp.Equal(test.want, results) { + t.Error(cmp.Diff(test.want, results)) + } + }) + } +} + +func TestScannerError(t *testing.T) { + var tests = []struct { + name string + unix bool + src TestDir + result ScanStats + selFn SelectFunc + errFn func(t testing.TB, item string, fi os.FileInfo, err error) error + resFn func(t testing.TB, item string, s ScanStats) + prepare func(t testing.TB) + }{ + { + name: "no-error", + src: TestDir{ + "other": TestFile{Content: "another file"}, + "work": TestDir{ + "foo": TestFile{Content: "foo"}, + "foo.txt": TestFile{Content: "foo text file"}, + "subdir": TestDir{ + "other": TestFile{Content: "other in subdir"}, + "bar.txt": TestFile{Content: "bar.txt in subdir"}, + }, + }, + }, + result: ScanStats{Files: 5, Dirs: 3, Bytes: 60}, + }, + { + name: "unreadable-dir", + unix: true, + src: TestDir{ + "other": TestFile{Content: "another file"}, + "work": TestDir{ + "foo": TestFile{Content: "foo"}, + "foo.txt": TestFile{Content: "foo text file"}, + "subdir": TestDir{ + "other": TestFile{Content: "other in subdir"}, + "bar.txt": TestFile{Content: "bar.txt in subdir"}, + }, + }, + }, + result: ScanStats{Files: 3, Dirs: 2, Bytes: 28}, + prepare: func(t testing.TB) { + err := os.Chmod(filepath.Join("work", "subdir"), 0000) + if err != nil { + t.Fatal(err) + } + }, + errFn: func(t testing.TB, item string, fi os.FileInfo, err error) error { + if item == filepath.FromSlash("work/subdir") { + return nil + } + + return err + }, + }, + { + name: "removed-item", + src: TestDir{ + "bar": TestFile{Content: "bar"}, + "baz": TestFile{Content: "baz"}, + "foo": TestFile{Content: "foo"}, + "other": TestFile{Content: "other"}, + }, + result: ScanStats{Files: 3, Dirs: 1, Bytes: 11}, + resFn: func(t testing.TB, item string, s ScanStats) { + if item == "bar" { + err := os.Remove("foo") + if err != nil { + t.Fatal(err) + } + } + }, + errFn: func(t testing.TB, item string, fi os.FileInfo, err error) error { + if item == "foo" { + t.Logf("ignoring error for %v: %v", item, err) + return nil + } + + return err + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + if test.unix && runtime.GOOS == "windows" { + t.Skipf("skip on windows") + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + tempdir, cleanup := restictest.TempDir(t) + defer cleanup() + + TestCreateFiles(t, tempdir, test.src) 
+ + back := fs.TestChdir(t, tempdir) + defer back() + + cur, err := os.Getwd() + if err != nil { + t.Fatal(err) + } + + if test.prepare != nil { + test.prepare(t) + } + + sc := NewScanner(fs.Track{FS: fs.Local{}}) + if test.selFn != nil { + sc.Select = test.selFn + } + + var stats ScanStats + + sc.Result = func(item string, s ScanStats) { + if item == "" { + stats = s + return + } + + if test.resFn != nil { + p, relErr := filepath.Rel(cur, item) + if relErr != nil { + panic(relErr) + } + test.resFn(t, p, s) + } + } + if test.errFn != nil { + sc.Error = func(item string, fi os.FileInfo, err error) error { + p, relErr := filepath.Rel(cur, item) + if relErr != nil { + panic(relErr) + } + + return test.errFn(t, p, fi, err) + } + } + + err = sc.Scan(ctx, []string{"."}) + if err != nil { + t.Fatal(err) + } + + if stats != test.result { + t.Errorf("wrong final result, want\n %#v\ngot:\n %#v", test.result, stats) + } + }) + } +} + +func TestScannerCancel(t *testing.T) { + src := TestDir{ + "bar": TestFile{Content: "bar"}, + "baz": TestFile{Content: "baz"}, + "foo": TestFile{Content: "foo"}, + "other": TestFile{Content: "other"}, + } + + result := ScanStats{Files: 2, Dirs: 1, Bytes: 6} + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + tempdir, cleanup := restictest.TempDir(t) + defer cleanup() + + TestCreateFiles(t, tempdir, src) + + back := fs.TestChdir(t, tempdir) + defer back() + + cur, err := os.Getwd() + if err != nil { + t.Fatal(err) + } + + sc := NewScanner(fs.Track{FS: fs.Local{}}) + var lastStats ScanStats + sc.Result = func(item string, s ScanStats) { + lastStats = s + + if item == filepath.Join(cur, "baz") { + t.Logf("found baz") + cancel() + } + } + + err = sc.Scan(ctx, []string{"."}) + if err != nil { + t.Errorf("unexpected error %v found", err) + } + + if lastStats != result { + t.Errorf("wrong final result, want\n %#v\ngot:\n %#v", result, lastStats) + } +} diff --git a/internal/archiver/testing.go b/internal/archiver/testing.go new file mode 100644 index 000000000..bdb122d69 --- /dev/null +++ b/internal/archiver/testing.go @@ -0,0 +1,343 @@ +package archiver + +import ( + "context" + "io/ioutil" + "os" + "path" + "path/filepath" + "runtime" + "strings" + "testing" + "time" + + "github.com/restic/restic/internal/debug" + "github.com/restic/restic/internal/fs" + "github.com/restic/restic/internal/restic" + "github.com/restic/restic/internal/test" +) + +// TestSnapshot creates a new snapshot of path. +func TestSnapshot(t testing.TB, repo restic.Repository, path string, parent *restic.ID) *restic.Snapshot { + arch := New(repo, fs.Local{}, Options{}) + opts := SnapshotOptions{ + Time: time.Now(), + Hostname: "localhost", + Tags: []string{"test"}, + } + if parent != nil { + opts.ParentSnapshot = *parent + } + sn, _, err := arch.Snapshot(context.TODO(), []string{path}, opts) + if err != nil { + t.Fatal(err) + } + return sn +} + +// TestDir describes a directory structure to create for a test. +type TestDir map[string]interface{} + +func (d TestDir) String() string { + return "" +} + +// TestFile describes a file created for a test. +type TestFile struct { + Content string +} + +func (f TestFile) String() string { + return "" +} + +// TestSymlink describes a symlink created for a test. +type TestSymlink struct { + Target string +} + +func (s TestSymlink) String() string { + return "" +} + +// TestCreateFiles creates a directory structure described by dir at target, +// which must already exist. On Windows, symlinks aren't created. 
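+// For example (an editorial sketch):
+//
+//	TestCreateFiles(t, tempdir, TestDir{
+//		"dir": TestDir{
+//			"file": TestFile{Content: "hello"},
+//			"link": TestSymlink{Target: "file"},
+//		},
+//	})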
+func TestCreateFiles(t testing.TB, target string, dir TestDir) { + test.Helper(t).Helper() + for name, item := range dir { + targetPath := filepath.Join(target, name) + + switch it := item.(type) { + case TestFile: + err := ioutil.WriteFile(targetPath, []byte(it.Content), 0644) + if err != nil { + t.Fatal(err) + } + case TestSymlink: + if runtime.GOOS == "windows" { + continue + } + + err := fs.Symlink(filepath.FromSlash(it.Target), targetPath) + if err != nil { + t.Fatal(err) + } + case TestDir: + err := fs.Mkdir(targetPath, 0755) + if err != nil { + t.Fatal(err) + } + + TestCreateFiles(t, targetPath, it) + } + } +} + +// TestWalkFunc is used by TestWalkFiles to traverse the dir. When an error is +// returned, traversal stops and the surrounding test is marked as failed. +type TestWalkFunc func(path string, item interface{}) error + +// TestWalkFiles runs fn for each file/directory in dir, the filename will be +// constructed with target as the prefix. Symlinks on Windows are ignored. +func TestWalkFiles(t testing.TB, target string, dir TestDir, fn TestWalkFunc) { + test.Helper(t).Helper() + for name, item := range dir { + targetPath := filepath.Join(target, name) + + err := fn(targetPath, item) + if err != nil { + t.Fatalf("TestWalkFunc returned error for %v: %v", targetPath, err) + return + } + + if dir, ok := item.(TestDir); ok { + TestWalkFiles(t, targetPath, dir, fn) + } + } +} + +// fixpath removes UNC paths (starting with `\\?`) on windows. On Linux, it's a noop. +func fixpath(item string) string { + if runtime.GOOS != "windows" { + return item + } + if strings.HasPrefix(item, `\\?`) { + return item[4:] + } + return item +} + +// TestEnsureFiles tests if the directory structure at target is the same as +// described in dir. +func TestEnsureFiles(t testing.TB, target string, dir TestDir) { + test.Helper(t).Helper() + pathsChecked := make(map[string]struct{}) + + // first, test that all items are there + TestWalkFiles(t, target, dir, func(path string, item interface{}) error { + // ignore symlinks on Windows + if _, ok := item.(TestSymlink); ok && runtime.GOOS == "windows" { + // mark paths and parents as checked + pathsChecked[path] = struct{}{} + for parent := filepath.Dir(path); parent != target; parent = filepath.Dir(parent) { + pathsChecked[parent] = struct{}{} + } + return nil + } + + fi, err := fs.Lstat(path) + if err != nil { + return err + } + + switch node := item.(type) { + case TestDir: + if !fi.IsDir() { + t.Errorf("is not a directory: %v", path) + } + return nil + case TestFile: + if !fs.IsRegularFile(fi) { + t.Errorf("is not a regular file: %v", path) + return nil + } + + content, err := ioutil.ReadFile(path) + if err != nil { + return err + } + + if string(content) != node.Content { + t.Errorf("wrong content for %v, want %q, got %q", path, node.Content, content) + } + case TestSymlink: + if fi.Mode()&os.ModeType != os.ModeSymlink { + t.Errorf("is not a symlink: %v", path) + return nil + } + + target, err := fs.Readlink(path) + if err != nil { + return err + } + + if target != node.Target { + t.Errorf("wrong target for %v, want %v, got %v", path, node.Target, target) + } + } + + pathsChecked[path] = struct{}{} + + for parent := filepath.Dir(path); parent != target; parent = filepath.Dir(parent) { + pathsChecked[parent] = struct{}{} + } + + return nil + }) + + // then, traverse the directory again, looking for additional files + err := fs.Walk(target, func(path string, fi os.FileInfo, err error) error { + if err != nil { + return err + } + + path = fixpath(path) + + if 
path == target { + return nil + } + + _, ok := pathsChecked[path] + if !ok { + t.Errorf("additional item found: %v %v", path, fi.Mode()) + } + + return nil + }) + if err != nil { + t.Fatal(err) + } +} + +// TestEnsureFileContent checks if the file in the repo is the same as file. +func TestEnsureFileContent(ctx context.Context, t testing.TB, repo restic.Repository, filename string, node *restic.Node, file TestFile) { + if int(node.Size) != len(file.Content) { + t.Fatalf("%v: wrong node size: want %d, got %d", filename, node.Size, len(file.Content)) + return + } + + content := make([]byte, restic.CiphertextLength(len(file.Content))) + pos := 0 + for _, id := range node.Content { + n, err := repo.LoadBlob(ctx, restic.DataBlob, id, content[pos:]) + if err != nil { + t.Fatalf("error loading blob %v: %v", id.Str(), err) + return + } + + pos += n + } + + content = content[:pos] + + if string(content) != file.Content { + t.Fatalf("%v: wrong content returned, want %q, got %q", filename, file.Content, content) + } +} + +// TestEnsureTree checks that the tree ID in the repo matches dir. On Windows, +// Symlinks are ignored. +func TestEnsureTree(ctx context.Context, t testing.TB, prefix string, repo restic.Repository, treeID restic.ID, dir TestDir) { + test.Helper(t).Helper() + + tree, err := repo.LoadTree(ctx, treeID) + if err != nil { + t.Fatal(err) + return + } + + var nodeNames []string + for _, node := range tree.Nodes { + nodeNames = append(nodeNames, node.Name) + } + debug.Log("%v (%v) %v", prefix, treeID.Str(), nodeNames) + + checked := make(map[string]struct{}) + for _, node := range tree.Nodes { + nodePrefix := path.Join(prefix, node.Name) + + entry, ok := dir[node.Name] + if !ok { + t.Errorf("unexpected tree node %q found, want: %#v", node.Name, dir) + return + } + + checked[node.Name] = struct{}{} + + switch e := entry.(type) { + case TestDir: + if node.Type != "dir" { + t.Errorf("tree node %v has wrong type %q, want %q", nodePrefix, node.Type, "dir") + return + } + + if node.Subtree == nil { + t.Errorf("tree node %v has nil subtree", nodePrefix) + return + } + + TestEnsureTree(ctx, t, path.Join(prefix, node.Name), repo, *node.Subtree, e) + case TestFile: + if node.Type != "file" { + t.Errorf("tree node %v has wrong type %q, want %q", nodePrefix, node.Type, "file") + } + TestEnsureFileContent(ctx, t, repo, nodePrefix, node, e) + case TestSymlink: + // skip symlinks on windows + if runtime.GOOS == "windows" { + continue + } + if node.Type != "symlink" { + t.Errorf("tree node %v has wrong type %q, want %q", nodePrefix, node.Type, "file") + } + + if e.Target != node.LinkTarget { + t.Errorf("symlink %v has wrong target, want %q, got %q", nodePrefix, e.Target, node.LinkTarget) + } + } + } + + for name := range dir { + // skip checking symlinks on Windows + entry := dir[name] + if _, ok := entry.(TestSymlink); ok && runtime.GOOS == "windows" { + continue + } + + _, ok := checked[name] + if !ok { + t.Errorf("tree %v: expected node %q not found, has: %v", prefix, name, nodeNames) + } + } +} + +// TestEnsureSnapshot tests if the snapshot in the repo has exactly the same +// structure as dir. On Windows, Symlinks are ignored. 
+func TestEnsureSnapshot(t testing.TB, repo restic.Repository, snapshotID restic.ID, dir TestDir) { + test.Helper(t).Helper() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + sn, err := restic.LoadSnapshot(ctx, repo, snapshotID) + if err != nil { + t.Fatal(err) + return + } + + if sn.Tree == nil { + t.Fatal("snapshot has nil tree ID") + return + } + + TestEnsureTree(ctx, t, "/", repo, *sn.Tree, dir) +} diff --git a/internal/archiver/testing_test.go b/internal/archiver/testing_test.go new file mode 100644 index 000000000..c8ce8d529 --- /dev/null +++ b/internal/archiver/testing_test.go @@ -0,0 +1,530 @@ +package archiver + +import ( + "context" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/restic/restic/internal/fs" + "github.com/restic/restic/internal/repository" + restictest "github.com/restic/restic/internal/test" +) + +// MockT passes through all logging functions from T, but catches Fail(), +// Error/f() and Fatal/f(). It is used to test test helper functions. +type MockT struct { + *testing.T + HasFailed bool +} + +// Fail marks the function as having failed but continues execution. +func (t *MockT) Fail() { + t.T.Log("MockT Fail() called") + t.HasFailed = true +} + +// Fatal is equivalent to Log followed by FailNow. +func (t *MockT) Fatal(args ...interface{}) { + t.T.Logf("MockT Fatal called with %v", args) + t.HasFailed = true +} + +// Fatalf is equivalent to Logf followed by FailNow. +func (t *MockT) Fatalf(msg string, args ...interface{}) { + t.T.Logf("MockT Fatal called: "+msg, args...) + t.HasFailed = true +} + +// Error is equivalent to Log followed by Fail. +func (t *MockT) Error(args ...interface{}) { + t.T.Logf("MockT Error called with %v", args) + t.HasFailed = true +} + +// Errorf is equivalent to Logf followed by Fail. +func (t *MockT) Errorf(msg string, args ...interface{}) { + t.T.Logf("MockT Error called: "+msg, args...) 
+ t.HasFailed = true +} + +func createFilesAt(t testing.TB, targetdir string, files map[string]interface{}) { + for name, item := range files { + target := filepath.Join(targetdir, filepath.FromSlash(name)) + err := fs.MkdirAll(filepath.Dir(target), 0700) + if err != nil { + t.Fatal(err) + } + + switch it := item.(type) { + case TestFile: + err := ioutil.WriteFile(target, []byte(it.Content), 0600) + if err != nil { + t.Fatal(err) + } + case TestSymlink: + // ignore symlinks on windows + if runtime.GOOS == "windows" { + continue + } + err := fs.Symlink(filepath.FromSlash(it.Target), target) + if err != nil { + t.Fatal(err) + } + } + } +} + +func TestTestCreateFiles(t *testing.T) { + var tests = []struct { + dir TestDir + files map[string]interface{} + }{ + { + dir: TestDir{ + "foo": TestFile{Content: "foo"}, + "subdir": TestDir{ + "subfile": TestFile{Content: "bar"}, + }, + "sub": TestDir{ + "subsub": TestDir{ + "link": TestSymlink{Target: "x/y/z"}, + }, + }, + }, + files: map[string]interface{}{ + "foo": TestFile{Content: "foo"}, + "subdir": TestDir{}, + "subdir/subfile": TestFile{Content: "bar"}, + "sub/subsub/link": TestSymlink{Target: "x/y/z"}, + }, + }, + } + + for i, test := range tests { + tempdir, cleanup := restictest.TempDir(t) + defer cleanup() + + t.Run("", func(t *testing.T) { + tempdir := filepath.Join(tempdir, fmt.Sprintf("test-%d", i)) + err := fs.MkdirAll(tempdir, 0700) + if err != nil { + t.Fatal(err) + } + + TestCreateFiles(t, tempdir, test.dir) + + for name, item := range test.files { + // don't check symlinks on windows + if runtime.GOOS == "windows" { + if _, ok := item.(TestSymlink); ok { + continue + } + continue + } + + targetPath := filepath.Join(tempdir, filepath.FromSlash(name)) + fi, err := fs.Lstat(targetPath) + if err != nil { + t.Error(err) + continue + } + + switch node := item.(type) { + case TestFile: + if !fs.IsRegularFile(fi) { + t.Errorf("is not regular file: %v", name) + continue + } + + content, err := ioutil.ReadFile(targetPath) + if err != nil { + t.Error(err) + continue + } + + if string(content) != node.Content { + t.Errorf("wrong content for %v: want %q, got %q", name, node.Content, content) + } + case TestSymlink: + if fi.Mode()&os.ModeType != os.ModeSymlink { + t.Errorf("is not symlink: %v, %o != %o", name, fi.Mode(), os.ModeSymlink) + continue + } + + target, err := fs.Readlink(targetPath) + if err != nil { + t.Error(err) + continue + } + + if target != node.Target { + t.Errorf("wrong target for %v: want %q, got %q", name, node.Target, target) + } + case TestDir: + if !fi.IsDir() { + t.Errorf("is not directory: %v", name) + } + } + } + }) + } +} + +func TestTestWalkFiles(t *testing.T) { + var tests = []struct { + dir TestDir + want map[string]string + }{ + { + dir: TestDir{ + "foo": TestFile{Content: "foo"}, + "subdir": TestDir{ + "subfile": TestFile{Content: "bar"}, + }, + "x": TestDir{ + "y": TestDir{ + "link": TestSymlink{Target: filepath.FromSlash("../../foo")}, + }, + }, + }, + want: map[string]string{ + "foo": "", + "subdir": "", + filepath.FromSlash("subdir/subfile"): "", + "x": "", + filepath.FromSlash("x/y"): "", + filepath.FromSlash("x/y/link"): "", + }, + }, + } + + for _, test := range tests { + t.Run("", func(t *testing.T) { + tempdir, cleanup := restictest.TempDir(t) + defer cleanup() + + got := make(map[string]string) + + TestCreateFiles(t, tempdir, test.dir) + TestWalkFiles(t, tempdir, test.dir, func(path string, item interface{}) error { + p, err := filepath.Rel(tempdir, path) + if err != nil { + return err + } + + got[p] = 
fmt.Sprintf("%v", item) + return nil + }) + + if !cmp.Equal(test.want, got) { + t.Error(cmp.Diff(test.want, got)) + } + }) + } +} + +func TestTestEnsureFiles(t *testing.T) { + var tests = []struct { + expectFailure bool + files map[string]interface{} + want TestDir + unixOnly bool + }{ + { + files: map[string]interface{}{ + "foo": TestFile{Content: "foo"}, + "subdir/subfile": TestFile{Content: "bar"}, + "x/y/link": TestSymlink{Target: "../../foo"}, + }, + want: TestDir{ + "foo": TestFile{Content: "foo"}, + "subdir": TestDir{ + "subfile": TestFile{Content: "bar"}, + }, + "x": TestDir{ + "y": TestDir{ + "link": TestSymlink{Target: "../../foo"}, + }, + }, + }, + }, + { + expectFailure: true, + files: map[string]interface{}{ + "foo": TestFile{Content: "foo"}, + }, + want: TestDir{ + "foo": TestFile{Content: "foo"}, + "subdir": TestDir{ + "subfile": TestFile{Content: "bar"}, + }, + }, + }, + { + expectFailure: true, + files: map[string]interface{}{ + "foo": TestFile{Content: "foo"}, + "subdir/subfile": TestFile{Content: "bar"}, + }, + want: TestDir{ + "foo": TestFile{Content: "foo"}, + }, + }, + { + expectFailure: true, + files: map[string]interface{}{ + "foo": TestFile{Content: "xxx"}, + }, + want: TestDir{ + "foo": TestFile{Content: "foo"}, + }, + }, + { + expectFailure: true, + files: map[string]interface{}{ + "foo": TestSymlink{Target: "/xxx"}, + }, + want: TestDir{ + "foo": TestFile{Content: "foo"}, + }, + }, + { + expectFailure: true, + unixOnly: true, + files: map[string]interface{}{ + "foo": TestFile{Content: "foo"}, + }, + want: TestDir{ + "foo": TestSymlink{Target: "/xxx"}, + }, + }, + { + expectFailure: true, + unixOnly: true, + files: map[string]interface{}{ + "foo": TestSymlink{Target: "xxx"}, + }, + want: TestDir{ + "foo": TestSymlink{Target: "/yyy"}, + }, + }, + { + expectFailure: true, + files: map[string]interface{}{ + "foo": TestDir{ + "foo": TestFile{Content: "foo"}, + }, + }, + want: TestDir{ + "foo": TestFile{Content: "foo"}, + }, + }, + { + expectFailure: true, + files: map[string]interface{}{ + "foo": TestFile{Content: "foo"}, + }, + want: TestDir{ + "foo": TestDir{ + "foo": TestFile{Content: "foo"}, + }, + }, + }, + } + + for _, test := range tests { + t.Run("", func(t *testing.T) { + if test.unixOnly && runtime.GOOS == "windows" { + t.Skip("skip on Windows") + return + } + + tempdir, cleanup := restictest.TempDir(t) + defer cleanup() + + createFilesAt(t, tempdir, test.files) + + subtestT := testing.TB(t) + if test.expectFailure { + subtestT = &MockT{T: t} + } + + TestEnsureFiles(subtestT, tempdir, test.want) + + if test.expectFailure && !subtestT.(*MockT).HasFailed { + t.Fatal("expected failure of TestEnsureFiles not found") + } + }) + } +} + +func TestTestEnsureSnapshot(t *testing.T) { + var tests = []struct { + expectFailure bool + files map[string]interface{} + want TestDir + unixOnly bool + }{ + { + files: map[string]interface{}{ + "foo": TestFile{Content: "foo"}, + filepath.FromSlash("subdir/subfile"): TestFile{Content: "bar"}, + filepath.FromSlash("x/y/link"): TestSymlink{Target: filepath.FromSlash("../../foo")}, + }, + want: TestDir{ + "target": TestDir{ + "foo": TestFile{Content: "foo"}, + "subdir": TestDir{ + "subfile": TestFile{Content: "bar"}, + }, + "x": TestDir{ + "y": TestDir{ + "link": TestSymlink{Target: filepath.FromSlash("../../foo")}, + }, + }, + }, + }, + }, + { + expectFailure: true, + files: map[string]interface{}{ + "foo": TestFile{Content: "foo"}, + }, + want: TestDir{ + "target": TestDir{ + "bar": TestFile{Content: "foo"}, + }, + }, + }, + { + 
expectFailure: true, + files: map[string]interface{}{ + "foo": TestFile{Content: "foo"}, + "bar": TestFile{Content: "bar"}, + }, + want: TestDir{ + "target": TestDir{ + "foo": TestFile{Content: "foo"}, + }, + }, + }, + { + expectFailure: true, + files: map[string]interface{}{ + "foo": TestFile{Content: "foo"}, + }, + want: TestDir{ + "target": TestDir{ + "foo": TestFile{Content: "foo"}, + "bar": TestFile{Content: "bar"}, + }, + }, + }, + { + expectFailure: true, + files: map[string]interface{}{ + "foo": TestFile{Content: "foo"}, + }, + want: TestDir{ + "target": TestDir{ + "foo": TestDir{ + "foo": TestFile{Content: "foo"}, + }, + }, + }, + }, + { + expectFailure: true, + files: map[string]interface{}{ + "foo": TestSymlink{Target: filepath.FromSlash("x/y/z")}, + }, + want: TestDir{ + "target": TestDir{ + "foo": TestFile{Content: "foo"}, + }, + }, + }, + { + expectFailure: true, + unixOnly: true, + files: map[string]interface{}{ + "foo": TestSymlink{Target: filepath.FromSlash("x/y/z")}, + }, + want: TestDir{ + "target": TestDir{ + "foo": TestSymlink{Target: filepath.FromSlash("x/y/z2")}, + }, + }, + }, + { + expectFailure: true, + files: map[string]interface{}{ + "foo": TestFile{Content: "foo"}, + }, + want: TestDir{ + "target": TestDir{ + "foo": TestFile{Content: "xxx"}, + }, + }, + }, + } + + for _, test := range tests { + t.Run("", func(t *testing.T) { + if test.unixOnly && runtime.GOOS == "windows" { + t.Skip("skip on Windows") + return + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + tempdir, cleanup := restictest.TempDir(t) + defer cleanup() + + targetDir := filepath.Join(tempdir, "target") + err := fs.Mkdir(targetDir, 0700) + if err != nil { + t.Fatal(err) + } + + createFilesAt(t, targetDir, test.files) + + back := fs.TestChdir(t, tempdir) + defer back() + + repo, cleanup := repository.TestRepository(t) + defer cleanup() + + arch := New(repo, fs.Local{}, Options{}) + opts := SnapshotOptions{ + Time: time.Now(), + Hostname: "localhost", + Tags: []string{"test"}, + } + _, id, err := arch.Snapshot(ctx, []string{"."}, opts) + if err != nil { + t.Fatal(err) + } + + t.Logf("snapshot saved as %v", id.Str()) + + subtestT := testing.TB(t) + if test.expectFailure { + subtestT = &MockT{T: t} + } + + TestEnsureSnapshot(subtestT, repo, id, test.want) + + if test.expectFailure && !subtestT.(*MockT).HasFailed { + t.Fatal("expected failure of TestEnsureSnapshot not found") + } + }) + } +} diff --git a/internal/archiver/tree.go b/internal/archiver/tree.go new file mode 100644 index 000000000..0c8a21539 --- /dev/null +++ b/internal/archiver/tree.go @@ -0,0 +1,281 @@ +package archiver + +import ( + "fmt" + + "github.com/restic/restic/internal/debug" + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/fs" +) + +// Tree recursively defines how a snapshot should look like when +// archived. +// +// When `Path` is set, this is a leaf node and the contents of `Path` should be +// inserted at this point in the tree. +// +// The attribute `Root` is used to distinguish between files/dirs which have +// the same name, but live in a separate directory on the local file system. +// +// `FileInfoPath` is used to extract metadata for intermediate (=non-leaf) +// trees. 
+type Tree struct { + Nodes map[string]Tree + Path string // where the files/dirs to be saved are found + FileInfoPath string // where the dir can be found that is not included itself, but its subdirs + Root string // parent directory of the tree +} + +// pathComponents returns all path components of p. If a virtual directory +// (volume name on Windows) is added, virtualPrefix is set to true. See the +// tests for examples. +func pathComponents(fs fs.FS, p string, includeRelative bool) (components []string, virtualPrefix bool) { + volume := fs.VolumeName(p) + + if !fs.IsAbs(p) { + if !includeRelative { + p = fs.Join(fs.Separator(), p) + } + } + + p = fs.Clean(p) + + for { + dir, file := fs.Dir(p), fs.Base(p) + + if p == dir { + break + } + + components = append(components, file) + p = dir + } + + // reverse components + for i := len(components)/2 - 1; i >= 0; i-- { + opp := len(components) - 1 - i + components[i], components[opp] = components[opp], components[i] + } + + if volume != "" { + // strip colon + if len(volume) == 2 && volume[1] == ':' { + volume = volume[:1] + } + + components = append([]string{volume}, components...) + virtualPrefix = true + } + + return components, virtualPrefix +} + +// rootDirectory returns the directory which contains the first element of target. +func rootDirectory(fs fs.FS, target string) string { + if target == "" { + return "" + } + + if fs.IsAbs(target) { + return fs.Join(fs.VolumeName(target), fs.Separator()) + } + + target = fs.Clean(target) + pc, _ := pathComponents(fs, target, true) + + rel := "." + for _, c := range pc { + if c == ".." { + rel = fs.Join(rel, c) + } + } + + return rel +} + +// Add adds a new file or directory to the tree. +func (t *Tree) Add(fs fs.FS, path string) error { + if path == "" { + panic("invalid path (empty string)") + } + + if t.Nodes == nil { + t.Nodes = make(map[string]Tree) + } + + pc, virtualPrefix := pathComponents(fs, path, false) + if len(pc) == 0 { + return errors.New("invalid path (no path components)") + } + + name := pc[0] + root := rootDirectory(fs, path) + tree := Tree{Root: root} + + origName := name + i := 0 + for { + other, ok := t.Nodes[name] + if !ok { + break + } + + i++ + if other.Root == root { + tree = other + break + } + + // resolve conflict and try again + name = fmt.Sprintf("%s-%d", origName, i) + continue + } + + if len(pc) > 1 { + subroot := fs.Join(root, origName) + if virtualPrefix { + // use the original root dir if this is a virtual directory (volume name on Windows) + subroot = root + } + err := tree.add(fs, path, subroot, pc[1:]) + if err != nil { + return err + } + tree.FileInfoPath = subroot + } else { + tree.Path = path + } + + t.Nodes[name] = tree + return nil +} + +// add adds a new target path into the tree. 
+func (t *Tree) add(fs fs.FS, target, root string, pc []string) error { + if len(pc) == 0 { + return errors.Errorf("invalid path %q", target) + } + + if t.Nodes == nil { + t.Nodes = make(map[string]Tree) + } + + name := pc[0] + + if len(pc) == 1 { + tree, ok := t.Nodes[name] + + if !ok { + t.Nodes[name] = Tree{Path: target} + return nil + } + + if tree.Path != "" { + return errors.Errorf("path is already set for target %v", target) + } + tree.Path = target + t.Nodes[name] = tree + return nil + } + + tree := Tree{} + if other, ok := t.Nodes[name]; ok { + tree = other + } + + subroot := fs.Join(root, name) + tree.FileInfoPath = subroot + + err := tree.add(fs, target, subroot, pc[1:]) + if err != nil { + return err + } + t.Nodes[name] = tree + + return nil +} + +func (t Tree) String() string { + return formatTree(t, "") +} + +// formatTree returns a text representation of the tree t. +func formatTree(t Tree, indent string) (s string) { + for name, node := range t.Nodes { + s += fmt.Sprintf("%v/%v, root %q, path %q, meta %q\n", indent, name, node.Root, node.Path, node.FileInfoPath) + s += formatTree(node, indent+" ") + } + return s +} + +// unrollTree unrolls the tree so that only leaf nodes have Path set. +func unrollTree(f fs.FS, t *Tree) error { + // if the current tree is a leaf node (Path is set) and has additional + // nodes, add the contents of Path to the nodes. + if t.Path != "" && len(t.Nodes) > 0 { + debug.Log("resolve path %v", t.Path) + entries, err := fs.ReadDirNames(f, t.Path) + if err != nil { + return err + } + + for _, entry := range entries { + if node, ok := t.Nodes[entry]; ok { + if node.Path == "" { + node.Path = f.Join(t.Path, entry) + t.Nodes[entry] = node + continue + } + + if node.Path == f.Join(t.Path, entry) { + continue + } + + return errors.Errorf("tree unrollTree: collision on path, node %#v, path %q", node, f.Join(t.Path, entry)) + } + t.Nodes[entry] = Tree{Path: f.Join(t.Path, entry)} + } + t.Path = "" + } + + for i, subtree := range t.Nodes { + err := unrollTree(f, &subtree) + if err != nil { + return err + } + + t.Nodes[i] = subtree + } + + return nil +} + +// NewTree creates a Tree from the target files/directories. +func NewTree(fs fs.FS, targets []string) (*Tree, error) { + debug.Log("targets: %v", targets) + tree := &Tree{} + seen := make(map[string]struct{}) + for _, target := range targets { + target = fs.Clean(target) + + // skip duplicate targets + if _, ok := seen[target]; ok { + continue + } + seen[target] = struct{}{} + + err := tree.Add(fs, target) + if err != nil { + return nil, err + } + } + + debug.Log("before unroll:\n%v", tree) + err := unrollTree(fs, tree) + if err != nil { + return nil, err + } + + debug.Log("result:\n%v", tree) + return tree, nil +} diff --git a/internal/archiver/tree_saver.go b/internal/archiver/tree_saver.go new file mode 100644 index 000000000..29b899e82 --- /dev/null +++ b/internal/archiver/tree_saver.go @@ -0,0 +1,171 @@ +package archiver + +import ( + "context" + + "github.com/restic/restic/internal/debug" + "github.com/restic/restic/internal/restic" + tomb "gopkg.in/tomb.v2" +) + +// FutureTree is returned by Save and will return the data once it +// has been processed. +type FutureTree struct { + ch <-chan saveTreeResponse + res saveTreeResponse +} + +// Wait blocks until the data has been received or ctx is cancelled. +func (s *FutureTree) Wait(ctx context.Context) { + select { + case <-ctx.Done(): + return + case res, ok := <-s.ch: + if ok { + s.res = res + } + } +} + +// Node returns the node. 
+func (s *FutureTree) Node() *restic.Node { + return s.res.node +} + +// Stats returns the stats for the file. +func (s *FutureTree) Stats() ItemStats { + return s.res.stats +} + +// TreeSaver concurrently saves incoming trees to the repo. +type TreeSaver struct { + saveTree func(context.Context, *restic.Tree) (restic.ID, ItemStats, error) + errFn ErrorFunc + + ch chan<- saveTreeJob + done <-chan struct{} +} + +// NewTreeSaver returns a new tree saver. A worker pool with treeWorkers is +// started, it is stopped when ctx is cancelled. +func NewTreeSaver(ctx context.Context, t *tomb.Tomb, treeWorkers uint, saveTree func(context.Context, *restic.Tree) (restic.ID, ItemStats, error), errFn ErrorFunc) *TreeSaver { + ch := make(chan saveTreeJob) + + s := &TreeSaver{ + ch: ch, + done: t.Dying(), + saveTree: saveTree, + errFn: errFn, + } + + for i := uint(0); i < treeWorkers; i++ { + t.Go(func() error { + return s.worker(t.Context(ctx), ch) + }) + } + + return s +} + +// Save stores the dir d and returns the data once it has been completed. +func (s *TreeSaver) Save(ctx context.Context, snPath string, node *restic.Node, nodes []FutureNode) FutureTree { + ch := make(chan saveTreeResponse, 1) + job := saveTreeJob{ + snPath: snPath, + node: node, + nodes: nodes, + ch: ch, + } + select { + case s.ch <- job: + case <-s.done: + debug.Log("not saving tree, TreeSaver is done") + close(ch) + return FutureTree{ch: ch} + case <-ctx.Done(): + debug.Log("not saving tree, context is cancelled") + close(ch) + return FutureTree{ch: ch} + } + + return FutureTree{ch: ch} +} + +type saveTreeJob struct { + snPath string + nodes []FutureNode + node *restic.Node + ch chan<- saveTreeResponse +} + +type saveTreeResponse struct { + node *restic.Node + stats ItemStats +} + +// save stores the nodes as a tree in the repo. 
+func (s *TreeSaver) save(ctx context.Context, snPath string, node *restic.Node, nodes []FutureNode) (*restic.Node, ItemStats, error) { + var stats ItemStats + + tree := restic.NewTree() + for _, fn := range nodes { + fn.wait(ctx) + + // return the error if it wasn't ignored + if fn.err != nil { + debug.Log("err for %v: %v", fn.snPath, fn.err) + fn.err = s.errFn(fn.target, fn.fi, fn.err) + if fn.err == nil { + // ignore error + continue + } + + return nil, stats, fn.err + } + + // when the error is ignored, the node could not be saved, so ignore it + if fn.node == nil { + debug.Log("%v excluded: %v", fn.snPath, fn.target) + continue + } + + debug.Log("insert %v", fn.node.Name) + err := tree.Insert(fn.node) + if err != nil { + return nil, stats, err + } + } + + id, treeStats, err := s.saveTree(ctx, tree) + stats.Add(treeStats) + if err != nil { + return nil, stats, err + } + + node.Subtree = &id + return node, stats, nil +} + +func (s *TreeSaver) worker(ctx context.Context, jobs <-chan saveTreeJob) error { + for { + var job saveTreeJob + select { + case <-ctx.Done(): + return nil + case job = <-jobs: + } + + node, stats, err := s.save(ctx, job.snPath, job.node, job.nodes) + if err != nil { + debug.Log("error saving tree blob: %v", err) + close(job.ch) + return err + } + + job.ch <- saveTreeResponse{ + node: node, + stats: stats, + } + close(job.ch) + } +} diff --git a/internal/archiver/tree_saver_test.go b/internal/archiver/tree_saver_test.go new file mode 100644 index 000000000..3f58da222 --- /dev/null +++ b/internal/archiver/tree_saver_test.go @@ -0,0 +1,120 @@ +package archiver + +import ( + "context" + "fmt" + "os" + "runtime" + "sync/atomic" + "testing" + + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/restic" + tomb "gopkg.in/tomb.v2" +) + +func TestTreeSaver(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + var tmb tomb.Tomb + + saveFn := func(context.Context, *restic.Tree) (restic.ID, ItemStats, error) { + return restic.NewRandomID(), ItemStats{TreeBlobs: 1, TreeSize: 123}, nil + } + + errFn := func(snPath string, fi os.FileInfo, err error) error { + return nil + } + + b := NewTreeSaver(ctx, &tmb, uint(runtime.NumCPU()), saveFn, errFn) + + var results []FutureTree + + for i := 0; i < 20; i++ { + node := &restic.Node{ + Name: fmt.Sprintf("file-%d", i), + } + + fb := b.Save(ctx, "/", node, nil) + results = append(results, fb) + } + + for _, tree := range results { + tree.Wait(ctx) + } + + tmb.Kill(nil) + + err := tmb.Wait() + if err != nil { + t.Fatal(err) + } +} + +func TestTreeSaverError(t *testing.T) { + var tests = []struct { + trees int + failAt int32 + }{ + {1, 1}, + {20, 2}, + {20, 5}, + {20, 15}, + {200, 150}, + } + + errTest := errors.New("test error") + + for _, test := range tests { + t.Run("", func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + var tmb tomb.Tomb + + var num int32 + saveFn := func(context.Context, *restic.Tree) (restic.ID, ItemStats, error) { + val := atomic.AddInt32(&num, 1) + if val == test.failAt { + t.Logf("sending error for request %v\n", test.failAt) + return restic.ID{}, ItemStats{}, errTest + } + return restic.NewRandomID(), ItemStats{TreeBlobs: 1, TreeSize: 123}, nil + } + + errFn := func(snPath string, fi os.FileInfo, err error) error { + t.Logf("ignoring error %v\n", err) + return nil + } + + b := NewTreeSaver(ctx, &tmb, uint(runtime.NumCPU()), saveFn, errFn) + + var results []FutureTree + + for i := 0; i < test.trees; 
i++ { + node := &restic.Node{ + Name: fmt.Sprintf("file-%d", i), + } + + fb := b.Save(ctx, "/", node, nil) + results = append(results, fb) + } + + for _, tree := range results { + tree.Wait(ctx) + } + + tmb.Kill(nil) + + err := tmb.Wait() + if err == nil { + t.Errorf("expected error not found") + } + + if err != errTest { + t.Fatalf("unexpected error found: %v", err) + } + }) + } +} diff --git a/internal/archiver/tree_test.go b/internal/archiver/tree_test.go new file mode 100644 index 000000000..5d460bb37 --- /dev/null +++ b/internal/archiver/tree_test.go @@ -0,0 +1,464 @@ +package archiver + +import ( + "path/filepath" + "runtime" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/restic/restic/internal/fs" + restictest "github.com/restic/restic/internal/test" +) + +func TestPathComponents(t *testing.T) { + var tests = []struct { + p string + c []string + virtual bool + rel bool + win bool + }{ + { + p: "/foo/bar/baz", + c: []string{"foo", "bar", "baz"}, + }, + { + p: "/foo/bar/baz", + c: []string{"foo", "bar", "baz"}, + rel: true, + }, + { + p: "foo/bar/baz", + c: []string{"foo", "bar", "baz"}, + }, + { + p: "foo/bar/baz", + c: []string{"foo", "bar", "baz"}, + rel: true, + }, + { + p: "../foo/bar/baz", + c: []string{"foo", "bar", "baz"}, + }, + { + p: "../foo/bar/baz", + c: []string{"..", "foo", "bar", "baz"}, + rel: true, + }, + { + p: "c:/foo/bar/baz", + c: []string{"c", "foo", "bar", "baz"}, + virtual: true, + rel: true, + win: true, + }, + { + p: "c:/foo/../bar/baz", + c: []string{"c", "bar", "baz"}, + virtual: true, + win: true, + }, + { + p: `c:\foo\..\bar\baz`, + c: []string{"c", "bar", "baz"}, + virtual: true, + win: true, + }, + { + p: "c:/foo/../bar/baz", + c: []string{"c", "bar", "baz"}, + virtual: true, + rel: true, + win: true, + }, + { + p: `c:\foo\..\bar\baz`, + c: []string{"c", "bar", "baz"}, + virtual: true, + rel: true, + win: true, + }, + } + + for _, test := range tests { + t.Run("", func(t *testing.T) { + if test.win && runtime.GOOS != "windows" { + t.Skip("skip test on unix") + } + + c, v := pathComponents(fs.Local{}, filepath.FromSlash(test.p), test.rel) + if !cmp.Equal(test.c, c) { + t.Error(test.c, c) + } + + if v != test.virtual { + t.Errorf("unexpected virtual prefix count returned, want %v, got %v", test.virtual, v) + } + }) + } +} + +func TestRootDirectory(t *testing.T) { + var tests = []struct { + target string + root string + unix bool + win bool + }{ + {target: ".", root: "."}, + {target: "foo/bar/baz", root: "."}, + {target: "../foo/bar/baz", root: ".."}, + {target: "..", root: ".."}, + {target: "../../..", root: "../../.."}, + {target: "/home/foo", root: "/", unix: true}, + {target: "c:/home/foo", root: "c:/", win: true}, + {target: `c:\home\foo`, root: `c:\`, win: true}, + {target: "//host/share/foo", root: "//host/share/", win: true}, + } + + for _, test := range tests { + t.Run("", func(t *testing.T) { + if test.unix && runtime.GOOS == "windows" { + t.Skip("skip test on windows") + } + if test.win && runtime.GOOS != "windows" { + t.Skip("skip test on unix") + } + + root := rootDirectory(fs.Local{}, filepath.FromSlash(test.target)) + want := filepath.FromSlash(test.root) + if root != want { + t.Fatalf("wrong root directory, want %v, got %v", want, root) + } + }) + } +} + +func TestTree(t *testing.T) { + var tests = []struct { + targets []string + src TestDir + want Tree + unix bool + win bool + mustError bool + }{ + { + targets: []string{"foo"}, + want: Tree{Nodes: map[string]Tree{ + "foo": Tree{Path: "foo", Root: "."}, + }}, + }, + { + 
targets: []string{"foo", "bar", "baz"}, + want: Tree{Nodes: map[string]Tree{ + "foo": Tree{Path: "foo", Root: "."}, + "bar": Tree{Path: "bar", Root: "."}, + "baz": Tree{Path: "baz", Root: "."}, + }}, + }, + { + targets: []string{"foo/user1", "foo/user2", "foo/other"}, + want: Tree{Nodes: map[string]Tree{ + "foo": Tree{Root: ".", FileInfoPath: "foo", Nodes: map[string]Tree{ + "user1": Tree{Path: filepath.FromSlash("foo/user1")}, + "user2": Tree{Path: filepath.FromSlash("foo/user2")}, + "other": Tree{Path: filepath.FromSlash("foo/other")}, + }}, + }}, + }, + { + targets: []string{"foo/work/user1", "foo/work/user2"}, + want: Tree{Nodes: map[string]Tree{ + "foo": Tree{Root: ".", FileInfoPath: "foo", Nodes: map[string]Tree{ + "work": Tree{FileInfoPath: filepath.FromSlash("foo/work"), Nodes: map[string]Tree{ + "user1": Tree{Path: filepath.FromSlash("foo/work/user1")}, + "user2": Tree{Path: filepath.FromSlash("foo/work/user2")}, + }}, + }}, + }}, + }, + { + targets: []string{"foo/user1", "bar/user1", "foo/other"}, + want: Tree{Nodes: map[string]Tree{ + "foo": Tree{Root: ".", FileInfoPath: "foo", Nodes: map[string]Tree{ + "user1": Tree{Path: filepath.FromSlash("foo/user1")}, + "other": Tree{Path: filepath.FromSlash("foo/other")}, + }}, + "bar": Tree{Root: ".", FileInfoPath: "bar", Nodes: map[string]Tree{ + "user1": Tree{Path: filepath.FromSlash("bar/user1")}, + }}, + }}, + }, + { + targets: []string{"../work"}, + want: Tree{Nodes: map[string]Tree{ + "work": Tree{Root: "..", Path: filepath.FromSlash("../work")}, + }}, + }, + { + targets: []string{"../work/other"}, + want: Tree{Nodes: map[string]Tree{ + "work": Tree{Root: "..", FileInfoPath: filepath.FromSlash("../work"), Nodes: map[string]Tree{ + "other": Tree{Path: filepath.FromSlash("../work/other")}, + }}, + }}, + }, + { + targets: []string{"foo/user1", "../work/other", "foo/user2"}, + want: Tree{Nodes: map[string]Tree{ + "foo": Tree{Root: ".", FileInfoPath: "foo", Nodes: map[string]Tree{ + "user1": Tree{Path: filepath.FromSlash("foo/user1")}, + "user2": Tree{Path: filepath.FromSlash("foo/user2")}, + }}, + "work": Tree{Root: "..", FileInfoPath: filepath.FromSlash("../work"), Nodes: map[string]Tree{ + "other": Tree{Path: filepath.FromSlash("../work/other")}, + }}, + }}, + }, + { + targets: []string{"foo/user1", "../foo/other", "foo/user2"}, + want: Tree{Nodes: map[string]Tree{ + "foo": Tree{Root: ".", FileInfoPath: "foo", Nodes: map[string]Tree{ + "user1": Tree{Path: filepath.FromSlash("foo/user1")}, + "user2": Tree{Path: filepath.FromSlash("foo/user2")}, + }}, + "foo-1": Tree{Root: "..", FileInfoPath: filepath.FromSlash("../foo"), Nodes: map[string]Tree{ + "other": Tree{Path: filepath.FromSlash("../foo/other")}, + }}, + }}, + }, + { + src: TestDir{ + "foo": TestDir{ + "file": TestFile{Content: "file content"}, + "work": TestFile{Content: "work file content"}, + }, + }, + targets: []string{"foo", "foo/work"}, + want: Tree{Nodes: map[string]Tree{ + "foo": Tree{ + Root: ".", + FileInfoPath: "foo", + Nodes: map[string]Tree{ + "file": Tree{Path: filepath.FromSlash("foo/file")}, + "work": Tree{Path: filepath.FromSlash("foo/work")}, + }, + }, + }}, + }, + { + src: TestDir{ + "foo": TestDir{ + "file": TestFile{Content: "file content"}, + "work": TestDir{ + "other": TestFile{Content: "other file content"}, + }, + }, + }, + targets: []string{"foo/work", "foo"}, + want: Tree{Nodes: map[string]Tree{ + "foo": Tree{ + Root: ".", + FileInfoPath: "foo", + Nodes: map[string]Tree{ + "file": Tree{Path: filepath.FromSlash("foo/file")}, + "work": Tree{Path: 
filepath.FromSlash("foo/work")}, + }, + }, + }}, + }, + { + src: TestDir{ + "foo": TestDir{ + "work": TestDir{ + "user1": TestFile{Content: "file content"}, + "user2": TestFile{Content: "other file content"}, + }, + }, + }, + targets: []string{"foo/work", "foo/work/user2"}, + want: Tree{Nodes: map[string]Tree{ + "foo": Tree{Root: ".", FileInfoPath: "foo", Nodes: map[string]Tree{ + "work": Tree{ + FileInfoPath: filepath.FromSlash("foo/work"), + Nodes: map[string]Tree{ + "user1": Tree{Path: filepath.FromSlash("foo/work/user1")}, + "user2": Tree{Path: filepath.FromSlash("foo/work/user2")}, + }, + }, + }}, + }}, + }, + { + src: TestDir{ + "foo": TestDir{ + "work": TestDir{ + "user1": TestFile{Content: "file content"}, + "user2": TestFile{Content: "other file content"}, + }, + }, + }, + targets: []string{"foo/work/user2", "foo/work"}, + want: Tree{Nodes: map[string]Tree{ + "foo": Tree{Root: ".", FileInfoPath: "foo", Nodes: map[string]Tree{ + "work": Tree{FileInfoPath: filepath.FromSlash("foo/work"), + Nodes: map[string]Tree{ + "user1": Tree{Path: filepath.FromSlash("foo/work/user1")}, + "user2": Tree{Path: filepath.FromSlash("foo/work/user2")}, + }, + }, + }}, + }}, + }, + { + src: TestDir{ + "foo": TestDir{ + "other": TestFile{Content: "file content"}, + "work": TestDir{ + "user2": TestDir{ + "data": TestDir{ + "secret": TestFile{Content: "secret file content"}, + }, + }, + "user3": TestDir{ + "important.txt": TestFile{Content: "important work"}, + }, + }, + }, + }, + targets: []string{"foo/work/user2/data/secret", "foo"}, + want: Tree{Nodes: map[string]Tree{ + "foo": Tree{Root: ".", FileInfoPath: "foo", Nodes: map[string]Tree{ + "other": Tree{Path: filepath.FromSlash("foo/other")}, + "work": Tree{FileInfoPath: filepath.FromSlash("foo/work"), Nodes: map[string]Tree{ + "user2": Tree{FileInfoPath: filepath.FromSlash("foo/work/user2"), Nodes: map[string]Tree{ + "data": Tree{FileInfoPath: filepath.FromSlash("foo/work/user2/data"), Nodes: map[string]Tree{ + "secret": Tree{ + Path: filepath.FromSlash("foo/work/user2/data/secret"), + }, + }}, + }}, + "user3": Tree{Path: filepath.FromSlash("foo/work/user3")}, + }}, + }}, + }}, + }, + { + src: TestDir{ + "mnt": TestDir{ + "driveA": TestDir{ + "work": TestDir{ + "driveB": TestDir{ + "secret": TestFile{Content: "secret file content"}, + }, + "test1": TestDir{ + "important.txt": TestFile{Content: "important work"}, + }, + }, + "test2": TestDir{ + "important.txt": TestFile{Content: "other important work"}, + }, + }, + }, + }, + unix: true, + targets: []string{"mnt/driveA", "mnt/driveA/work/driveB"}, + want: Tree{Nodes: map[string]Tree{ + "mnt": Tree{Root: ".", FileInfoPath: filepath.FromSlash("mnt"), Nodes: map[string]Tree{ + "driveA": Tree{FileInfoPath: filepath.FromSlash("mnt/driveA"), Nodes: map[string]Tree{ + "work": Tree{FileInfoPath: filepath.FromSlash("mnt/driveA/work"), Nodes: map[string]Tree{ + "driveB": Tree{ + Path: filepath.FromSlash("mnt/driveA/work/driveB"), + }, + "test1": Tree{Path: filepath.FromSlash("mnt/driveA/work/test1")}, + }}, + "test2": Tree{Path: filepath.FromSlash("mnt/driveA/test2")}, + }}, + }}, + }}, + }, + { + targets: []string{"foo/work/user", "foo/work/user"}, + want: Tree{Nodes: map[string]Tree{ + "foo": Tree{Root: ".", FileInfoPath: "foo", Nodes: map[string]Tree{ + "work": Tree{FileInfoPath: filepath.FromSlash("foo/work"), Nodes: map[string]Tree{ + "user": Tree{Path: filepath.FromSlash("foo/work/user")}, + }}, + }}, + }}, + }, + { + targets: []string{"./foo/work/user", "foo/work/user"}, + want: Tree{Nodes: map[string]Tree{ 
+ "foo": Tree{Root: ".", FileInfoPath: "foo", Nodes: map[string]Tree{ + "work": Tree{FileInfoPath: filepath.FromSlash("foo/work"), Nodes: map[string]Tree{ + "user": Tree{Path: filepath.FromSlash("foo/work/user")}, + }}, + }}, + }}, + }, + { + win: true, + targets: []string{`c:\users\foobar\temp`}, + want: Tree{Nodes: map[string]Tree{ + "c": Tree{Root: `c:\`, FileInfoPath: `c:\`, Nodes: map[string]Tree{ + "users": Tree{FileInfoPath: `c:\users`, Nodes: map[string]Tree{ + "foobar": Tree{FileInfoPath: `c:\users\foobar`, Nodes: map[string]Tree{ + "temp": Tree{Path: `c:\users\foobar\temp`}, + }}, + }}, + }}, + }}, + }, + { + targets: []string{"."}, + mustError: true, + }, + { + targets: []string{".."}, + mustError: true, + }, + { + targets: []string{"../.."}, + mustError: true, + }, + } + + for _, test := range tests { + t.Run("", func(t *testing.T) { + if test.unix && runtime.GOOS == "windows" { + t.Skip("skip test on windows") + } + + if test.win && runtime.GOOS != "windows" { + t.Skip("skip test on unix") + } + + tempdir, cleanup := restictest.TempDir(t) + defer cleanup() + + TestCreateFiles(t, tempdir, test.src) + + back := fs.TestChdir(t, tempdir) + defer back() + + tree, err := NewTree(fs.Local{}, test.targets) + if test.mustError { + if err == nil { + t.Fatal("expected error, got nil") + } + t.Logf("found expected error: %v", err) + return + } + + if err != nil { + t.Fatal(err) + } + + if !cmp.Equal(&test.want, tree) { + t.Error(cmp.Diff(&test.want, tree)) + } + }) + } +} diff --git a/internal/backend/azure/azure.go b/internal/backend/azure/azure.go new file mode 100644 index 000000000..7a4617a44 --- /dev/null +++ b/internal/backend/azure/azure.go @@ -0,0 +1,403 @@ +package azure + +import ( + "context" + "encoding/base64" + "io" + "io/ioutil" + "net/http" + "os" + "path" + "strings" + + "github.com/Azure/azure-sdk-for-go/storage" + "github.com/restic/restic/internal/backend" + "github.com/restic/restic/internal/debug" + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/restic" +) + +// Backend stores data on an azure endpoint. +type Backend struct { + accountName string + container *storage.Container + sem *backend.Semaphore + prefix string + listMaxItems int + backend.Layout +} + +const defaultListMaxItems = 5000 + +// make sure that *Backend implements backend.Backend +var _ restic.Backend = &Backend{} + +func open(cfg Config, rt http.RoundTripper) (*Backend, error) { + debug.Log("open, config %#v", cfg) + + client, err := storage.NewBasicClient(cfg.AccountName, cfg.AccountKey) + if err != nil { + return nil, errors.Wrap(err, "NewBasicClient") + } + + client.HTTPClient = &http.Client{Transport: rt} + + service := client.GetBlobService() + + sem, err := backend.NewSemaphore(cfg.Connections) + if err != nil { + return nil, err + } + + be := &Backend{ + container: service.GetContainerReference(cfg.Container), + accountName: cfg.AccountName, + sem: sem, + prefix: cfg.Prefix, + Layout: &backend.DefaultLayout{ + Path: cfg.Prefix, + Join: path.Join, + }, + listMaxItems: defaultListMaxItems, + } + + return be, nil +} + +// Open opens the Azure backend at specified container. +func Open(cfg Config, rt http.RoundTripper) (*Backend, error) { + return open(cfg, rt) +} + +// Create opens the Azure backend at specified container and creates the container if +// it does not exist yet. 
+func Create(cfg Config, rt http.RoundTripper) (*Backend, error) { + be, err := open(cfg, rt) + + if err != nil { + return nil, errors.Wrap(err, "open") + } + + options := storage.CreateContainerOptions{ + Access: storage.ContainerAccessTypePrivate, + } + + _, err = be.container.CreateIfNotExists(&options) + if err != nil { + return nil, errors.Wrap(err, "container.CreateIfNotExists") + } + + return be, nil +} + +// SetListMaxItems sets the number of list items to load per request. +func (be *Backend) SetListMaxItems(i int) { + be.listMaxItems = i +} + +// IsNotExist returns true if the error is caused by a not existing file. +func (be *Backend) IsNotExist(err error) bool { + debug.Log("IsNotExist(%T, %#v)", err, err) + return os.IsNotExist(err) +} + +// Join combines path components with slashes. +func (be *Backend) Join(p ...string) string { + return path.Join(p...) +} + +// Location returns this backend's location (the container name). +func (be *Backend) Location() string { + return be.Join(be.container.Name, be.prefix) +} + +// Path returns the path in the bucket that is used for this backend. +func (be *Backend) Path() string { + return be.prefix +} + +// Save stores data in the backend at the handle. +func (be *Backend) Save(ctx context.Context, h restic.Handle, rd restic.RewindReader) error { + if err := h.Valid(); err != nil { + return err + } + + objName := be.Filename(h) + + debug.Log("Save %v at %v", h, objName) + + be.sem.GetToken() + + debug.Log("InsertObject(%v, %v)", be.container.Name, objName) + + var err error + if rd.Length() < 256*1024*1024 { + // wrap the reader so that net/http client cannot close the reader + dataReader := ioutil.NopCloser(rd) + + // if it's smaller than 256miB, then just create the file directly from the reader + err = be.container.GetBlobReference(objName).CreateBlockBlobFromReader(dataReader, nil) + } else { + // otherwise use the more complicated method + err = be.saveLarge(ctx, objName, rd) + + } + + be.sem.ReleaseToken() + debug.Log("%v, err %#v", objName, err) + + return errors.Wrap(err, "CreateBlockBlobFromReader") +} + +func (be *Backend) saveLarge(ctx context.Context, objName string, rd restic.RewindReader) error { + // create the file on the server + file := be.container.GetBlobReference(objName) + err := file.CreateBlockBlob(nil) + if err != nil { + return errors.Wrap(err, "CreateBlockBlob") + } + + // read the data, in 100 MiB chunks + buf := make([]byte, 100*1024*1024) + var blocks []storage.Block + + for { + n, err := io.ReadFull(rd, buf) + if err == io.ErrUnexpectedEOF { + err = nil + } + if err == io.EOF { + // end of file reached, no bytes have been read at all + break + } + + if err != nil { + return errors.Wrap(err, "ReadFull") + } + + buf = buf[:n] + + // upload it as a new "block", use the base64 hash for the ID + h := restic.Hash(buf) + id := base64.StdEncoding.EncodeToString(h[:]) + debug.Log("PutBlock %v with %d bytes", id, len(buf)) + err = file.PutBlock(id, buf, nil) + if err != nil { + return errors.Wrap(err, "PutBlock") + } + + blocks = append(blocks, storage.Block{ + ID: id, + Status: "Uncommitted", + }) + } + + debug.Log("uploaded %d parts: %v", len(blocks), blocks) + err = file.PutBlockList(blocks, nil) + debug.Log("PutBlockList returned %v", err) + return errors.Wrap(err, "PutBlockList") +} + +// wrapReader wraps an io.ReadCloser to run an additional function on Close. 
+type wrapReader struct { + io.ReadCloser + f func() +} + +func (wr wrapReader) Close() error { + err := wr.ReadCloser.Close() + wr.f() + return err +} + +// Load runs fn with a reader that yields the contents of the file at h at the +// given offset. +func (be *Backend) Load(ctx context.Context, h restic.Handle, length int, offset int64, fn func(rd io.Reader) error) error { + return backend.DefaultLoad(ctx, h, length, offset, be.openReader, fn) +} + +func (be *Backend) openReader(ctx context.Context, h restic.Handle, length int, offset int64) (io.ReadCloser, error) { + debug.Log("Load %v, length %v, offset %v from %v", h, length, offset, be.Filename(h)) + if err := h.Valid(); err != nil { + return nil, err + } + + if offset < 0 { + return nil, errors.New("offset is negative") + } + + if length < 0 { + return nil, errors.Errorf("invalid length %d", length) + } + + objName := be.Filename(h) + blob := be.container.GetBlobReference(objName) + + start := uint64(offset) + var end uint64 + + if length > 0 { + end = uint64(offset + int64(length) - 1) + } else { + end = 0 + } + + be.sem.GetToken() + + rd, err := blob.GetRange(&storage.GetBlobRangeOptions{Range: &storage.BlobRange{Start: start, End: end}}) + if err != nil { + be.sem.ReleaseToken() + return nil, err + } + + closeRd := wrapReader{ + ReadCloser: rd, + f: func() { + debug.Log("Close()") + be.sem.ReleaseToken() + }, + } + + return closeRd, err +} + +// Stat returns information about a blob. +func (be *Backend) Stat(ctx context.Context, h restic.Handle) (restic.FileInfo, error) { + debug.Log("%v", h) + + objName := be.Filename(h) + blob := be.container.GetBlobReference(objName) + + be.sem.GetToken() + err := blob.GetProperties(nil) + be.sem.ReleaseToken() + + if err != nil { + debug.Log("blob.GetProperties err %v", err) + return restic.FileInfo{}, errors.Wrap(err, "blob.GetProperties") + } + + fi := restic.FileInfo{ + Size: int64(blob.Properties.ContentLength), + Name: h.Name, + } + return fi, nil +} + +// Test returns true if a blob of the given type and name exists in the backend. +func (be *Backend) Test(ctx context.Context, h restic.Handle) (bool, error) { + objName := be.Filename(h) + + be.sem.GetToken() + found, err := be.container.GetBlobReference(objName).Exists() + be.sem.ReleaseToken() + + if err != nil { + return false, err + } + return found, nil +} + +// Remove removes the blob with the given name and type. +func (be *Backend) Remove(ctx context.Context, h restic.Handle) error { + objName := be.Filename(h) + + be.sem.GetToken() + _, err := be.container.GetBlobReference(objName).DeleteIfExists(nil) + be.sem.ReleaseToken() + + debug.Log("Remove(%v) at %v -> err %v", h, objName, err) + return errors.Wrap(err, "client.RemoveObject") +} + +// List runs fn for each file in the backend which has the type t. When an +// error occurs (or fn returns an error), List stops and returns it. 
+func (be *Backend) List(ctx context.Context, t restic.FileType, fn func(restic.FileInfo) error) error { + debug.Log("listing %v", t) + + prefix, _ := be.Basedir(t) + + // make sure prefix ends with a slash + if !strings.HasSuffix(prefix, "/") { + prefix += "/" + } + + params := storage.ListBlobsParameters{ + MaxResults: uint(be.listMaxItems), + Prefix: prefix, + } + + for { + be.sem.GetToken() + obj, err := be.container.ListBlobs(params) + be.sem.ReleaseToken() + + if err != nil { + return err + } + + debug.Log("got %v objects", len(obj.Blobs)) + + for _, item := range obj.Blobs { + m := strings.TrimPrefix(item.Name, prefix) + if m == "" { + continue + } + + fi := restic.FileInfo{ + Name: path.Base(m), + Size: item.Properties.ContentLength, + } + + if ctx.Err() != nil { + return ctx.Err() + } + + err := fn(fi) + if err != nil { + return err + } + + if ctx.Err() != nil { + return ctx.Err() + } + + } + + if obj.NextMarker == "" { + break + } + params.Marker = obj.NextMarker + } + + return ctx.Err() +} + +// Remove keys for a specified backend type. +func (be *Backend) removeKeys(ctx context.Context, t restic.FileType) error { + return be.List(ctx, t, func(fi restic.FileInfo) error { + return be.Remove(ctx, restic.Handle{Type: t, Name: fi.Name}) + }) +} + +// Delete removes all restic keys in the bucket. It will not remove the bucket itself. +func (be *Backend) Delete(ctx context.Context) error { + alltypes := []restic.FileType{ + restic.DataFile, + restic.KeyFile, + restic.LockFile, + restic.SnapshotFile, + restic.IndexFile} + + for _, t := range alltypes { + err := be.removeKeys(ctx, t) + if err != nil { + return nil + } + } + + return be.Remove(ctx, restic.Handle{Type: restic.ConfigFile}) +} + +// Close does nothing +func (be *Backend) Close() error { return nil } diff --git a/internal/backend/azure/azure_test.go b/internal/backend/azure/azure_test.go new file mode 100644 index 000000000..f5ef72395 --- /dev/null +++ b/internal/backend/azure/azure_test.go @@ -0,0 +1,218 @@ +package azure_test + +import ( + "bytes" + "context" + "fmt" + "io" + "os" + "testing" + "time" + + "github.com/restic/restic/internal/backend" + "github.com/restic/restic/internal/backend/azure" + "github.com/restic/restic/internal/backend/test" + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/restic" + rtest "github.com/restic/restic/internal/test" +) + +func newAzureTestSuite(t testing.TB) *test.Suite { + tr, err := backend.Transport(backend.TransportOptions{}) + if err != nil { + t.Fatalf("cannot create transport for tests: %v", err) + } + + return &test.Suite{ + // do not use excessive data + MinimalData: true, + + // NewConfig returns a config for a new temporary backend that will be used in tests. + NewConfig: func() (interface{}, error) { + azcfg, err := azure.ParseConfig(os.Getenv("RESTIC_TEST_AZURE_REPOSITORY")) + if err != nil { + return nil, err + } + + cfg := azcfg.(azure.Config) + cfg.AccountName = os.Getenv("RESTIC_TEST_AZURE_ACCOUNT_NAME") + cfg.AccountKey = os.Getenv("RESTIC_TEST_AZURE_ACCOUNT_KEY") + cfg.Prefix = fmt.Sprintf("test-%d", time.Now().UnixNano()) + return cfg, nil + }, + + // CreateFn is a function that creates a temporary repository for the tests. 
+ Create: func(config interface{}) (restic.Backend, error) { + cfg := config.(azure.Config) + + be, err := azure.Create(cfg, tr) + if err != nil { + return nil, err + } + + exists, err := be.Test(context.TODO(), restic.Handle{Type: restic.ConfigFile}) + if err != nil { + return nil, err + } + + if exists { + return nil, errors.New("config already exists") + } + + return be, nil + }, + + // OpenFn is a function that opens a previously created temporary repository. + Open: func(config interface{}) (restic.Backend, error) { + cfg := config.(azure.Config) + + return azure.Open(cfg, tr) + }, + + // CleanupFn removes data created during the tests. + Cleanup: func(config interface{}) error { + cfg := config.(azure.Config) + + be, err := azure.Open(cfg, tr) + if err != nil { + return err + } + + return be.Delete(context.TODO()) + }, + } +} + +func TestBackendAzure(t *testing.T) { + defer func() { + if t.Skipped() { + rtest.SkipDisallowed(t, "restic/backend/azure.TestBackendAzure") + } + }() + + vars := []string{ + "RESTIC_TEST_AZURE_ACCOUNT_NAME", + "RESTIC_TEST_AZURE_ACCOUNT_KEY", + "RESTIC_TEST_AZURE_REPOSITORY", + } + + for _, v := range vars { + if os.Getenv(v) == "" { + t.Skipf("environment variable %v not set", v) + return + } + } + + t.Logf("run tests") + newAzureTestSuite(t).RunTests(t) +} + +func BenchmarkBackendAzure(t *testing.B) { + vars := []string{ + "RESTIC_TEST_AZURE_ACCOUNT_NAME", + "RESTIC_TEST_AZURE_ACCOUNT_KEY", + "RESTIC_TEST_AZURE_REPOSITORY", + } + + for _, v := range vars { + if os.Getenv(v) == "" { + t.Skipf("environment variable %v not set", v) + return + } + } + + t.Logf("run tests") + newAzureTestSuite(t).RunBenchmarks(t) +} + +func TestUploadLargeFile(t *testing.T) { + if os.Getenv("RESTIC_AZURE_TEST_LARGE_UPLOAD") == "" { + t.Skip("set RESTIC_AZURE_TEST_LARGE_UPLOAD=1 to test large uploads") + return + } + + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + + if os.Getenv("RESTIC_TEST_AZURE_REPOSITORY") == "" { + t.Skipf("environment variables not available") + return + } + + azcfg, err := azure.ParseConfig(os.Getenv("RESTIC_TEST_AZURE_REPOSITORY")) + if err != nil { + if err != nil { + t.Fatal(err) + } + } + + cfg := azcfg.(azure.Config) + cfg.AccountName = os.Getenv("RESTIC_TEST_AZURE_ACCOUNT_NAME") + cfg.AccountKey = os.Getenv("RESTIC_TEST_AZURE_ACCOUNT_KEY") + cfg.Prefix = fmt.Sprintf("test-upload-large-%d", time.Now().UnixNano()) + + tr, err := backend.Transport(backend.TransportOptions{}) + if err != nil { + t.Fatal(err) + } + + be, err := azure.Create(cfg, tr) + if err != nil { + if err != nil { + t.Fatal(err) + } + } + + defer func() { + err := be.Delete(ctx) + if err != nil { + t.Fatal(err) + } + }() + + data := rtest.Random(23, 300*1024*1024) + id := restic.Hash(data) + h := restic.Handle{Name: id.String(), Type: restic.DataFile} + + t.Logf("hash of %d bytes: %v", len(data), id) + + err = be.Save(ctx, h, restic.NewByteReader(data)) + if err != nil { + t.Fatal(err) + } + defer func() { + err := be.Remove(ctx, h) + if err != nil { + t.Fatal(err) + } + }() + + var tests = []struct { + offset, length int + }{ + {0, len(data)}, + {23, 1024}, + {23 + 100*1024, 500}, + {888 + 200*1024, 89999}, + {888 + 100*1024*1024, 120 * 1024 * 1024}, + } + + for _, test := range tests { + t.Run("", func(t *testing.T) { + want := data[test.offset : test.offset+test.length] + + buf := make([]byte, test.length) + err = be.Load(ctx, h, test.length, int64(test.offset), func(rd io.Reader) error { + _, err = io.ReadFull(rd, buf) + return err + }) + if err != nil { + 
t.Fatal(err) + } + + if !bytes.Equal(buf, want) { + t.Fatalf("wrong bytes returned") + } + }) + } +} diff --git a/internal/backend/azure/config.go b/internal/backend/azure/config.go new file mode 100644 index 000000000..15a2e5cf9 --- /dev/null +++ b/internal/backend/azure/config.go @@ -0,0 +1,57 @@ +package azure + +import ( + "path" + "strings" + + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/options" +) + +// Config contains all configuration necessary to connect to an azure compatible +// server. +type Config struct { + AccountName string + AccountKey string + Container string + Prefix string + + Connections uint `option:"connections" help:"set a limit for the number of concurrent connections (default: 20)"` +} + +// NewConfig returns a new Config with the default values filled in. +func NewConfig() Config { + return Config{ + Connections: 5, + } +} + +func init() { + options.Register("azure", Config{}) +} + +// ParseConfig parses the string s and extracts the azure config. The +// configuration format is azure:containerName:/[prefix]. +func ParseConfig(s string) (interface{}, error) { + if !strings.HasPrefix(s, "azure:") { + return nil, errors.New("azure: invalid format") + } + + // strip prefix "azure:" + s = s[6:] + + // use the first entry of the path as the bucket name and the + // remainder as prefix + data := strings.SplitN(s, ":", 2) + if len(data) < 2 { + return nil, errors.New("azure: invalid format: bucket name or path not found") + } + container, path := data[0], path.Clean(data[1]) + if strings.HasPrefix(path, "/") { + path = path[1:] + } + cfg := NewConfig() + cfg.Container = container + cfg.Prefix = path + return cfg, nil +} diff --git a/internal/backend/azure/config_test.go b/internal/backend/azure/config_test.go new file mode 100644 index 000000000..a57542e77 --- /dev/null +++ b/internal/backend/azure/config_test.go @@ -0,0 +1,40 @@ +package azure + +import "testing" + +var configTests = []struct { + s string + cfg Config +}{ + {"azure:container-name:/", Config{ + Container: "container-name", + Prefix: "", + Connections: 5, + }}, + {"azure:container-name:/prefix/directory", Config{ + Container: "container-name", + Prefix: "prefix/directory", + Connections: 5, + }}, + {"azure:container-name:/prefix/directory/", Config{ + Container: "container-name", + Prefix: "prefix/directory", + Connections: 5, + }}, +} + +func TestParseConfig(t *testing.T) { + for i, test := range configTests { + cfg, err := ParseConfig(test.s) + if err != nil { + t.Errorf("test %d:%s failed: %v", i, test.s, err) + continue + } + + if cfg != test.cfg { + t.Errorf("test %d:\ninput:\n %s\n wrong config, want:\n %v\ngot:\n %v", + i, test.s, test.cfg, cfg) + continue + } + } +} diff --git a/internal/backend/b2/b2.go b/internal/backend/b2/b2.go new file mode 100644 index 000000000..2d1f3a3b0 --- /dev/null +++ b/internal/backend/b2/b2.go @@ -0,0 +1,332 @@ +package b2 + +import ( + "context" + "io" + "net/http" + "path" + + "github.com/restic/restic/internal/backend" + "github.com/restic/restic/internal/debug" + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/restic" + + "github.com/kurin/blazer/b2" +) + +// b2Backend is a backend which stores its data on Backblaze B2. +type b2Backend struct { + client *b2.Client + bucket *b2.Bucket + cfg Config + listMaxItems int + backend.Layout + sem *backend.Semaphore +} + +const defaultListMaxItems = 1000 + +// ensure statically that *b2Backend implements restic.Backend. 
+var _ restic.Backend = &b2Backend{} + +func newClient(ctx context.Context, cfg Config, rt http.RoundTripper) (*b2.Client, error) { + opts := []b2.ClientOption{b2.Transport(rt)} + + c, err := b2.NewClient(ctx, cfg.AccountID, cfg.Key, opts...) + if err != nil { + return nil, errors.Wrap(err, "b2.NewClient") + } + return c, nil +} + +// Open opens a connection to the B2 service. +func Open(ctx context.Context, cfg Config, rt http.RoundTripper) (restic.Backend, error) { + debug.Log("cfg %#v", cfg) + + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + client, err := newClient(ctx, cfg, rt) + if err != nil { + return nil, err + } + + bucket, err := client.Bucket(ctx, cfg.Bucket) + if err != nil { + return nil, errors.Wrap(err, "Bucket") + } + + sem, err := backend.NewSemaphore(cfg.Connections) + if err != nil { + return nil, err + } + + be := &b2Backend{ + client: client, + bucket: bucket, + cfg: cfg, + Layout: &backend.DefaultLayout{ + Join: path.Join, + Path: cfg.Prefix, + }, + listMaxItems: defaultListMaxItems, + sem: sem, + } + + return be, nil +} + +// Create opens a connection to the B2 service. If the bucket does not exist yet, +// it is created. +func Create(ctx context.Context, cfg Config, rt http.RoundTripper) (restic.Backend, error) { + debug.Log("cfg %#v", cfg) + + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + client, err := newClient(ctx, cfg, rt) + if err != nil { + return nil, err + } + + attr := b2.BucketAttrs{ + Type: b2.Private, + } + bucket, err := client.NewBucket(ctx, cfg.Bucket, &attr) + if err != nil { + return nil, errors.Wrap(err, "NewBucket") + } + + sem, err := backend.NewSemaphore(cfg.Connections) + if err != nil { + return nil, err + } + + be := &b2Backend{ + client: client, + bucket: bucket, + cfg: cfg, + Layout: &backend.DefaultLayout{ + Join: path.Join, + Path: cfg.Prefix, + }, + listMaxItems: defaultListMaxItems, + sem: sem, + } + + present, err := be.Test(ctx, restic.Handle{Type: restic.ConfigFile}) + if err != nil { + return nil, err + } + + if present { + return nil, errors.New("config already exists") + } + + return be, nil +} + +// SetListMaxItems sets the number of list items to load per request. +func (be *b2Backend) SetListMaxItems(i int) { + be.listMaxItems = i +} + +// Location returns the location for the backend. +func (be *b2Backend) Location() string { + return be.cfg.Bucket +} + +// IsNotExist returns true if the error is caused by a non-existing file. +func (be *b2Backend) IsNotExist(err error) bool { + return b2.IsNotExist(errors.Cause(err)) +} + +// Load runs fn with a reader that yields the contents of the file at h at the +// given offset. 
+func (be *b2Backend) Load(ctx context.Context, h restic.Handle, length int, offset int64, fn func(rd io.Reader) error) error { + return backend.DefaultLoad(ctx, h, length, offset, be.openReader, fn) +} + +func (be *b2Backend) openReader(ctx context.Context, h restic.Handle, length int, offset int64) (io.ReadCloser, error) { + debug.Log("Load %v, length %v, offset %v from %v", h, length, offset, be.Filename(h)) + if err := h.Valid(); err != nil { + return nil, err + } + + if offset < 0 { + return nil, errors.New("offset is negative") + } + + if length < 0 { + return nil, errors.Errorf("invalid length %d", length) + } + + ctx, cancel := context.WithCancel(ctx) + + be.sem.GetToken() + + name := be.Layout.Filename(h) + obj := be.bucket.Object(name) + + if offset == 0 && length == 0 { + rd := obj.NewReader(ctx) + return be.sem.ReleaseTokenOnClose(rd, cancel), nil + } + + // pass a negative length to NewRangeReader so that the remainder of the + // file is read. + if length == 0 { + length = -1 + } + + rd := obj.NewRangeReader(ctx, offset, int64(length)) + return be.sem.ReleaseTokenOnClose(rd, cancel), nil +} + +// Save stores data in the backend at the handle. +func (be *b2Backend) Save(ctx context.Context, h restic.Handle, rd restic.RewindReader) error { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + if err := h.Valid(); err != nil { + return err + } + + be.sem.GetToken() + defer be.sem.ReleaseToken() + + name := be.Filename(h) + debug.Log("Save %v, name %v", h, name) + obj := be.bucket.Object(name) + + w := obj.NewWriter(ctx) + n, err := io.Copy(w, rd) + debug.Log(" saved %d bytes, err %v", n, err) + + if err != nil { + _ = w.Close() + return errors.Wrap(err, "Copy") + } + + return errors.Wrap(w.Close(), "Close") +} + +// Stat returns information about a blob. +func (be *b2Backend) Stat(ctx context.Context, h restic.Handle) (bi restic.FileInfo, err error) { + debug.Log("Stat %v", h) + + be.sem.GetToken() + defer be.sem.ReleaseToken() + + name := be.Filename(h) + obj := be.bucket.Object(name) + info, err := obj.Attrs(ctx) + if err != nil { + debug.Log("Attrs() err %v", err) + return restic.FileInfo{}, errors.Wrap(err, "Stat") + } + return restic.FileInfo{Size: info.Size, Name: h.Name}, nil +} + +// Test returns true if a blob of the given type and name exists in the backend. +func (be *b2Backend) Test(ctx context.Context, h restic.Handle) (bool, error) { + debug.Log("Test %v", h) + + be.sem.GetToken() + defer be.sem.ReleaseToken() + + found := false + name := be.Filename(h) + obj := be.bucket.Object(name) + info, err := obj.Attrs(ctx) + if err == nil && info != nil && info.Status == b2.Uploaded { + found = true + } + return found, nil +} + +// Remove removes the blob with the given name and type. +func (be *b2Backend) Remove(ctx context.Context, h restic.Handle) error { + debug.Log("Remove %v", h) + + be.sem.GetToken() + defer be.sem.ReleaseToken() + + obj := be.bucket.Object(be.Filename(h)) + return errors.Wrap(obj.Delete(ctx), "Delete") +} + +type semLocker struct { + *backend.Semaphore +} + +func (sm semLocker) Lock() { sm.GetToken() } +func (sm semLocker) Unlock() { sm.ReleaseToken() } + +// List returns a channel that yields all names of blobs of type t. 
+func (be *b2Backend) List(ctx context.Context, t restic.FileType, fn func(restic.FileInfo) error) error { + debug.Log("List %v", t) + + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + prefix, _ := be.Basedir(t) + iter := be.bucket.List(ctx, b2.ListPrefix(prefix), b2.ListPageSize(be.listMaxItems), b2.ListLocker(semLocker{be.sem})) + + for iter.Next() { + obj := iter.Object() + + attrs, err := obj.Attrs(ctx) + if err != nil { + return err + } + + fi := restic.FileInfo{ + Name: path.Base(obj.Name()), + Size: attrs.Size, + } + + if err := fn(fi); err != nil { + return err + } + } + if err := iter.Err(); err != nil { + debug.Log("List: %v", err) + return err + } + return nil +} + +// Remove keys for a specified backend type. +func (be *b2Backend) removeKeys(ctx context.Context, t restic.FileType) error { + debug.Log("removeKeys %v", t) + return be.List(ctx, t, func(fi restic.FileInfo) error { + return be.Remove(ctx, restic.Handle{Type: t, Name: fi.Name}) + }) +} + +// Delete removes all restic keys in the bucket. It will not remove the bucket itself. +func (be *b2Backend) Delete(ctx context.Context) error { + alltypes := []restic.FileType{ + restic.DataFile, + restic.KeyFile, + restic.LockFile, + restic.SnapshotFile, + restic.IndexFile} + + for _, t := range alltypes { + err := be.removeKeys(ctx, t) + if err != nil { + return nil + } + } + err := be.Remove(ctx, restic.Handle{Type: restic.ConfigFile}) + if err != nil && b2.IsNotExist(errors.Cause(err)) { + err = nil + } + + return err +} + +// Close does nothing +func (be *b2Backend) Close() error { return nil } diff --git a/internal/backend/b2/b2_test.go b/internal/backend/b2/b2_test.go new file mode 100644 index 000000000..9f97de4f9 --- /dev/null +++ b/internal/backend/b2/b2_test.go @@ -0,0 +1,99 @@ +package b2_test + +import ( + "context" + "fmt" + "os" + "testing" + "time" + + "github.com/restic/restic/internal/backend" + "github.com/restic/restic/internal/backend/b2" + "github.com/restic/restic/internal/backend/test" + "github.com/restic/restic/internal/restic" + + rtest "github.com/restic/restic/internal/test" +) + +func newB2TestSuite(t testing.TB) *test.Suite { + tr, err := backend.Transport(backend.TransportOptions{}) + if err != nil { + t.Fatalf("cannot create transport for tests: %v", err) + } + + return &test.Suite{ + // do not use excessive data + MinimalData: true, + + // wait for at most 10 seconds for removed files to disappear + WaitForDelayedRemoval: 10 * time.Second, + + // NewConfig returns a config for a new temporary backend that will be used in tests. + NewConfig: func() (interface{}, error) { + b2cfg, err := b2.ParseConfig(os.Getenv("RESTIC_TEST_B2_REPOSITORY")) + if err != nil { + return nil, err + } + + cfg := b2cfg.(b2.Config) + cfg.AccountID = os.Getenv("RESTIC_TEST_B2_ACCOUNT_ID") + cfg.Key = os.Getenv("RESTIC_TEST_B2_ACCOUNT_KEY") + cfg.Prefix = fmt.Sprintf("test-%d", time.Now().UnixNano()) + return cfg, nil + }, + + // CreateFn is a function that creates a temporary repository for the tests. + Create: func(config interface{}) (restic.Backend, error) { + cfg := config.(b2.Config) + return b2.Create(context.Background(), cfg, tr) + }, + + // OpenFn is a function that opens a previously created temporary repository. + Open: func(config interface{}) (restic.Backend, error) { + cfg := config.(b2.Config) + return b2.Open(context.Background(), cfg, tr) + }, + + // CleanupFn removes data created during the tests. 
+ Cleanup: func(config interface{}) error { + cfg := config.(b2.Config) + be, err := b2.Open(context.Background(), cfg, tr) + if err != nil { + return err + } + + return be.Delete(context.TODO()) + }, + } +} + +func testVars(t testing.TB) { + vars := []string{ + "RESTIC_TEST_B2_ACCOUNT_ID", + "RESTIC_TEST_B2_ACCOUNT_KEY", + "RESTIC_TEST_B2_REPOSITORY", + } + + for _, v := range vars { + if os.Getenv(v) == "" { + t.Skipf("environment variable %v not set", v) + return + } + } +} + +func TestBackendB2(t *testing.T) { + defer func() { + if t.Skipped() { + rtest.SkipDisallowed(t, "restic/backend/b2.TestBackendB2") + } + }() + + testVars(t) + newB2TestSuite(t).RunTests(t) +} + +func BenchmarkBackendb2(t *testing.B) { + testVars(t) + newB2TestSuite(t).RunBenchmarks(t) +} diff --git a/internal/backend/b2/config.go b/internal/backend/b2/config.go new file mode 100644 index 000000000..f10e4d10d --- /dev/null +++ b/internal/backend/b2/config.go @@ -0,0 +1,93 @@ +package b2 + +import ( + "path" + "regexp" + "strings" + + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/options" +) + +// Config contains all configuration necessary to connect to an b2 compatible +// server. +type Config struct { + AccountID string + Key string + Bucket string + Prefix string + + Connections uint `option:"connections" help:"set a limit for the number of concurrent connections (default: 5)"` +} + +// NewConfig returns a new config with default options applied. +func NewConfig() Config { + return Config{ + Connections: 5, + } +} + +func init() { + options.Register("b2", Config{}) +} + +var bucketName = regexp.MustCompile("^[a-zA-Z0-9-]+$") + +// checkBucketName tests the bucket name against the rules at +// https://help.backblaze.com/hc/en-us/articles/217666908-What-you-need-to-know-about-B2-Bucket-names +func checkBucketName(name string) error { + if name == "" { + return errors.New("bucket name is empty") + } + + if len(name) < 6 { + return errors.New("bucket name is too short") + } + + if len(name) > 50 { + return errors.New("bucket name is too long") + } + + if !bucketName.MatchString(name) { + return errors.New("bucket name contains invalid characters, allowed are: a-z, 0-9, dash (-)") + } + + return nil +} + +// ParseConfig parses the string s and extracts the b2 config. The supported +// configuration format is b2:bucketname/prefix. If no prefix is given the +// prefix "restic" will be used. 
+func ParseConfig(s string) (interface{}, error) { + if !strings.HasPrefix(s, "b2:") { + return nil, errors.New("invalid format, want: b2:bucket-name[:path]") + } + + s = s[3:] + data := strings.SplitN(s, ":", 2) + if len(data) == 0 || len(data[0]) == 0 { + return nil, errors.New("bucket name not found") + } + + cfg := NewConfig() + cfg.Bucket = data[0] + + if err := checkBucketName(cfg.Bucket); err != nil { + return nil, err + } + + if len(data) == 2 { + p := data[1] + if len(p) > 0 { + p = path.Clean(p) + } + + if len(p) > 0 && path.IsAbs(p) { + p = p[1:] + } + + cfg.Prefix = p + } + + return cfg, nil +} diff --git a/internal/backend/b2/config_test.go b/internal/backend/b2/config_test.go new file mode 100644 index 000000000..4194cb62c --- /dev/null +++ b/internal/backend/b2/config_test.go @@ -0,0 +1,92 @@ +package b2 + +import "testing" + +var configTests = []struct { + s string + cfg Config +}{ + {"b2:bucketname", Config{ + Bucket: "bucketname", + Prefix: "", + Connections: 5, + }}, + {"b2:bucketname:", Config{ + Bucket: "bucketname", + Prefix: "", + Connections: 5, + }}, + {"b2:bucketname:/prefix/directory", Config{ + Bucket: "bucketname", + Prefix: "prefix/directory", + Connections: 5, + }}, + {"b2:foobar", Config{ + Bucket: "foobar", + Prefix: "", + Connections: 5, + }}, + {"b2:foobar:", Config{ + Bucket: "foobar", + Prefix: "", + Connections: 5, + }}, + {"b2:foobar:/", Config{ + Bucket: "foobar", + Prefix: "", + Connections: 5, + }}, +} + +func TestParseConfig(t *testing.T) { + for _, test := range configTests { + t.Run("", func(t *testing.T) { + cfg, err := ParseConfig(test.s) + if err != nil { + t.Fatalf("%s failed: %v", test.s, err) + } + + if cfg != test.cfg { + t.Fatalf("input: %s\n wrong config, want:\n %#v\ngot:\n %#v", + test.s, test.cfg, cfg) + } + }) + } +} + +var invalidConfigTests = []struct { + s string + err string +}{ + { + "b2", + "invalid format, want: b2:bucket-name[:path]", + }, + { + "b2:", + "bucket name not found", + }, + { + "b2:bucket_name", + "bucket name contains invalid characters, allowed are: a-z, 0-9, dash (-)", + }, + { + "b2:bucketname/prefix/directory/", + "bucket name contains invalid characters, allowed are: a-z, 0-9, dash (-)", + }, +} + +func TestInvalidConfig(t *testing.T) { + for _, test := range invalidConfigTests { + t.Run("", func(t *testing.T) { + cfg, err := ParseConfig(test.s) + if err == nil { + t.Fatalf("expected error not found for invalid config: %v, cfg is:\n%#v", test.s, cfg) + } + + if err.Error() != test.err { + t.Fatalf("unexpected error found, want:\n %v\ngot:\n %v", test.err, err.Error()) + } + }) + } +} diff --git a/internal/backend/backend_error.go b/internal/backend/backend_error.go new file mode 100644 index 000000000..77a931858 --- /dev/null +++ b/internal/backend/backend_error.go @@ -0,0 +1,84 @@ +package backend + +import ( + "context" + "io" + "io/ioutil" + "math/rand" + "sync" + + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/restic" +) + +// ErrorBackend is used to induce errors into various function calls and test +// the retry functions. +type ErrorBackend struct { + FailSave float32 + FailSaveRead float32 + FailLoad float32 + FailStat float32 + restic.Backend + + r *rand.Rand + m sync.Mutex +} + +// statically ensure that ErrorBackend implements restic.Backend. +var _ restic.Backend = &ErrorBackend{} + +// NewErrorBackend wraps be with a backend that returns errors according to +// given probabilities. 
+func NewErrorBackend(be restic.Backend, seed int64) *ErrorBackend { + return &ErrorBackend{ + Backend: be, + r: rand.New(rand.NewSource(seed)), + } +} + +func (be *ErrorBackend) fail(p float32) bool { + be.m.Lock() + v := be.r.Float32() + be.m.Unlock() + + return v < p +} + +// Save stores the data in the backend under the given handle. +func (be *ErrorBackend) Save(ctx context.Context, h restic.Handle, rd restic.RewindReader) error { + if be.fail(be.FailSave) { + return errors.Errorf("Save(%v) random error induced", h) + } + + if be.fail(be.FailSaveRead) { + _, err := io.CopyN(ioutil.Discard, rd, be.r.Int63n(1000)) + if err != nil { + return err + } + + return errors.Errorf("Save(%v) random error with partial read induced", h) + } + + return be.Backend.Save(ctx, h, rd) +} + +// Load returns a reader that yields the contents of the file at h at the +// given offset. If length is larger than zero, only a portion of the file +// is returned. rd must be closed after use. If an error is returned, the +// ReadCloser must be nil. +func (be *ErrorBackend) Load(ctx context.Context, h restic.Handle, length int, offset int64, consumer func(rd io.Reader) error) error { + if be.fail(be.FailLoad) { + return errors.Errorf("Load(%v, %v, %v) random error induced", h, length, offset) + } + + return be.Backend.Load(ctx, h, length, offset, consumer) +} + +// Stat returns information about the File identified by h. +func (be *ErrorBackend) Stat(ctx context.Context, h restic.Handle) (restic.FileInfo, error) { + if be.fail(be.FailLoad) { + return restic.FileInfo{}, errors.Errorf("Stat(%v) random error induced", h) + } + + return be.Stat(ctx, h) +} diff --git a/internal/backend/backend_retry.go b/internal/backend/backend_retry.go new file mode 100644 index 000000000..372d0a98f --- /dev/null +++ b/internal/backend/backend_retry.go @@ -0,0 +1,148 @@ +package backend + +import ( + "context" + "fmt" + "io" + "time" + + "github.com/cenkalti/backoff" + "github.com/restic/restic/internal/debug" + "github.com/restic/restic/internal/restic" +) + +// RetryBackend retries operations on the backend in case of an error with a +// backoff. +type RetryBackend struct { + restic.Backend + MaxTries int + Report func(string, error, time.Duration) +} + +// statically ensure that RetryBackend implements restic.Backend. +var _ restic.Backend = &RetryBackend{} + +// NewRetryBackend wraps be with a backend that retries operations after a +// backoff. report is called with a description and the error, if one occurred. +func NewRetryBackend(be restic.Backend, maxTries int, report func(string, error, time.Duration)) *RetryBackend { + return &RetryBackend{ + Backend: be, + MaxTries: maxTries, + Report: report, + } +} + +func (be *RetryBackend) retry(ctx context.Context, msg string, f func() error) error { + err := backoff.RetryNotify(f, + backoff.WithContext(backoff.WithMaxRetries(backoff.NewExponentialBackOff(), uint64(be.MaxTries)), ctx), + func(err error, d time.Duration) { + if be.Report != nil { + be.Report(msg, err, d) + } + }, + ) + + return err +} + +// Save stores the data in the backend under the given handle. 
+func (be *RetryBackend) Save(ctx context.Context, h restic.Handle, rd restic.RewindReader) error { + return be.retry(ctx, fmt.Sprintf("Save(%v)", h), func() error { + err := rd.Rewind() + if err != nil { + return err + } + + err = be.Backend.Save(ctx, h, rd) + if err == nil { + return nil + } + + debug.Log("Save(%v) failed with error, removing file: %v", h, err) + rerr := be.Backend.Remove(ctx, h) + if rerr != nil { + debug.Log("Remove(%v) returned error: %v", h, err) + } + + // return original error + return err + }) +} + +// Load returns a reader that yields the contents of the file at h at the +// given offset. If length is larger than zero, only a portion of the file +// is returned. rd must be closed after use. If an error is returned, the +// ReadCloser must be nil. +func (be *RetryBackend) Load(ctx context.Context, h restic.Handle, length int, offset int64, consumer func(rd io.Reader) error) (err error) { + return be.retry(ctx, fmt.Sprintf("Load(%v, %v, %v)", h, length, offset), + func() error { + return be.Backend.Load(ctx, h, length, offset, consumer) + }) +} + +// Stat returns information about the File identified by h. +func (be *RetryBackend) Stat(ctx context.Context, h restic.Handle) (fi restic.FileInfo, err error) { + err = be.retry(ctx, fmt.Sprintf("Stat(%v)", h), + func() error { + var innerError error + fi, innerError = be.Backend.Stat(ctx, h) + + return innerError + }) + return fi, err +} + +// Remove removes a File with type t and name. +func (be *RetryBackend) Remove(ctx context.Context, h restic.Handle) (err error) { + return be.retry(ctx, fmt.Sprintf("Remove(%v)", h), func() error { + return be.Backend.Remove(ctx, h) + }) +} + +// Test a boolean value whether a File with the name and type exists. +func (be *RetryBackend) Test(ctx context.Context, h restic.Handle) (exists bool, err error) { + err = be.retry(ctx, fmt.Sprintf("Test(%v)", h), func() error { + var innerError error + exists, innerError = be.Backend.Test(ctx, h) + + return innerError + }) + return exists, err +} + +// List runs fn for each file in the backend which has the type t. When an +// error is returned by the underlying backend, the request is retried. When fn +// returns an error, the operation is aborted and the error is returned to the +// caller. 
+func (be *RetryBackend) List(ctx context.Context, t restic.FileType, fn func(restic.FileInfo) error) error { + // create a new context that we can cancel when fn returns an error, so + // that listing is aborted + listCtx, cancel := context.WithCancel(ctx) + defer cancel() + + listed := make(map[string]struct{}) // remember for which files we already ran fn + var innerErr error // remember when fn returned an error, so we can return that to the caller + + err := be.retry(listCtx, fmt.Sprintf("List(%v)", t), func() error { + return be.Backend.List(ctx, t, func(fi restic.FileInfo) error { + if _, ok := listed[fi.Name]; ok { + return nil + } + listed[fi.Name] = struct{}{} + + innerErr = fn(fi) + if innerErr != nil { + // if fn returned an error, listing is aborted, so we cancel the context + cancel() + } + return innerErr + }) + }) + + // the error fn returned takes precedence + if innerErr != nil { + return innerErr + } + + return err +} diff --git a/internal/backend/backend_retry_test.go b/internal/backend/backend_retry_test.go new file mode 100644 index 000000000..277f96756 --- /dev/null +++ b/internal/backend/backend_retry_test.go @@ -0,0 +1,249 @@ +package backend + +import ( + "bytes" + "context" + "io" + "io/ioutil" + "testing" + + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/mock" + "github.com/restic/restic/internal/restic" + "github.com/restic/restic/internal/test" +) + +func TestBackendSaveRetry(t *testing.T) { + buf := bytes.NewBuffer(nil) + errcount := 0 + be := &mock.Backend{ + SaveFn: func(ctx context.Context, h restic.Handle, rd restic.RewindReader) error { + if errcount == 0 { + errcount++ + _, err := io.CopyN(ioutil.Discard, rd, 120) + if err != nil { + return err + } + + return errors.New("injected error") + } + + _, err := io.Copy(buf, rd) + return err + }, + } + + retryBackend := RetryBackend{ + Backend: be, + } + + data := test.Random(23, 5*1024*1024+11241) + err := retryBackend.Save(context.TODO(), restic.Handle{}, restic.NewByteReader(data)) + if err != nil { + t.Fatal(err) + } + + if len(data) != buf.Len() { + t.Errorf("wrong number of bytes written: want %d, got %d", len(data), buf.Len()) + } + + if !bytes.Equal(data, buf.Bytes()) { + t.Errorf("wrong data written to backend") + } +} + +func TestBackendListRetry(t *testing.T) { + const ( + ID1 = "id1" + ID2 = "id2" + ) + + retry := 0 + be := &mock.Backend{ + ListFn: func(ctx context.Context, t restic.FileType, fn func(restic.FileInfo) error) error { + // fail during first retry, succeed during second + retry++ + if retry == 1 { + fn(restic.FileInfo{Name: ID1}) + return errors.New("test list error") + } + fn(restic.FileInfo{Name: ID1}) + fn(restic.FileInfo{Name: ID2}) + return nil + }, + } + + retryBackend := RetryBackend{ + Backend: be, + } + + var listed []string + err := retryBackend.List(context.TODO(), restic.DataFile, func(fi restic.FileInfo) error { + listed = append(listed, fi.Name) + return nil + }) + test.OK(t, err) // assert overall success + test.Equals(t, 2, retry) // assert retried once + test.Equals(t, []string{ID1, ID2}, listed) // assert no duplicate files +} + +func TestBackendListRetryErrorFn(t *testing.T) { + var names = []string{"id1", "id2", "foo", "bar"} + + be := &mock.Backend{ + ListFn: func(ctx context.Context, tpe restic.FileType, fn func(restic.FileInfo) error) error { + t.Logf("List called for %v", tpe) + for _, name := range names { + err := fn(restic.FileInfo{Name: name}) + if err != nil { + return err + } + } + + return nil + }, + } + + retryBackend := 
RetryBackend{ + Backend: be, + } + + var ErrTest = errors.New("test error") + + var listed []string + run := 0 + err := retryBackend.List(context.TODO(), restic.DataFile, func(fi restic.FileInfo) error { + t.Logf("fn called for %v", fi.Name) + run++ + // return an error for the third item in the list + if run == 3 { + t.Log("returning an error") + return ErrTest + } + listed = append(listed, fi.Name) + return nil + }) + + if err != ErrTest { + t.Fatalf("wrong error returned, want %v, got %v", ErrTest, err) + } + + // processing should stop after the error was returned, so run should be 3 + if run != 3 { + t.Fatalf("function was called %d times, wanted %v", run, 3) + } + + test.Equals(t, []string{"id1", "id2"}, listed) +} + +func TestBackendListRetryErrorBackend(t *testing.T) { + var names = []string{"id1", "id2", "foo", "bar"} + + var ErrBackendTest = errors.New("test error") + + retries := 0 + be := &mock.Backend{ + ListFn: func(ctx context.Context, tpe restic.FileType, fn func(restic.FileInfo) error) error { + t.Logf("List called for %v, retries %v", tpe, retries) + retries++ + for i, name := range names { + if i == 2 { + return ErrBackendTest + } + + err := fn(restic.FileInfo{Name: name}) + if err != nil { + return err + } + } + + return nil + }, + } + + const maxRetries = 2 + retryBackend := RetryBackend{ + MaxTries: maxRetries, + Backend: be, + } + + var listed []string + err := retryBackend.List(context.TODO(), restic.DataFile, func(fi restic.FileInfo) error { + t.Logf("fn called for %v", fi.Name) + listed = append(listed, fi.Name) + return nil + }) + + if err != ErrBackendTest { + t.Fatalf("wrong error returned, want %v, got %v", ErrBackendTest, err) + } + + if retries != maxRetries+1 { + t.Fatalf("List was called %d times, wanted %v", retries, maxRetries+1) + } + + test.Equals(t, names[:2], listed) +} + +// failingReader returns an error after reading limit number of bytes +type failingReader struct { + data []byte + pos int + limit int +} + +func (r failingReader) Read(p []byte) (n int, err error) { + i := 0 + for ; i < len(p) && i+r.pos < r.limit; i++ { + p[i] = r.data[r.pos+i] + } + r.pos += i + if r.pos >= r.limit { + return i, errors.Errorf("reader reached limit of %d", r.limit) + } + return i, nil +} +func (r failingReader) Close() error { + return nil +} + +// closingReader adapts io.Reader to io.ReadCloser interface +type closingReader struct { + rd io.Reader +} + +func (r closingReader) Read(p []byte) (n int, err error) { + return r.rd.Read(p) +} +func (r closingReader) Close() error { + return nil +} + +func TestBackendLoadRetry(t *testing.T) { + data := test.Random(23, 1024) + limit := 100 + attempt := 0 + + be := mock.NewBackend() + be.OpenReaderFn = func(ctx context.Context, h restic.Handle, length int, offset int64) (io.ReadCloser, error) { + // returns failing reader on first invocation, good reader on subsequent invocations + attempt++ + if attempt > 1 { + return closingReader{rd: bytes.NewReader(data)}, nil + } + return failingReader{data: data, limit: limit}, nil + } + + retryBackend := RetryBackend{ + Backend: be, + } + + var buf []byte + err := retryBackend.Load(context.TODO(), restic.Handle{}, 0, 0, func(rd io.Reader) (err error) { + buf, err = ioutil.ReadAll(rd) + return err + }) + test.OK(t, err) + test.Equals(t, data, buf) + test.Equals(t, 2, attempt) +} diff --git a/internal/backend/doc.go b/internal/backend/doc.go new file mode 100644 index 000000000..daab2e2f8 --- /dev/null +++ b/internal/backend/doc.go @@ -0,0 +1,4 @@ +// Package backend provides local 
and remote storage for restic repositories. +// All backends need to implement the Backend interface. There is a MemBackend, +// which stores all data in a map internally and can be used for testing. +package backend diff --git a/internal/backend/foreground_solaris.go b/internal/backend/foreground_solaris.go new file mode 100644 index 000000000..501f9c1a1 --- /dev/null +++ b/internal/backend/foreground_solaris.go @@ -0,0 +1,28 @@ +package backend + +import ( + "os/exec" + "syscall" + + "github.com/restic/restic/internal/errors" +) + +// StartForeground runs cmd in the foreground, by temporarily switching to the +// new process group created for cmd. The returned function `bg` switches back +// to the previous process group. +func StartForeground(cmd *exec.Cmd) (bg func() error, err error) { + // run the command in it's own process group so that SIGINT + // is not sent to it. + cmd.SysProcAttr = &syscall.SysProcAttr{ + Setpgid: true, + } + + // start the process + err = cmd.Start() + if err != nil { + return nil, errors.Wrap(err, "cmd.Start") + } + + bg = func() error { return nil } + return bg, nil +} diff --git a/internal/backend/foreground_unix.go b/internal/backend/foreground_unix.go new file mode 100644 index 000000000..1662a0250 --- /dev/null +++ b/internal/backend/foreground_unix.go @@ -0,0 +1,79 @@ +// +build !solaris +// +build !windows + +package backend + +import ( + "os" + "os/exec" + "os/signal" + "syscall" + "unsafe" + + "github.com/restic/restic/internal/debug" + "github.com/restic/restic/internal/errors" +) + +func tcsetpgrp(fd int, pid int) error { + _, _, errno := syscall.RawSyscall(syscall.SYS_IOCTL, uintptr(fd), + uintptr(syscall.TIOCSPGRP), uintptr(unsafe.Pointer(&pid))) + if errno == 0 { + return nil + } + + return errno +} + +// StartForeground runs cmd in the foreground, by temporarily switching to the +// new process group created for cmd. The returned function `bg` switches back +// to the previous process group. +func StartForeground(cmd *exec.Cmd) (bg func() error, err error) { + // open the TTY, we need the file descriptor + tty, err := os.OpenFile("/dev/tty", os.O_RDWR, 0) + if err != nil { + debug.Log("unable to open tty: %v", err) + bg = func() error { + return nil + } + return bg, cmd.Start() + } + + signal.Ignore(syscall.SIGTTIN) + signal.Ignore(syscall.SIGTTOU) + + // run the command in its own process group + cmd.SysProcAttr = &syscall.SysProcAttr{ + Setpgid: true, + } + + // start the process + err = cmd.Start() + if err != nil { + _ = tty.Close() + return nil, errors.Wrap(err, "cmd.Start") + } + + // move the command's process group into the foreground + prev := syscall.Getpgrp() + err = tcsetpgrp(int(tty.Fd()), cmd.Process.Pid) + if err != nil { + _ = tty.Close() + return nil, err + } + + bg = func() error { + signal.Reset(syscall.SIGTTIN) + signal.Reset(syscall.SIGTTOU) + + // reset the foreground process group + err = tcsetpgrp(int(tty.Fd()), prev) + if err != nil { + _ = tty.Close() + return err + } + + return tty.Close() + } + + return bg, nil +} diff --git a/internal/backend/foreground_windows.go b/internal/backend/foreground_windows.go new file mode 100644 index 000000000..281d5f3f8 --- /dev/null +++ b/internal/backend/foreground_windows.go @@ -0,0 +1,21 @@ +package backend + +import ( + "os/exec" + + "github.com/restic/restic/internal/errors" +) + +// StartForeground runs cmd in the foreground, by temporarily switching to the +// new process group created for cmd. The returned function `bg` switches back +// to the previous process group. 
+func StartForeground(cmd *exec.Cmd) (bg func() error, err error) { + // just start the process and hope for the best + err = cmd.Start() + if err != nil { + return nil, errors.Wrap(err, "cmd.Start") + } + + bg = func() error { return nil } + return bg, nil +} diff --git a/internal/backend/gs/config.go b/internal/backend/gs/config.go new file mode 100644 index 000000000..e18066ef9 --- /dev/null +++ b/internal/backend/gs/config.go @@ -0,0 +1,60 @@ +package gs + +import ( + "path" + "strings" + + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/options" +) + +// Config contains all configuration necessary to connect to a Google Cloud Storage +// bucket. We use Google's default application credentials to acquire an access token, so +// we don't require that calling code supply any authentication material here. +type Config struct { + ProjectID string + Bucket string + Prefix string + + Connections uint `option:"connections" help:"set a limit for the number of concurrent connections (default: 20)"` +} + +// NewConfig returns a new Config with the default values filled in. +func NewConfig() Config { + return Config{ + Connections: 5, + } +} + +func init() { + options.Register("gs", Config{}) +} + +// ParseConfig parses the string s and extracts the gcs config. The +// supported configuration format is gs:bucketName:/[prefix]. +func ParseConfig(s string) (interface{}, error) { + if !strings.HasPrefix(s, "gs:") { + return nil, errors.New("gs: invalid format") + } + + // strip prefix "gs:" + s = s[3:] + + // use the first entry of the path as the bucket name and the + // remainder as prefix + data := strings.SplitN(s, ":", 2) + if len(data) < 2 { + return nil, errors.New("gs: invalid format: bucket name or path not found") + } + + bucket, path := data[0], path.Clean(data[1]) + + if strings.HasPrefix(path, "/") { + path = path[1:] + } + + cfg := NewConfig() + cfg.Bucket = bucket + cfg.Prefix = path + return cfg, nil +} diff --git a/internal/backend/gs/config_test.go b/internal/backend/gs/config_test.go new file mode 100644 index 000000000..fb03e3a20 --- /dev/null +++ b/internal/backend/gs/config_test.go @@ -0,0 +1,40 @@ +package gs + +import "testing" + +var configTests = []struct { + s string + cfg Config +}{ + {"gs:bucketname:/", Config{ + Bucket: "bucketname", + Prefix: "", + Connections: 5, + }}, + {"gs:bucketname:/prefix/directory", Config{ + Bucket: "bucketname", + Prefix: "prefix/directory", + Connections: 5, + }}, + {"gs:bucketname:/prefix/directory/", Config{ + Bucket: "bucketname", + Prefix: "prefix/directory", + Connections: 5, + }}, +} + +func TestParseConfig(t *testing.T) { + for i, test := range configTests { + cfg, err := ParseConfig(test.s) + if err != nil { + t.Errorf("test %d:%s failed: %v", i, test.s, err) + continue + } + + if cfg != test.cfg { + t.Errorf("test %d:\ninput:\n %s\n wrong config, want:\n %v\ngot:\n %v", + i, test.s, test.cfg, cfg) + continue + } + } +} diff --git a/internal/backend/gs/gs.go b/internal/backend/gs/gs.go new file mode 100644 index 000000000..feea05d07 --- /dev/null +++ b/internal/backend/gs/gs.go @@ -0,0 +1,459 @@ +// Package gs provides a restic backend for Google Cloud Storage. 
+package gs + +import ( + "context" + "fmt" + "io" + "net/http" + "os" + "path" + "strings" + + "github.com/pkg/errors" + "github.com/restic/restic/internal/backend" + "github.com/restic/restic/internal/debug" + "github.com/restic/restic/internal/restic" + + "golang.org/x/oauth2" + "golang.org/x/oauth2/google" + "google.golang.org/api/googleapi" + storage "google.golang.org/api/storage/v1" +) + +// Backend stores data in a GCS bucket. +// +// The service account used to access the bucket must have these permissions: +// * storage.objects.create +// * storage.objects.delete +// * storage.objects.get +// * storage.objects.list +type Backend struct { + service *storage.Service + projectID string + sem *backend.Semaphore + bucketName string + prefix string + listMaxItems int + backend.Layout +} + +// Ensure that *Backend implements restic.Backend. +var _ restic.Backend = &Backend{} + +func getStorageService(rt http.RoundTripper) (*storage.Service, error) { + // create a new HTTP client + httpClient := &http.Client{ + Transport: rt, + } + + // create a now context with the HTTP client stored at the oauth2.HTTPClient key + ctx := context.WithValue(context.Background(), oauth2.HTTPClient, httpClient) + + // use this context + client, err := google.DefaultClient(ctx, storage.DevstorageReadWriteScope) + if err != nil { + return nil, err + } + + service, err := storage.New(client) + if err != nil { + return nil, err + } + + return service, nil +} + +const defaultListMaxItems = 1000 + +func open(cfg Config, rt http.RoundTripper) (*Backend, error) { + debug.Log("open, config %#v", cfg) + + service, err := getStorageService(rt) + if err != nil { + return nil, errors.Wrap(err, "getStorageService") + } + + sem, err := backend.NewSemaphore(cfg.Connections) + if err != nil { + return nil, err + } + + be := &Backend{ + service: service, + projectID: cfg.ProjectID, + sem: sem, + bucketName: cfg.Bucket, + prefix: cfg.Prefix, + Layout: &backend.DefaultLayout{ + Path: cfg.Prefix, + Join: path.Join, + }, + listMaxItems: defaultListMaxItems, + } + + return be, nil +} + +// Open opens the gs backend at the specified bucket. +func Open(cfg Config, rt http.RoundTripper) (restic.Backend, error) { + return open(cfg, rt) +} + +// Create opens the gs backend at the specified bucket and attempts to creates +// the bucket if it does not exist yet. +// +// The service account must have the "storage.buckets.create" permission to +// create a bucket the does not yet exist. +func Create(cfg Config, rt http.RoundTripper) (restic.Backend, error) { + be, err := open(cfg, rt) + if err != nil { + return nil, errors.Wrap(err, "open") + } + + // Try to determine if the bucket exists. If it does not, try to create it. + // + // A Get call has three typical error cases: + // + // * nil: Bucket exists and we have access to the metadata (returned). + // + // * 403: Bucket exists and we do not have access to the metadata. We + // don't have storage.buckets.get permission to the bucket, but we may + // still be able to access objects in the bucket. + // + // * 404: Bucket doesn't exist. + // + // Determining if the bucket is accessible is best-effort because the + // 403 case is ambiguous. + if _, err := be.service.Buckets.Get(be.bucketName).Do(); err != nil { + gerr, ok := err.(*googleapi.Error) + if !ok { + // Don't know what to do with this error. + return nil, errors.Wrap(err, "service.Buckets.Get") + } + + switch gerr.Code { + case 403: + // Bucket exists, but we don't know if it is + // accessible. 
Optimistically assume it is; if not, + // future Backend calls will fail. + debug.Log("Unable to determine if bucket %s is accessible (err %v). Continuing as if it is.", be.bucketName, err) + case 404: + // Bucket doesn't exist, try to create it. + bucket := &storage.Bucket{ + Name: be.bucketName, + } + + if _, err := be.service.Buckets.Insert(be.projectID, bucket).Do(); err != nil { + // Always an error, as the bucket definitely + // doesn't exist. + return nil, errors.Wrap(err, "service.Buckets.Insert") + } + default: + // Don't know what to do with this error. + return nil, errors.Wrap(err, "service.Buckets.Get") + } + } + + return be, nil +} + +// SetListMaxItems sets the number of list items to load per request. +func (be *Backend) SetListMaxItems(i int) { + be.listMaxItems = i +} + +// IsNotExist returns true if the error is caused by a not existing file. +func (be *Backend) IsNotExist(err error) bool { + debug.Log("IsNotExist(%T, %#v)", err, err) + + if os.IsNotExist(err) { + return true + } + + if er, ok := err.(*googleapi.Error); ok { + if er.Code == 404 { + return true + } + } + + return false +} + +// Join combines path components with slashes. +func (be *Backend) Join(p ...string) string { + return path.Join(p...) +} + +// Location returns this backend's location (the bucket name). +func (be *Backend) Location() string { + return be.Join(be.bucketName, be.prefix) +} + +// Path returns the path in the bucket that is used for this backend. +func (be *Backend) Path() string { + return be.prefix +} + +// Save stores data in the backend at the handle. +func (be *Backend) Save(ctx context.Context, h restic.Handle, rd restic.RewindReader) error { + if err := h.Valid(); err != nil { + return err + } + + objName := be.Filename(h) + + debug.Log("Save %v at %v", h, objName) + + be.sem.GetToken() + + debug.Log("InsertObject(%v, %v)", be.bucketName, objName) + + // Set chunk size to zero to disable resumable uploads. + // + // With a non-zero chunk size (the default is + // googleapi.DefaultUploadChunkSize, 8MB), Insert will buffer data from + // rd in chunks of this size so it can upload these chunks in + // individual requests. + // + // This chunking allows the library to automatically handle network + // interruptions and re-upload only the last chunk rather than the full + // file. + // + // Unfortunately, this buffering doesn't play nicely with + // --limit-upload, which applies a rate limit to rd. This rate limit + // ends up only limiting the read from rd into the buffer rather than + // the network traffic itself. This results in poor network rate limit + // behavior, where individual chunks are written to the network at full + // bandwidth for several seconds, followed by several seconds of no + // network traffic as the next chunk is read through the rate limiter. + // + // By disabling chunking, rd is passed further down the request stack, + // where there is less (but some) buffering, which ultimately results + // in better rate limiting behavior. + // + // restic typically writes small blobs (4MB-30MB), so the resumable + // uploads are not providing significant benefit anyways. 
+ cs := googleapi.ChunkSize(0) + + info, err := be.service.Objects.Insert(be.bucketName, + &storage.Object{ + Name: objName, + Size: uint64(rd.Length()), + }).Media(rd, cs).Do() + + be.sem.ReleaseToken() + + if err != nil { + debug.Log("%v: err %#v: %v", objName, err, err) + return errors.Wrap(err, "service.Objects.Insert") + } + + debug.Log("%v -> %v bytes", objName, info.Size) + return nil +} + +// wrapReader wraps an io.ReadCloser to run an additional function on Close. +type wrapReader struct { + io.ReadCloser + f func() +} + +func (wr wrapReader) Close() error { + err := wr.ReadCloser.Close() + wr.f() + return err +} + +// Load runs fn with a reader that yields the contents of the file at h at the +// given offset. +func (be *Backend) Load(ctx context.Context, h restic.Handle, length int, offset int64, fn func(rd io.Reader) error) error { + return backend.DefaultLoad(ctx, h, length, offset, be.openReader, fn) +} + +func (be *Backend) openReader(ctx context.Context, h restic.Handle, length int, offset int64) (io.ReadCloser, error) { + debug.Log("Load %v, length %v, offset %v from %v", h, length, offset, be.Filename(h)) + if err := h.Valid(); err != nil { + return nil, err + } + + if offset < 0 { + return nil, errors.New("offset is negative") + } + + if length < 0 { + return nil, errors.Errorf("invalid length %d", length) + } + + objName := be.Filename(h) + + be.sem.GetToken() + + var byteRange string + if length > 0 { + byteRange = fmt.Sprintf("bytes=%d-%d", offset, offset+int64(length-1)) + } else { + byteRange = fmt.Sprintf("bytes=%d-", offset) + } + + req := be.service.Objects.Get(be.bucketName, objName) + // https://cloud.google.com/storage/docs/json_api/v1/parameters#range + req.Header().Set("Range", byteRange) + res, err := req.Download() + if err != nil { + be.sem.ReleaseToken() + return nil, err + } + + closeRd := wrapReader{ + ReadCloser: res.Body, + f: func() { + debug.Log("Close()") + be.sem.ReleaseToken() + }, + } + + return closeRd, err +} + +// Stat returns information about a blob. +func (be *Backend) Stat(ctx context.Context, h restic.Handle) (bi restic.FileInfo, err error) { + debug.Log("%v", h) + + objName := be.Filename(h) + + be.sem.GetToken() + obj, err := be.service.Objects.Get(be.bucketName, objName).Do() + be.sem.ReleaseToken() + + if err != nil { + debug.Log("GetObject() err %v", err) + return restic.FileInfo{}, errors.Wrap(err, "service.Objects.Get") + } + + return restic.FileInfo{Size: int64(obj.Size), Name: h.Name}, nil +} + +// Test returns true if a blob of the given type and name exists in the backend. +func (be *Backend) Test(ctx context.Context, h restic.Handle) (bool, error) { + found := false + objName := be.Filename(h) + + be.sem.GetToken() + _, err := be.service.Objects.Get(be.bucketName, objName).Do() + be.sem.ReleaseToken() + + if err == nil { + found = true + } + // If error, then not found + return found, nil +} + +// Remove removes the blob with the given name and type. +func (be *Backend) Remove(ctx context.Context, h restic.Handle) error { + objName := be.Filename(h) + + be.sem.GetToken() + err := be.service.Objects.Delete(be.bucketName, objName).Do() + be.sem.ReleaseToken() + + if er, ok := err.(*googleapi.Error); ok { + if er.Code == 404 { + err = nil + } + } + + debug.Log("Remove(%v) at %v -> err %v", h, objName, err) + return errors.Wrap(err, "client.RemoveObject") +} + +// List runs fn for each file in the backend which has the type t. When an +// error occurs (or fn returns an error), List stops and returns it. 
+func (be *Backend) List(ctx context.Context, t restic.FileType, fn func(restic.FileInfo) error) error { + debug.Log("listing %v", t) + + prefix, _ := be.Basedir(t) + + // make sure prefix ends with a slash + if !strings.HasSuffix(prefix, "/") { + prefix += "/" + } + + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + listReq := be.service.Objects.List(be.bucketName).Context(ctx).Prefix(prefix).MaxResults(int64(be.listMaxItems)) + for { + be.sem.GetToken() + obj, err := listReq.Do() + be.sem.ReleaseToken() + + if err != nil { + return err + } + + debug.Log("returned %v items", len(obj.Items)) + + for _, item := range obj.Items { + m := strings.TrimPrefix(item.Name, prefix) + if m == "" { + continue + } + + if ctx.Err() != nil { + return ctx.Err() + } + + fi := restic.FileInfo{ + Name: path.Base(m), + Size: int64(item.Size), + } + + err := fn(fi) + if err != nil { + return err + } + + if ctx.Err() != nil { + return ctx.Err() + } + } + + if obj.NextPageToken == "" { + break + } + listReq.PageToken(obj.NextPageToken) + } + + return ctx.Err() +} + +// Remove keys for a specified backend type. +func (be *Backend) removeKeys(ctx context.Context, t restic.FileType) error { + return be.List(ctx, t, func(fi restic.FileInfo) error { + return be.Remove(ctx, restic.Handle{Type: t, Name: fi.Name}) + }) +} + +// Delete removes all restic keys in the bucket. It will not remove the bucket itself. +func (be *Backend) Delete(ctx context.Context) error { + alltypes := []restic.FileType{ + restic.DataFile, + restic.KeyFile, + restic.LockFile, + restic.SnapshotFile, + restic.IndexFile} + + for _, t := range alltypes { + err := be.removeKeys(ctx, t) + if err != nil { + return nil + } + } + + return be.Remove(ctx, restic.Handle{Type: restic.ConfigFile}) +} + +// Close does nothing. +func (be *Backend) Close() error { return nil } diff --git a/internal/backend/gs/gs_test.go b/internal/backend/gs/gs_test.go new file mode 100644 index 000000000..27ff809ff --- /dev/null +++ b/internal/backend/gs/gs_test.go @@ -0,0 +1,122 @@ +package gs_test + +import ( + "context" + "fmt" + "os" + "testing" + "time" + + "github.com/restic/restic/internal/backend" + "github.com/restic/restic/internal/backend/gs" + "github.com/restic/restic/internal/backend/test" + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/restic" + rtest "github.com/restic/restic/internal/test" +) + +func newGSTestSuite(t testing.TB) *test.Suite { + tr, err := backend.Transport(backend.TransportOptions{}) + if err != nil { + t.Fatalf("cannot create transport for tests: %v", err) + } + + return &test.Suite{ + // do not use excessive data + MinimalData: true, + + // NewConfig returns a config for a new temporary backend that will be used in tests. + NewConfig: func() (interface{}, error) { + gscfg, err := gs.ParseConfig(os.Getenv("RESTIC_TEST_GS_REPOSITORY")) + if err != nil { + return nil, err + } + + cfg := gscfg.(gs.Config) + cfg.ProjectID = os.Getenv("RESTIC_TEST_GS_PROJECT_ID") + cfg.Prefix = fmt.Sprintf("test-%d", time.Now().UnixNano()) + return cfg, nil + }, + + // CreateFn is a function that creates a temporary repository for the tests. 
+ Create: func(config interface{}) (restic.Backend, error) { + cfg := config.(gs.Config) + + be, err := gs.Create(cfg, tr) + if err != nil { + return nil, err + } + + exists, err := be.Test(context.TODO(), restic.Handle{Type: restic.ConfigFile}) + if err != nil { + return nil, err + } + + if exists { + return nil, errors.New("config already exists") + } + + return be, nil + }, + + // OpenFn is a function that opens a previously created temporary repository. + Open: func(config interface{}) (restic.Backend, error) { + cfg := config.(gs.Config) + return gs.Open(cfg, tr) + }, + + // CleanupFn removes data created during the tests. + Cleanup: func(config interface{}) error { + cfg := config.(gs.Config) + + be, err := gs.Open(cfg, tr) + if err != nil { + return err + } + + return be.Delete(context.TODO()) + }, + } +} + +func TestBackendGS(t *testing.T) { + defer func() { + if t.Skipped() { + rtest.SkipDisallowed(t, "restic/backend/gs.TestBackendGS") + } + }() + + vars := []string{ + "GOOGLE_APPLICATION_CREDENTIALS", + "RESTIC_TEST_GS_PROJECT_ID", + "RESTIC_TEST_GS_REPOSITORY", + } + + for _, v := range vars { + if os.Getenv(v) == "" { + t.Skipf("environment variable %v not set", v) + return + } + } + + t.Logf("run tests") + newGSTestSuite(t).RunTests(t) +} + +func BenchmarkBackendGS(t *testing.B) { + vars := []string{ + "GOOGLE_APPLICATION_CREDENTIALS", + "RESTIC_TEST_GS_PROJECT_ID", + "RESTIC_TEST_GS_REPOSITORY", + } + + for _, v := range vars { + if os.Getenv(v) == "" { + t.Skipf("environment variable %v not set", v) + return + } + } + + t.Logf("run tests") + newGSTestSuite(t).RunBenchmarks(t) +} diff --git a/internal/backend/http_transport.go b/internal/backend/http_transport.go new file mode 100644 index 000000000..30d25ba78 --- /dev/null +++ b/internal/backend/http_transport.go @@ -0,0 +1,113 @@ +package backend + +import ( + "crypto/tls" + "crypto/x509" + "encoding/pem" + "io/ioutil" + "net" + "net/http" + "strings" + "time" + + "github.com/restic/restic/internal/debug" + "github.com/restic/restic/internal/errors" +) + +// TransportOptions collects various options which can be set for an HTTP based +// transport. +type TransportOptions struct { + // contains filenames of PEM encoded root certificates to trust + RootCertFilenames []string + + // contains the name of a file containing the TLS client certificate and private key in PEM format + TLSClientCertKeyFilename string +} + +// readPEMCertKey reads a file and returns the PEM encoded certificate and key +// blocks. +func readPEMCertKey(filename string) (certs []byte, key []byte, err error) { + data, err := ioutil.ReadFile(filename) + if err != nil { + return nil, nil, errors.Wrap(err, "ReadFile") + } + + var block *pem.Block + for { + if len(data) == 0 { + break + } + block, data = pem.Decode(data) + if block == nil { + break + } + + switch { + case strings.HasSuffix(block.Type, "CERTIFICATE"): + certs = append(certs, pem.EncodeToMemory(block)...) + case strings.HasSuffix(block.Type, "PRIVATE KEY"): + if key != nil { + return nil, nil, errors.Errorf("error loading TLS cert and key from %v: more than one private key found", filename) + } + key = pem.EncodeToMemory(block) + default: + return nil, nil, errors.Errorf("error loading TLS cert and key from %v: unknown block type %v found", filename, block.Type) + } + } + + return certs, key, nil +} + +// Transport returns a new http.RoundTripper with default settings applied. 
If +// a custom rootCertFilename is non-empty, it must point to a valid PEM file, +// otherwise the function will return an error. +func Transport(opts TransportOptions) (http.RoundTripper, error) { + // copied from net/http + tr := &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + }).DialContext, + MaxIdleConns: 100, + MaxIdleConnsPerHost: 100, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + TLSClientConfig: &tls.Config{}, + } + + if opts.TLSClientCertKeyFilename != "" { + certs, key, err := readPEMCertKey(opts.TLSClientCertKeyFilename) + if err != nil { + return nil, err + } + + crt, err := tls.X509KeyPair(certs, key) + if err != nil { + return nil, errors.Errorf("parse TLS client cert or key: %v", err) + } + tr.TLSClientConfig.Certificates = []tls.Certificate{crt} + } + + if opts.RootCertFilenames != nil { + pool := x509.NewCertPool() + for _, filename := range opts.RootCertFilenames { + if filename == "" { + return nil, errors.Errorf("empty filename for root certificate supplied") + } + b, err := ioutil.ReadFile(filename) + if err != nil { + return nil, errors.Errorf("unable to read root certificate: %v", err) + } + if ok := pool.AppendCertsFromPEM(b); !ok { + return nil, errors.Errorf("cannot parse root certificate from %q", filename) + } + } + tr.TLSClientConfig.RootCAs = pool + } + + // wrap in the debug round tripper (if active) + return debug.RoundTripper(tr), nil +} diff --git a/internal/backend/layout.go b/internal/backend/layout.go new file mode 100644 index 000000000..cc376da03 --- /dev/null +++ b/internal/backend/layout.go @@ -0,0 +1,168 @@ +package backend + +import ( + "fmt" + "os" + "path/filepath" + "regexp" + + "github.com/restic/restic/internal/debug" + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/fs" + "github.com/restic/restic/internal/restic" +) + +// Layout computes paths for file name storage. +type Layout interface { + Filename(restic.Handle) string + Dirname(restic.Handle) string + Basedir(restic.FileType) (dir string, subdirs bool) + Paths() []string + Name() string +} + +// Filesystem is the abstraction of a file system used for a backend. +type Filesystem interface { + Join(...string) string + ReadDir(string) ([]os.FileInfo, error) + IsNotExist(error) bool +} + +// ensure statically that *LocalFilesystem implements Filesystem. +var _ Filesystem = &LocalFilesystem{} + +// LocalFilesystem implements Filesystem in a local path. +type LocalFilesystem struct { +} + +// ReadDir returns all entries of a directory. +func (l *LocalFilesystem) ReadDir(dir string) ([]os.FileInfo, error) { + f, err := fs.Open(dir) + if err != nil { + return nil, err + } + + entries, err := f.Readdir(-1) + if err != nil { + return nil, errors.Wrap(err, "Readdir") + } + + err = f.Close() + if err != nil { + return nil, errors.Wrap(err, "Close") + } + + return entries, nil +} + +// Join combines several path components to one. +func (l *LocalFilesystem) Join(paths ...string) string { + return filepath.Join(paths...) +} + +// IsNotExist returns true for errors that are caused by not existing files. 
+func (l *LocalFilesystem) IsNotExist(err error) bool { + return os.IsNotExist(err) +} + +var backendFilenameLength = len(restic.ID{}) * 2 +var backendFilename = regexp.MustCompile(fmt.Sprintf("^[a-fA-F0-9]{%d}$", backendFilenameLength)) + +func hasBackendFile(fs Filesystem, dir string) (bool, error) { + entries, err := fs.ReadDir(dir) + if err != nil && fs.IsNotExist(errors.Cause(err)) { + return false, nil + } + + if err != nil { + return false, errors.Wrap(err, "ReadDir") + } + + for _, e := range entries { + if backendFilename.MatchString(e.Name()) { + return true, nil + } + } + + return false, nil +} + +// ErrLayoutDetectionFailed is returned by DetectLayout() when the layout +// cannot be detected automatically. +var ErrLayoutDetectionFailed = errors.New("auto-detecting the filesystem layout failed") + +// DetectLayout tries to find out which layout is used in a local (or sftp) +// filesystem at the given path. If repo is nil, an instance of LocalFilesystem +// is used. +func DetectLayout(repo Filesystem, dir string) (Layout, error) { + debug.Log("detect layout at %v", dir) + if repo == nil { + repo = &LocalFilesystem{} + } + + // key file in the "keys" dir (DefaultLayout) + foundKeysFile, err := hasBackendFile(repo, repo.Join(dir, defaultLayoutPaths[restic.KeyFile])) + if err != nil { + return nil, err + } + + // key file in the "key" dir (S3LegacyLayout) + foundKeyFile, err := hasBackendFile(repo, repo.Join(dir, s3LayoutPaths[restic.KeyFile])) + if err != nil { + return nil, err + } + + if foundKeysFile && !foundKeyFile { + debug.Log("found default layout at %v", dir) + return &DefaultLayout{ + Path: dir, + Join: repo.Join, + }, nil + } + + if foundKeyFile && !foundKeysFile { + debug.Log("found s3 layout at %v", dir) + return &S3LegacyLayout{ + Path: dir, + Join: repo.Join, + }, nil + } + + debug.Log("layout detection failed") + return nil, ErrLayoutDetectionFailed +} + +// ParseLayout parses the config string and returns a Layout. When layout is +// the empty string, DetectLayout is used. If that fails, defaultLayout is used. +func ParseLayout(repo Filesystem, layout, defaultLayout, path string) (l Layout, err error) { + debug.Log("parse layout string %q for backend at %v", layout, path) + switch layout { + case "default": + l = &DefaultLayout{ + Path: path, + Join: repo.Join, + } + case "s3legacy": + l = &S3LegacyLayout{ + Path: path, + Join: repo.Join, + } + case "": + l, err = DetectLayout(repo, path) + + // use the default layout if auto detection failed + if errors.Cause(err) == ErrLayoutDetectionFailed && defaultLayout != "" { + debug.Log("error: %v, use default layout %v", err, defaultLayout) + return ParseLayout(repo, defaultLayout, "", path) + } + + if err != nil { + return nil, err + } + debug.Log("layout detected: %v", l) + default: + return nil, errors.Errorf("unknown backend layout string %q, may be one of: default, s3legacy", layout) + } + + return l, nil +} diff --git a/internal/backend/layout_default.go b/internal/backend/layout_default.go new file mode 100644 index 000000000..66ddf686c --- /dev/null +++ b/internal/backend/layout_default.go @@ -0,0 +1,79 @@ +package backend + +import ( + "encoding/hex" + + "github.com/restic/restic/internal/restic" +) + +// DefaultLayout implements the default layout for local and sftp backends, as +// described in the Design document. The `data` directory has one level of +// subdirs, two characters each (taken from the first two characters of the +// file name). 
+type DefaultLayout struct { + Path string + Join func(...string) string +} + +var defaultLayoutPaths = map[restic.FileType]string{ + restic.DataFile: "data", + restic.SnapshotFile: "snapshots", + restic.IndexFile: "index", + restic.LockFile: "locks", + restic.KeyFile: "keys", +} + +func (l *DefaultLayout) String() string { + return "" +} + +// Name returns the name for this layout. +func (l *DefaultLayout) Name() string { + return "default" +} + +// Dirname returns the directory path for a given file type and name. +func (l *DefaultLayout) Dirname(h restic.Handle) string { + p := defaultLayoutPaths[h.Type] + + if h.Type == restic.DataFile && len(h.Name) > 2 { + p = l.Join(p, h.Name[:2]) + "/" + } + + return l.Join(l.Path, p) + "/" +} + +// Filename returns a path to a file, including its name. +func (l *DefaultLayout) Filename(h restic.Handle) string { + name := h.Name + if h.Type == restic.ConfigFile { + return l.Join(l.Path, "config") + } + + return l.Join(l.Dirname(h), name) +} + +// Paths returns all directory names needed for a repo. +func (l *DefaultLayout) Paths() (dirs []string) { + for _, p := range defaultLayoutPaths { + dirs = append(dirs, l.Join(l.Path, p)) + } + + // also add subdirs + for i := 0; i < 256; i++ { + subdir := hex.EncodeToString([]byte{byte(i)}) + dirs = append(dirs, l.Join(l.Path, defaultLayoutPaths[restic.DataFile], subdir)) + } + + return dirs +} + +// Basedir returns the base dir name for type t. +func (l *DefaultLayout) Basedir(t restic.FileType) (dirname string, subdirs bool) { + if t == restic.DataFile { + subdirs = true + } + + dirname = l.Join(l.Path, defaultLayoutPaths[t]) + return +} diff --git a/internal/backend/layout_rest.go b/internal/backend/layout_rest.go new file mode 100644 index 000000000..1d65828a8 --- /dev/null +++ b/internal/backend/layout_rest.go @@ -0,0 +1,54 @@ +package backend + +import "github.com/restic/restic/internal/restic" + +// RESTLayout implements the default layout for the REST protocol. +type RESTLayout struct { + URL string + Path string + Join func(...string) string +} + +var restLayoutPaths = defaultLayoutPaths + +func (l *RESTLayout) String() string { + return "" +} + +// Name returns the name for this layout. +func (l *RESTLayout) Name() string { + return "rest" +} + +// Dirname returns the directory path for a given file type and name. +func (l *RESTLayout) Dirname(h restic.Handle) string { + if h.Type == restic.ConfigFile { + return l.URL + l.Join(l.Path, "/") + } + + return l.URL + l.Join(l.Path, "/", restLayoutPaths[h.Type]) + "/" +} + +// Filename returns a path to a file, including its name. +func (l *RESTLayout) Filename(h restic.Handle) string { + name := h.Name + + if h.Type == restic.ConfigFile { + name = "config" + } + + return l.URL + l.Join(l.Path, "/", restLayoutPaths[h.Type], name) +} + +// Paths returns all directory names +func (l *RESTLayout) Paths() (dirs []string) { + for _, p := range restLayoutPaths { + dirs = append(dirs, l.URL+l.Join(l.Path, p)) + } + return dirs +} + +// Basedir returns the base dir name for files of type t. 
+func (l *RESTLayout) Basedir(t restic.FileType) (dirname string, subdirs bool) { + return l.URL + l.Join(l.Path, restLayoutPaths[t]), false +} diff --git a/internal/backend/layout_s3legacy.go b/internal/backend/layout_s3legacy.go new file mode 100644 index 000000000..0fc0e38bc --- /dev/null +++ b/internal/backend/layout_s3legacy.go @@ -0,0 +1,77 @@ +package backend + +import "github.com/restic/restic/internal/restic" + +// S3LegacyLayout implements the old layout used for s3 cloud storage backends, as +// described in the Design document. +type S3LegacyLayout struct { + URL string + Path string + Join func(...string) string +} + +var s3LayoutPaths = map[restic.FileType]string{ + restic.DataFile: "data", + restic.SnapshotFile: "snapshot", + restic.IndexFile: "index", + restic.LockFile: "lock", + restic.KeyFile: "key", +} + +func (l *S3LegacyLayout) String() string { + return "" +} + +// Name returns the name for this layout. +func (l *S3LegacyLayout) Name() string { + return "s3legacy" +} + +// join calls Join with the first empty elements removed. +func (l *S3LegacyLayout) join(url string, items ...string) string { + for len(items) > 0 && items[0] == "" { + items = items[1:] + } + + path := l.Join(items...) + if path == "" || path[0] != '/' { + if url != "" && url[len(url)-1] != '/' { + url += "/" + } + } + + return url + path +} + +// Dirname returns the directory path for a given file type and name. +func (l *S3LegacyLayout) Dirname(h restic.Handle) string { + if h.Type == restic.ConfigFile { + return l.URL + l.Join(l.Path, "/") + } + + return l.join(l.URL, l.Path, s3LayoutPaths[h.Type]) + "/" +} + +// Filename returns a path to a file, including its name. +func (l *S3LegacyLayout) Filename(h restic.Handle) string { + name := h.Name + + if h.Type == restic.ConfigFile { + name = "config" + } + + return l.join(l.URL, l.Path, s3LayoutPaths[h.Type], name) +} + +// Paths returns all directory names +func (l *S3LegacyLayout) Paths() (dirs []string) { + for _, p := range s3LayoutPaths { + dirs = append(dirs, l.Join(l.Path, p)) + } + return dirs +} + +// Basedir returns the base dir name for type t. 
+func (l *S3LegacyLayout) Basedir(t restic.FileType) (dirname string, subdirs bool) { + return l.Join(l.Path, s3LayoutPaths[t]), false +} diff --git a/internal/backend/layout_test.go b/internal/backend/layout_test.go new file mode 100644 index 000000000..0c52b32bc --- /dev/null +++ b/internal/backend/layout_test.go @@ -0,0 +1,450 @@ +package backend + +import ( + "fmt" + "path" + "path/filepath" + "reflect" + "sort" + "testing" + + "github.com/restic/restic/internal/restic" + rtest "github.com/restic/restic/internal/test" +) + +func TestDefaultLayout(t *testing.T) { + tempdir, cleanup := rtest.TempDir(t) + defer cleanup() + + var tests = []struct { + path string + join func(...string) string + restic.Handle + filename string + }{ + { + tempdir, + filepath.Join, + restic.Handle{Type: restic.DataFile, Name: "0123456"}, + filepath.Join(tempdir, "data", "01", "0123456"), + }, + { + tempdir, + filepath.Join, + restic.Handle{Type: restic.ConfigFile, Name: "CFG"}, + filepath.Join(tempdir, "config"), + }, + { + tempdir, + filepath.Join, + restic.Handle{Type: restic.SnapshotFile, Name: "123456"}, + filepath.Join(tempdir, "snapshots", "123456"), + }, + { + tempdir, + filepath.Join, + restic.Handle{Type: restic.IndexFile, Name: "123456"}, + filepath.Join(tempdir, "index", "123456"), + }, + { + tempdir, + filepath.Join, + restic.Handle{Type: restic.LockFile, Name: "123456"}, + filepath.Join(tempdir, "locks", "123456"), + }, + { + tempdir, + filepath.Join, + restic.Handle{Type: restic.KeyFile, Name: "123456"}, + filepath.Join(tempdir, "keys", "123456"), + }, + { + "", + path.Join, + restic.Handle{Type: restic.DataFile, Name: "0123456"}, + "data/01/0123456", + }, + { + "", + path.Join, + restic.Handle{Type: restic.ConfigFile, Name: "CFG"}, + "config", + }, + { + "", + path.Join, + restic.Handle{Type: restic.SnapshotFile, Name: "123456"}, + "snapshots/123456", + }, + { + "", + path.Join, + restic.Handle{Type: restic.IndexFile, Name: "123456"}, + "index/123456", + }, + { + "", + path.Join, + restic.Handle{Type: restic.LockFile, Name: "123456"}, + "locks/123456", + }, + { + "", + path.Join, + restic.Handle{Type: restic.KeyFile, Name: "123456"}, + "keys/123456", + }, + } + + t.Run("Paths", func(t *testing.T) { + l := &DefaultLayout{ + Path: tempdir, + Join: filepath.Join, + } + + dirs := l.Paths() + + want := []string{ + filepath.Join(tempdir, "data"), + filepath.Join(tempdir, "snapshots"), + filepath.Join(tempdir, "index"), + filepath.Join(tempdir, "locks"), + filepath.Join(tempdir, "keys"), + } + + for i := 0; i < 256; i++ { + want = append(want, filepath.Join(tempdir, "data", fmt.Sprintf("%02x", i))) + } + + sort.Sort(sort.StringSlice(want)) + sort.Sort(sort.StringSlice(dirs)) + + if !reflect.DeepEqual(dirs, want) { + t.Fatalf("wrong paths returned, want:\n %v\ngot:\n %v", want, dirs) + } + }) + + for _, test := range tests { + t.Run(fmt.Sprintf("%v/%v", test.Type, test.Handle.Name), func(t *testing.T) { + l := &DefaultLayout{ + Path: test.path, + Join: test.join, + } + + filename := l.Filename(test.Handle) + if filename != test.filename { + t.Fatalf("wrong filename, want %v, got %v", test.filename, filename) + } + }) + } +} + +func TestRESTLayout(t *testing.T) { + path, cleanup := rtest.TempDir(t) + defer cleanup() + + var tests = []struct { + restic.Handle + filename string + }{ + { + restic.Handle{Type: restic.DataFile, Name: "0123456"}, + filepath.Join(path, "data", "0123456"), + }, + { + restic.Handle{Type: restic.ConfigFile, Name: "CFG"}, + filepath.Join(path, "config"), + }, + { + 
restic.Handle{Type: restic.SnapshotFile, Name: "123456"}, + filepath.Join(path, "snapshots", "123456"), + }, + { + restic.Handle{Type: restic.IndexFile, Name: "123456"}, + filepath.Join(path, "index", "123456"), + }, + { + restic.Handle{Type: restic.LockFile, Name: "123456"}, + filepath.Join(path, "locks", "123456"), + }, + { + restic.Handle{Type: restic.KeyFile, Name: "123456"}, + filepath.Join(path, "keys", "123456"), + }, + } + + l := &RESTLayout{ + Path: path, + Join: filepath.Join, + } + + t.Run("Paths", func(t *testing.T) { + dirs := l.Paths() + + want := []string{ + filepath.Join(path, "data"), + filepath.Join(path, "snapshots"), + filepath.Join(path, "index"), + filepath.Join(path, "locks"), + filepath.Join(path, "keys"), + } + + sort.Sort(sort.StringSlice(want)) + sort.Sort(sort.StringSlice(dirs)) + + if !reflect.DeepEqual(dirs, want) { + t.Fatalf("wrong paths returned, want:\n %v\ngot:\n %v", want, dirs) + } + }) + + for _, test := range tests { + t.Run(fmt.Sprintf("%v/%v", test.Type, test.Handle.Name), func(t *testing.T) { + filename := l.Filename(test.Handle) + if filename != test.filename { + t.Fatalf("wrong filename, want %v, got %v", test.filename, filename) + } + }) + } +} + +func TestRESTLayoutURLs(t *testing.T) { + var tests = []struct { + l Layout + h restic.Handle + fn string + dir string + }{ + { + &RESTLayout{URL: "https://hostname.foo", Path: "", Join: path.Join}, + restic.Handle{Type: restic.DataFile, Name: "foobar"}, + "https://hostname.foo/data/foobar", + "https://hostname.foo/data/", + }, + { + &RESTLayout{URL: "https://hostname.foo:1234/prefix/repo", Path: "/", Join: path.Join}, + restic.Handle{Type: restic.LockFile, Name: "foobar"}, + "https://hostname.foo:1234/prefix/repo/locks/foobar", + "https://hostname.foo:1234/prefix/repo/locks/", + }, + { + &RESTLayout{URL: "https://hostname.foo:1234/prefix/repo", Path: "/", Join: path.Join}, + restic.Handle{Type: restic.ConfigFile, Name: "foobar"}, + "https://hostname.foo:1234/prefix/repo/config", + "https://hostname.foo:1234/prefix/repo/", + }, + { + &S3LegacyLayout{URL: "https://hostname.foo", Path: "/", Join: path.Join}, + restic.Handle{Type: restic.DataFile, Name: "foobar"}, + "https://hostname.foo/data/foobar", + "https://hostname.foo/data/", + }, + { + &S3LegacyLayout{URL: "https://hostname.foo:1234/prefix/repo", Path: "", Join: path.Join}, + restic.Handle{Type: restic.LockFile, Name: "foobar"}, + "https://hostname.foo:1234/prefix/repo/lock/foobar", + "https://hostname.foo:1234/prefix/repo/lock/", + }, + { + &S3LegacyLayout{URL: "https://hostname.foo:1234/prefix/repo", Path: "/", Join: path.Join}, + restic.Handle{Type: restic.ConfigFile, Name: "foobar"}, + "https://hostname.foo:1234/prefix/repo/config", + "https://hostname.foo:1234/prefix/repo/", + }, + { + &S3LegacyLayout{URL: "", Path: "", Join: path.Join}, + restic.Handle{Type: restic.DataFile, Name: "foobar"}, + "data/foobar", + "data/", + }, + { + &S3LegacyLayout{URL: "", Path: "", Join: path.Join}, + restic.Handle{Type: restic.LockFile, Name: "foobar"}, + "lock/foobar", + "lock/", + }, + { + &S3LegacyLayout{URL: "", Path: "/", Join: path.Join}, + restic.Handle{Type: restic.ConfigFile, Name: "foobar"}, + "/config", + "/", + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("%T", test.l), func(t *testing.T) { + fn := test.l.Filename(test.h) + if fn != test.fn { + t.Fatalf("wrong filename, want %v, got %v", test.fn, fn) + } + + dir := test.l.Dirname(test.h) + if dir != test.dir { + t.Fatalf("wrong dirname, want %v, got %v", test.dir, dir) + } + }) + } 
+} + +func TestS3LegacyLayout(t *testing.T) { + path, cleanup := rtest.TempDir(t) + defer cleanup() + + var tests = []struct { + restic.Handle + filename string + }{ + { + restic.Handle{Type: restic.DataFile, Name: "0123456"}, + filepath.Join(path, "data", "0123456"), + }, + { + restic.Handle{Type: restic.ConfigFile, Name: "CFG"}, + filepath.Join(path, "config"), + }, + { + restic.Handle{Type: restic.SnapshotFile, Name: "123456"}, + filepath.Join(path, "snapshot", "123456"), + }, + { + restic.Handle{Type: restic.IndexFile, Name: "123456"}, + filepath.Join(path, "index", "123456"), + }, + { + restic.Handle{Type: restic.LockFile, Name: "123456"}, + filepath.Join(path, "lock", "123456"), + }, + { + restic.Handle{Type: restic.KeyFile, Name: "123456"}, + filepath.Join(path, "key", "123456"), + }, + } + + l := &S3LegacyLayout{ + Path: path, + Join: filepath.Join, + } + + t.Run("Paths", func(t *testing.T) { + dirs := l.Paths() + + want := []string{ + filepath.Join(path, "data"), + filepath.Join(path, "snapshot"), + filepath.Join(path, "index"), + filepath.Join(path, "lock"), + filepath.Join(path, "key"), + } + + sort.Sort(sort.StringSlice(want)) + sort.Sort(sort.StringSlice(dirs)) + + if !reflect.DeepEqual(dirs, want) { + t.Fatalf("wrong paths returned, want:\n %v\ngot:\n %v", want, dirs) + } + }) + + for _, test := range tests { + t.Run(fmt.Sprintf("%v/%v", test.Type, test.Handle.Name), func(t *testing.T) { + filename := l.Filename(test.Handle) + if filename != test.filename { + t.Fatalf("wrong filename, want %v, got %v", test.filename, filename) + } + }) + } +} + +func TestDetectLayout(t *testing.T) { + path, cleanup := rtest.TempDir(t) + defer cleanup() + + var tests = []struct { + filename string + want string + }{ + {"repo-layout-default.tar.gz", "*backend.DefaultLayout"}, + {"repo-layout-s3legacy.tar.gz", "*backend.S3LegacyLayout"}, + } + + var fs = &LocalFilesystem{} + for _, test := range tests { + for _, fs := range []Filesystem{fs, nil} { + t.Run(fmt.Sprintf("%v/fs-%T", test.filename, fs), func(t *testing.T) { + rtest.SetupTarTestFixture(t, path, filepath.Join("testdata", test.filename)) + + layout, err := DetectLayout(fs, filepath.Join(path, "repo")) + if err != nil { + t.Fatal(err) + } + + if layout == nil { + t.Fatal("wanted some layout, but detect returned nil") + } + + layoutName := fmt.Sprintf("%T", layout) + if layoutName != test.want { + t.Fatalf("want layout %v, got %v", test.want, layoutName) + } + + rtest.RemoveAll(t, filepath.Join(path, "repo")) + }) + } + } +} + +func TestParseLayout(t *testing.T) { + path, cleanup := rtest.TempDir(t) + defer cleanup() + + var tests = []struct { + layoutName string + defaultLayoutName string + want string + }{ + {"default", "", "*backend.DefaultLayout"}, + {"s3legacy", "", "*backend.S3LegacyLayout"}, + {"", "", "*backend.DefaultLayout"}, + } + + rtest.SetupTarTestFixture(t, path, filepath.Join("testdata", "repo-layout-default.tar.gz")) + + for _, test := range tests { + t.Run(test.layoutName, func(t *testing.T) { + layout, err := ParseLayout(&LocalFilesystem{}, test.layoutName, test.defaultLayoutName, filepath.Join(path, "repo")) + if err != nil { + t.Fatal(err) + } + + if layout == nil { + t.Fatal("wanted some layout, but detect returned nil") + } + + // test that the functions work (and don't panic) + _ = layout.Dirname(restic.Handle{Type: restic.DataFile}) + _ = layout.Filename(restic.Handle{Type: restic.DataFile, Name: "1234"}) + _ = layout.Paths() + + layoutName := fmt.Sprintf("%T", layout) + if layoutName != test.want { + 
t.Fatalf("want layout %v, got %v", test.want, layoutName) + } + }) + } +} + +func TestParseLayoutInvalid(t *testing.T) { + path, cleanup := rtest.TempDir(t) + defer cleanup() + + var invalidNames = []string{ + "foo", "bar", "local", + } + + for _, name := range invalidNames { + t.Run(name, func(t *testing.T) { + layout, err := ParseLayout(nil, name, "", path) + if err == nil { + t.Fatalf("expected error not found for layout name %v, layout is %v", name, layout) + } + }) + } +} diff --git a/internal/backend/local/config.go b/internal/backend/local/config.go new file mode 100644 index 000000000..13b7f67aa --- /dev/null +++ b/internal/backend/local/config.go @@ -0,0 +1,27 @@ +package local + +import ( + "strings" + + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/options" +) + +// Config holds all information needed to open a local repository. +type Config struct { + Path string + Layout string `option:"layout" help:"use this backend directory layout (default: auto-detect)"` +} + +func init() { + options.Register("local", Config{}) +} + +// ParseConfig parses a local backend config. +func ParseConfig(cfg string) (interface{}, error) { + if !strings.HasPrefix(cfg, "local:") { + return nil, errors.New(`invalid format, prefix "local" not found`) + } + + return Config{Path: cfg[6:]}, nil +} diff --git a/internal/backend/local/doc.go b/internal/backend/local/doc.go new file mode 100644 index 000000000..a2b160c4c --- /dev/null +++ b/internal/backend/local/doc.go @@ -0,0 +1,2 @@ +// Package local implements repository storage in a local directory. +package local diff --git a/internal/backend/local/layout_test.go b/internal/backend/local/layout_test.go new file mode 100644 index 000000000..f2531a332 --- /dev/null +++ b/internal/backend/local/layout_test.go @@ -0,0 +1,86 @@ +package local + +import ( + "context" + "path/filepath" + "testing" + + "github.com/restic/restic/internal/restic" + rtest "github.com/restic/restic/internal/test" +) + +func TestLayout(t *testing.T) { + path, cleanup := rtest.TempDir(t) + defer cleanup() + + var tests = []struct { + filename string + layout string + failureExpected bool + datafiles map[string]bool + }{ + {"repo-layout-default.tar.gz", "", false, map[string]bool{ + "aa464e9fd598fe4202492ee317ffa728e82fa83a1de1a61996e5bd2d6651646c": false, + "fc919a3b421850f6fa66ad22ebcf91e433e79ffef25becf8aef7c7b1eca91683": false, + "c089d62788da14f8b7cbf77188305c0874906f0b73d3fce5a8869050e8d0c0e1": false, + }}, + {"repo-layout-s3legacy.tar.gz", "", false, map[string]bool{ + "fc919a3b421850f6fa66ad22ebcf91e433e79ffef25becf8aef7c7b1eca91683": false, + "c089d62788da14f8b7cbf77188305c0874906f0b73d3fce5a8869050e8d0c0e1": false, + "aa464e9fd598fe4202492ee317ffa728e82fa83a1de1a61996e5bd2d6651646c": false, + }}, + } + + for _, test := range tests { + t.Run(test.filename, func(t *testing.T) { + rtest.SetupTarTestFixture(t, path, filepath.Join("..", "testdata", test.filename)) + + repo := filepath.Join(path, "repo") + be, err := Open(Config{ + Path: repo, + Layout: test.layout, + }) + if err != nil { + t.Fatal(err) + } + + if be == nil { + t.Fatalf("Open() returned nil but no error") + } + + datafiles := make(map[string]bool) + err = be.List(context.TODO(), restic.DataFile, func(fi restic.FileInfo) error { + datafiles[fi.Name] = false + return nil + }) + + if err != nil { + t.Fatalf("List() returned error %v", err) + } + + if len(datafiles) == 0 { + t.Errorf("List() returned zero data files") + } + + for id := range test.datafiles { + if _, ok := 
datafiles[id]; !ok { + t.Errorf("datafile with id %v not found", id) + } + + datafiles[id] = true + } + + for id, v := range datafiles { + if !v { + t.Errorf("unexpected id %v found", id) + } + } + + if err = be.Close(); err != nil { + t.Errorf("Close() returned error %v", err) + } + + rtest.RemoveAll(t, filepath.Join(path, "repo")) + }) + } +} diff --git a/internal/backend/local/local.go b/internal/backend/local/local.go new file mode 100644 index 000000000..6792241e1 --- /dev/null +++ b/internal/backend/local/local.go @@ -0,0 +1,295 @@ +package local + +import ( + "context" + "io" + "os" + "path/filepath" + + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/restic" + + "github.com/restic/restic/internal/backend" + "github.com/restic/restic/internal/debug" + "github.com/restic/restic/internal/fs" +) + +// Local is a backend in a local directory. +type Local struct { + Config + backend.Layout +} + +// ensure statically that *Local implements restic.Backend. +var _ restic.Backend = &Local{} + +const defaultLayout = "default" + +// dirExists returns true if the name exists and is a directory. +func dirExists(name string) bool { + f, err := fs.Open(name) + if err != nil { + return false + } + + fi, err := f.Stat() + if err != nil { + return false + } + + if err = f.Close(); err != nil { + return false + } + + return fi.IsDir() +} + +// Open opens the local backend as specified by config. +func Open(cfg Config) (*Local, error) { + debug.Log("open local backend at %v (layout %q)", cfg.Path, cfg.Layout) + l, err := backend.ParseLayout(&backend.LocalFilesystem{}, cfg.Layout, defaultLayout, cfg.Path) + if err != nil { + return nil, err + } + + return &Local{Config: cfg, Layout: l}, nil +} + +// Create creates all the necessary files and directories for a new local +// backend at dir. Afterwards a new config blob should be created. +func Create(cfg Config) (*Local, error) { + debug.Log("create local backend at %v (layout %q)", cfg.Path, cfg.Layout) + + l, err := backend.ParseLayout(&backend.LocalFilesystem{}, cfg.Layout, defaultLayout, cfg.Path) + if err != nil { + return nil, err + } + + be := &Local{ + Config: cfg, + Layout: l, + } + + // test if config file already exists + _, err = fs.Lstat(be.Filename(restic.Handle{Type: restic.ConfigFile})) + if err == nil { + return nil, errors.New("config file already exists") + } + + // create paths for data and refs + for _, d := range be.Paths() { + err := fs.MkdirAll(d, backend.Modes.Dir) + if err != nil { + return nil, errors.Wrap(err, "MkdirAll") + } + } + + return be, nil +} + +// Location returns this backend's location (the directory name). +func (b *Local) Location() string { + return b.Path +} + +// IsNotExist returns true if the error is caused by a non existing file. +func (b *Local) IsNotExist(err error) bool { + return os.IsNotExist(errors.Cause(err)) +} + +// Save stores data in the backend at the handle. 
+func (b *Local) Save(ctx context.Context, h restic.Handle, rd restic.RewindReader) error { + debug.Log("Save %v", h) + if err := h.Valid(); err != nil { + return err + } + + filename := b.Filename(h) + + // create new file + f, err := fs.OpenFile(filename, os.O_CREATE|os.O_EXCL|os.O_WRONLY, backend.Modes.File) + + if b.IsNotExist(err) { + debug.Log("error %v: creating dir", err) + + // error is caused by a missing directory, try to create it + mkdirErr := os.MkdirAll(filepath.Dir(filename), backend.Modes.Dir) + if mkdirErr != nil { + debug.Log("error creating dir %v: %v", filepath.Dir(filename), mkdirErr) + } else { + // try again + f, err = fs.OpenFile(filename, os.O_CREATE|os.O_EXCL|os.O_WRONLY, backend.Modes.File) + } + } + + if err != nil { + return errors.Wrap(err, "OpenFile") + } + + // save data, then sync + _, err = io.Copy(f, rd) + if err != nil { + _ = f.Close() + return errors.Wrap(err, "Write") + } + + if err = f.Sync(); err != nil { + _ = f.Close() + return errors.Wrap(err, "Sync") + } + + err = f.Close() + if err != nil { + return errors.Wrap(err, "Close") + } + + return setNewFileMode(filename, backend.Modes.File) +} + +// Load runs fn with a reader that yields the contents of the file at h at the +// given offset. +func (b *Local) Load(ctx context.Context, h restic.Handle, length int, offset int64, fn func(rd io.Reader) error) error { + return backend.DefaultLoad(ctx, h, length, offset, b.openReader, fn) +} + +func (b *Local) openReader(ctx context.Context, h restic.Handle, length int, offset int64) (io.ReadCloser, error) { + debug.Log("Load %v, length %v, offset %v", h, length, offset) + if err := h.Valid(); err != nil { + return nil, err + } + + if offset < 0 { + return nil, errors.New("offset is negative") + } + + f, err := fs.Open(b.Filename(h)) + if err != nil { + return nil, err + } + + if offset > 0 { + _, err = f.Seek(offset, 0) + if err != nil { + _ = f.Close() + return nil, err + } + } + + if length > 0 { + return backend.LimitReadCloser(f, int64(length)), nil + } + + return f, nil +} + +// Stat returns information about a blob. +func (b *Local) Stat(ctx context.Context, h restic.Handle) (restic.FileInfo, error) { + debug.Log("Stat %v", h) + if err := h.Valid(); err != nil { + return restic.FileInfo{}, err + } + + fi, err := fs.Stat(b.Filename(h)) + if err != nil { + return restic.FileInfo{}, errors.Wrap(err, "Stat") + } + + return restic.FileInfo{Size: fi.Size(), Name: h.Name}, nil +} + +// Test returns true if a blob of the given type and name exists in the backend. +func (b *Local) Test(ctx context.Context, h restic.Handle) (bool, error) { + debug.Log("Test %v", h) + _, err := fs.Stat(b.Filename(h)) + if err != nil { + if os.IsNotExist(errors.Cause(err)) { + return false, nil + } + return false, errors.Wrap(err, "Stat") + } + + return true, nil +} + +// Remove removes the blob with the given name and type. +func (b *Local) Remove(ctx context.Context, h restic.Handle) error { + debug.Log("Remove %v", h) + fn := b.Filename(h) + + // reset read-only flag + err := fs.Chmod(fn, 0666) + if err != nil { + return errors.Wrap(err, "Chmod") + } + + return fs.Remove(fn) +} + +func isFile(fi os.FileInfo) bool { + return fi.Mode()&(os.ModeType|os.ModeCharDevice) == 0 +} + +// List runs fn for each file in the backend which has the type t. When an +// error occurs (or fn returns an error), List stops and returns it. 
+func (b *Local) List(ctx context.Context, t restic.FileType, fn func(restic.FileInfo) error) error { + debug.Log("List %v", t) + + basedir, subdirs := b.Basedir(t) + err := fs.Walk(basedir, func(path string, fi os.FileInfo, err error) error { + debug.Log("walk on %v\n", path) + if err != nil { + return err + } + + if path == basedir { + return nil + } + + if !isFile(fi) { + return nil + } + + if fi.IsDir() && !subdirs { + return filepath.SkipDir + } + + debug.Log("send %v\n", filepath.Base(path)) + + rfi := restic.FileInfo{ + Name: filepath.Base(path), + Size: fi.Size(), + } + + if ctx.Err() != nil { + return ctx.Err() + } + + err = fn(rfi) + if err != nil { + return err + } + + return ctx.Err() + }) + + if b.IsNotExist(err) { + debug.Log("ignoring non-existing directory") + return nil + } + + return err +} + +// Delete removes the repository and all files. +func (b *Local) Delete(ctx context.Context) error { + debug.Log("Delete()") + return fs.RemoveAll(b.Path) +} + +// Close closes all open files. +func (b *Local) Close() error { + debug.Log("Close()") + // this does not need to do anything, all open files are closed within the + // same function. + return nil +} diff --git a/internal/backend/local/local_test.go b/internal/backend/local/local_test.go new file mode 100644 index 000000000..4ca3fdb71 --- /dev/null +++ b/internal/backend/local/local_test.go @@ -0,0 +1,136 @@ +package local_test + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" + + "github.com/restic/restic/internal/backend/local" + "github.com/restic/restic/internal/backend/test" + "github.com/restic/restic/internal/restic" + rtest "github.com/restic/restic/internal/test" +) + +func newTestSuite(t testing.TB) *test.Suite { + return &test.Suite{ + // NewConfig returns a config for a new temporary backend that will be used in tests. + NewConfig: func() (interface{}, error) { + dir, err := ioutil.TempDir(rtest.TestTempDir, "restic-test-local-") + if err != nil { + t.Fatal(err) + } + + t.Logf("create new backend at %v", dir) + + cfg := local.Config{ + Path: dir, + } + return cfg, nil + }, + + // CreateFn is a function that creates a temporary repository for the tests. + Create: func(config interface{}) (restic.Backend, error) { + cfg := config.(local.Config) + return local.Create(cfg) + }, + + // OpenFn is a function that opens a previously created temporary repository. + Open: func(config interface{}) (restic.Backend, error) { + cfg := config.(local.Config) + return local.Open(cfg) + }, + + // CleanupFn removes data created during the tests. 
+ Cleanup: func(config interface{}) error { + cfg := config.(local.Config) + if !rtest.TestCleanupTempDirs { + t.Logf("leaving test backend dir at %v", cfg.Path) + } + + rtest.RemoveAll(t, cfg.Path) + return nil + }, + } +} + +func TestBackend(t *testing.T) { + newTestSuite(t).RunTests(t) +} + +func BenchmarkBackend(t *testing.B) { + newTestSuite(t).RunBenchmarks(t) +} + +func readdirnames(t testing.TB, dir string) []string { + f, err := os.Open(dir) + if err != nil { + t.Fatal(err) + } + + entries, err := f.Readdirnames(-1) + if err != nil { + t.Fatal(err) + } + + err = f.Close() + if err != nil { + t.Fatal(err) + } + + return entries +} + +func empty(t testing.TB, dir string) { + entries := readdirnames(t, dir) + if len(entries) != 0 { + t.Fatalf("directory %v is not empty, contains: %v", dir, entries) + } +} + +func openclose(t testing.TB, dir string) { + cfg := local.Config{Path: dir} + + be, err := local.Open(cfg) + if err != nil { + t.Logf("Open returned error %v", err) + } + + if be != nil { + err = be.Close() + if err != nil { + t.Logf("Close returned error %v", err) + } + } +} + +func mkdir(t testing.TB, dir string) { + err := os.Mkdir(dir, 0700) + if err != nil { + t.Fatal(err) + } +} + +func removeAll(t testing.TB, dir string) { + err := os.RemoveAll(dir) + if err != nil { + t.Fatal(err) + } +} + +func TestOpenNotExistingDirectory(t *testing.T) { + dir, cleanup := rtest.TempDir(t) + defer cleanup() + + // local.Open must not create any files dirs in the repo + openclose(t, filepath.Join(dir, "repo")) + empty(t, dir) + + openclose(t, dir) + empty(t, dir) + + mkdir(t, filepath.Join(dir, "data")) + openclose(t, dir) + removeAll(t, filepath.Join(dir, "data")) + empty(t, dir) +} diff --git a/internal/backend/local/local_unix.go b/internal/backend/local/local_unix.go new file mode 100644 index 000000000..74fb47bf4 --- /dev/null +++ b/internal/backend/local/local_unix.go @@ -0,0 +1,14 @@ +// +build !windows + +package local + +import ( + "os" + + "github.com/restic/restic/internal/fs" +) + +// set file to readonly +func setNewFileMode(f string, mode os.FileMode) error { + return fs.Chmod(f, mode) +} diff --git a/internal/backend/local/local_windows.go b/internal/backend/local/local_windows.go new file mode 100644 index 000000000..be8f62d96 --- /dev/null +++ b/internal/backend/local/local_windows.go @@ -0,0 +1,12 @@ +package local + +import ( + "os" +) + +// We don't modify read-only on windows, +// since it will make us unable to delete the file, +// and this isn't common practice on this platform. +func setNewFileMode(f string, mode os.FileMode) error { + return nil +} diff --git a/internal/backend/location/location.go b/internal/backend/location/location.go new file mode 100644 index 000000000..68f4dfaff --- /dev/null +++ b/internal/backend/location/location.go @@ -0,0 +1,113 @@ +// Package location implements parsing the restic repository location from a string. 
+package location + +import ( + "strings" + + "github.com/restic/restic/internal/backend/azure" + "github.com/restic/restic/internal/backend/b2" + "github.com/restic/restic/internal/backend/gs" + "github.com/restic/restic/internal/backend/local" + "github.com/restic/restic/internal/backend/rclone" + "github.com/restic/restic/internal/backend/rest" + "github.com/restic/restic/internal/backend/s3" + "github.com/restic/restic/internal/backend/sftp" + "github.com/restic/restic/internal/backend/swift" + "github.com/restic/restic/internal/errors" +) + +// Location specifies the location of a repository, including the method of +// access and (possibly) credentials needed for access. +type Location struct { + Scheme string + Config interface{} +} + +type parser struct { + scheme string + parse func(string) (interface{}, error) +} + +// parsers is a list of valid config parsers for the backends. The first parser +// is the fallback and should always be set to the local backend. +var parsers = []parser{ + {"b2", b2.ParseConfig}, + {"local", local.ParseConfig}, + {"sftp", sftp.ParseConfig}, + {"s3", s3.ParseConfig}, + {"gs", gs.ParseConfig}, + {"azure", azure.ParseConfig}, + {"swift", swift.ParseConfig}, + {"rest", rest.ParseConfig}, + {"rclone", rclone.ParseConfig}, +} + +func isPath(s string) bool { + if strings.HasPrefix(s, "../") || strings.HasPrefix(s, `..\`) { + return true + } + + if strings.HasPrefix(s, "/") || strings.HasPrefix(s, `\`) { + return true + } + + if len(s) < 3 { + return false + } + + // check for drive paths + drive := s[0] + if !(drive >= 'a' && drive <= 'z') && !(drive >= 'A' && drive <= 'Z') { + return false + } + + if s[1] != ':' { + return false + } + + if s[2] != '\\' && s[2] != '/' { + return false + } + + return true +} + +// Parse extracts repository location information from the string s. If s +// starts with a backend name followed by a colon, that backend's Parse() +// function is called. Otherwise, the local backend is used which interprets s +// as the name of a directory. 
+func Parse(s string) (u Location, err error) { + scheme := extractScheme(s) + u.Scheme = scheme + + for _, parser := range parsers { + if parser.scheme != scheme { + continue + } + + u.Config, err = parser.parse(s) + if err != nil { + return Location{}, err + } + + return u, nil + } + + // if s is not a path or contains ":", it's ambiguous + if !isPath(s) && strings.ContainsRune(s, ':') { + return Location{}, errors.New("invalid backend\nIf the repo is in a local directory, you need to add a `local:` prefix") + } + + u.Scheme = "local" + u.Config, err = local.ParseConfig("local:" + s) + if err != nil { + return Location{}, err + } + + return u, nil +} + +func extractScheme(s string) string { + data := strings.SplitN(s, ":", 2) + return data[0] +} diff --git a/internal/backend/location/location_test.go b/internal/backend/location/location_test.go new file mode 100644 index 000000000..3160a2af7 --- /dev/null +++ b/internal/backend/location/location_test.go @@ -0,0 +1,339 @@ +package location + +import ( + "net/url" + "reflect" + "testing" + + "github.com/restic/restic/internal/backend/b2" + "github.com/restic/restic/internal/backend/local" + "github.com/restic/restic/internal/backend/rest" + "github.com/restic/restic/internal/backend/s3" + "github.com/restic/restic/internal/backend/sftp" + "github.com/restic/restic/internal/backend/swift" +) + +func parseURL(s string) *url.URL { + u, err := url.Parse(s) + if err != nil { + panic(err) + } + + return u +} + +var parseTests = []struct { + s string + u Location +}{ + { + "local:/srv/repo", + Location{Scheme: "local", + Config: local.Config{ + Path: "/srv/repo", + }, + }, + }, + { + "local:dir1/dir2", + Location{Scheme: "local", + Config: local.Config{ + Path: "dir1/dir2", + }, + }, + }, + { + "local:dir1/dir2", + Location{Scheme: "local", + Config: local.Config{ + Path: "dir1/dir2", + }, + }, + }, + { + "dir1/dir2", + Location{Scheme: "local", + Config: local.Config{ + Path: "dir1/dir2", + }, + }, + }, + { + "/dir1/dir2", + Location{Scheme: "local", + Config: local.Config{ + Path: "/dir1/dir2", + }, + }, + }, + { + "local:../dir1/dir2", + Location{Scheme: "local", + Config: local.Config{ + Path: "../dir1/dir2", + }, + }, + }, + { + "/dir1/dir2", + Location{Scheme: "local", + Config: local.Config{ + Path: "/dir1/dir2", + }, + }, + }, + { + "/dir1:foobar/dir2", + Location{Scheme: "local", + Config: local.Config{ + Path: "/dir1:foobar/dir2", + }, + }, + }, + { + `\dir1\foobar\dir2`, + Location{Scheme: "local", + Config: local.Config{ + Path: `\dir1\foobar\dir2`, + }, + }, + }, + { + `c:\dir1\foobar\dir2`, + Location{Scheme: "local", + Config: local.Config{ + Path: `c:\dir1\foobar\dir2`, + }, + }, + }, + { + `C:\Users\appveyor\AppData\Local\Temp\1\restic-test-879453535\repo`, + Location{Scheme: "local", + Config: local.Config{ + Path: `C:\Users\appveyor\AppData\Local\Temp\1\restic-test-879453535\repo`, + }, + }, + }, + { + `c:/dir1/foobar/dir2`, + Location{Scheme: "local", + Config: local.Config{ + Path: `c:/dir1/foobar/dir2`, + }, + }, + }, + { + "sftp:user@host:/srv/repo", + Location{Scheme: "sftp", + Config: sftp.Config{ + User: "user", + Host: "host", + Path: "/srv/repo", + }, + }, + }, + { + "sftp:host:/srv/repo", + Location{Scheme: "sftp", + Config: sftp.Config{ + User: "", + Host: "host", + Path: "/srv/repo", + }, + }, + }, + { + "sftp://user@host/srv/repo", + Location{Scheme: "sftp", + Config: sftp.Config{ + User: "user", + Host: "host", + Path: "srv/repo", + }, + }, + }, + { + "sftp://user@host//srv/repo", + Location{Scheme: "sftp", + 
Config: sftp.Config{ + User: "user", + Host: "host", + Path: "/srv/repo", + }, + }, + }, + + { + "s3://eu-central-1/bucketname", + Location{Scheme: "s3", + Config: s3.Config{ + Endpoint: "eu-central-1", + Bucket: "bucketname", + Prefix: "", + Connections: 5, + }, + }, + }, + { + "s3://hostname.foo/bucketname", + Location{Scheme: "s3", + Config: s3.Config{ + Endpoint: "hostname.foo", + Bucket: "bucketname", + Prefix: "", + Connections: 5, + }, + }, + }, + { + "s3://hostname.foo/bucketname/prefix/directory", + Location{Scheme: "s3", + Config: s3.Config{ + Endpoint: "hostname.foo", + Bucket: "bucketname", + Prefix: "prefix/directory", + Connections: 5, + }, + }, + }, + { + "s3:eu-central-1/repo", + Location{Scheme: "s3", + Config: s3.Config{ + Endpoint: "eu-central-1", + Bucket: "repo", + Prefix: "", + Connections: 5, + }, + }, + }, + { + "s3:eu-central-1/repo/prefix/directory", + Location{Scheme: "s3", + Config: s3.Config{ + Endpoint: "eu-central-1", + Bucket: "repo", + Prefix: "prefix/directory", + Connections: 5, + }, + }, + }, + { + "s3:https://hostname.foo/repo", + Location{Scheme: "s3", + Config: s3.Config{ + Endpoint: "hostname.foo", + Bucket: "repo", + Prefix: "", + Connections: 5, + }, + }, + }, + { + "s3:https://hostname.foo/repo/prefix/directory", + Location{Scheme: "s3", + Config: s3.Config{ + Endpoint: "hostname.foo", + Bucket: "repo", + Prefix: "prefix/directory", + Connections: 5, + }, + }, + }, + { + "s3:http://hostname.foo/repo", + Location{Scheme: "s3", + Config: s3.Config{ + Endpoint: "hostname.foo", + Bucket: "repo", + Prefix: "", + UseHTTP: true, + Connections: 5, + }, + }, + }, + { + "swift:container17:/", + Location{Scheme: "swift", + Config: swift.Config{ + Container: "container17", + Prefix: "", + Connections: 5, + }, + }, + }, + { + "swift:container17:/prefix97", + Location{Scheme: "swift", + Config: swift.Config{ + Container: "container17", + Prefix: "prefix97", + Connections: 5, + }, + }, + }, + { + "rest:http://hostname.foo:1234/", + Location{Scheme: "rest", + Config: rest.Config{ + URL: parseURL("http://hostname.foo:1234/"), + Connections: 5, + }, + }, + }, + { + "b2:bucketname:/prefix", Location{Scheme: "b2", + Config: b2.Config{ + Bucket: "bucketname", + Prefix: "prefix", + Connections: 5, + }, + }, + }, + { + "b2:bucketname", Location{Scheme: "b2", + Config: b2.Config{ + Bucket: "bucketname", + Prefix: "", + Connections: 5, + }, + }, + }, +} + +func TestParse(t *testing.T) { + for i, test := range parseTests { + t.Run(test.s, func(t *testing.T) { + u, err := Parse(test.s) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + if test.u.Scheme != u.Scheme { + t.Errorf("test %d: scheme does not match, want %q, got %q", + i, test.u.Scheme, u.Scheme) + } + + if !reflect.DeepEqual(test.u.Config, u.Config) { + t.Errorf("test %d: cfg map does not match, want:\n %#v\ngot: \n %#v", + i, test.u.Config, u.Config) + } + }) + } +} + +func TestInvalidScheme(t *testing.T) { + var invalidSchemes = []string{ + "foobar:xxx", + "foobar:/dir/dir2", + } + + for _, s := range invalidSchemes { + t.Run(s, func(t *testing.T) { + _, err := Parse(s) + if err == nil { + t.Fatalf("error for invalid location %q not found", s) + } + }) + } +} diff --git a/internal/backend/mem/mem_backend.go b/internal/backend/mem/mem_backend.go new file mode 100644 index 000000000..a8244be43 --- /dev/null +++ b/internal/backend/mem/mem_backend.go @@ -0,0 +1,223 @@ +package mem + +import ( + "bytes" + "context" + "io" + "io/ioutil" + "sync" + + "github.com/restic/restic/internal/backend" + 
"github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/restic" + + "github.com/restic/restic/internal/debug" +) + +type memMap map[restic.Handle][]byte + +// make sure that MemoryBackend implements backend.Backend +var _ restic.Backend = &MemoryBackend{} + +var errNotFound = errors.New("not found") + +// MemoryBackend is a mock backend that uses a map for storing all data in +// memory. This should only be used for tests. +type MemoryBackend struct { + data memMap + m sync.Mutex +} + +// New returns a new backend that saves all data in a map in memory. +func New() *MemoryBackend { + be := &MemoryBackend{ + data: make(memMap), + } + + debug.Log("created new memory backend") + + return be +} + +// Test returns whether a file exists. +func (be *MemoryBackend) Test(ctx context.Context, h restic.Handle) (bool, error) { + be.m.Lock() + defer be.m.Unlock() + + debug.Log("Test %v", h) + + if _, ok := be.data[h]; ok { + return true, nil + } + + return false, nil +} + +// IsNotExist returns true if the file does not exist. +func (be *MemoryBackend) IsNotExist(err error) bool { + return errors.Cause(err) == errNotFound +} + +// Save adds new Data to the backend. +func (be *MemoryBackend) Save(ctx context.Context, h restic.Handle, rd restic.RewindReader) error { + if err := h.Valid(); err != nil { + return err + } + + be.m.Lock() + defer be.m.Unlock() + + if h.Type == restic.ConfigFile { + h.Name = "" + } + + if _, ok := be.data[h]; ok { + return errors.New("file already exists") + } + + buf, err := ioutil.ReadAll(rd) + if err != nil { + return err + } + + be.data[h] = buf + debug.Log("saved %v bytes at %v", len(buf), h) + + return nil +} + +// Load runs fn with a reader that yields the contents of the file at h at the +// given offset. +func (be *MemoryBackend) Load(ctx context.Context, h restic.Handle, length int, offset int64, fn func(rd io.Reader) error) error { + return backend.DefaultLoad(ctx, h, length, offset, be.openReader, fn) +} + +func (be *MemoryBackend) openReader(ctx context.Context, h restic.Handle, length int, offset int64) (io.ReadCloser, error) { + if err := h.Valid(); err != nil { + return nil, err + } + + be.m.Lock() + defer be.m.Unlock() + + if h.Type == restic.ConfigFile { + h.Name = "" + } + + debug.Log("Load %v offset %v len %v", h, offset, length) + + if offset < 0 { + return nil, errors.New("offset is negative") + } + + if _, ok := be.data[h]; !ok { + return nil, errNotFound + } + + buf := be.data[h] + if offset > int64(len(buf)) { + return nil, errors.New("offset beyond end of file") + } + + buf = buf[offset:] + if length > 0 && len(buf) > length { + buf = buf[:length] + } + + return ioutil.NopCloser(bytes.NewReader(buf)), nil +} + +// Stat returns information about a file in the backend. +func (be *MemoryBackend) Stat(ctx context.Context, h restic.Handle) (restic.FileInfo, error) { + be.m.Lock() + defer be.m.Unlock() + + if err := h.Valid(); err != nil { + return restic.FileInfo{}, err + } + + if h.Type == restic.ConfigFile { + h.Name = "" + } + + debug.Log("stat %v", h) + + e, ok := be.data[h] + if !ok { + return restic.FileInfo{}, errNotFound + } + + return restic.FileInfo{Size: int64(len(e)), Name: h.Name}, nil +} + +// Remove deletes a file from the backend. 
+func (be *MemoryBackend) Remove(ctx context.Context, h restic.Handle) error { + be.m.Lock() + defer be.m.Unlock() + + debug.Log("Remove %v", h) + + if _, ok := be.data[h]; !ok { + return errNotFound + } + + delete(be.data, h) + + return nil +} + +// List returns a channel which yields entries from the backend. +func (be *MemoryBackend) List(ctx context.Context, t restic.FileType, fn func(restic.FileInfo) error) error { + entries := make(map[string]int64) + + be.m.Lock() + for entry, buf := range be.data { + if entry.Type != t { + continue + } + + entries[entry.Name] = int64(len(buf)) + } + be.m.Unlock() + + for name, size := range entries { + fi := restic.FileInfo{ + Name: name, + Size: size, + } + + if ctx.Err() != nil { + return ctx.Err() + } + + err := fn(fi) + if err != nil { + return err + } + + if ctx.Err() != nil { + return ctx.Err() + } + } + + return ctx.Err() +} + +// Location returns the location of the backend (RAM). +func (be *MemoryBackend) Location() string { + return "RAM" +} + +// Delete removes all data in the backend. +func (be *MemoryBackend) Delete(ctx context.Context) error { + be.m.Lock() + defer be.m.Unlock() + + be.data = make(memMap) + return nil +} + +// Close closes the backend. +func (be *MemoryBackend) Close() error { + return nil +} diff --git a/internal/backend/mem/mem_backend_test.go b/internal/backend/mem/mem_backend_test.go new file mode 100644 index 000000000..15e66ac83 --- /dev/null +++ b/internal/backend/mem/mem_backend_test.go @@ -0,0 +1,66 @@ +package mem_test + +import ( + "context" + "testing" + + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/restic" + + "github.com/restic/restic/internal/backend/mem" + "github.com/restic/restic/internal/backend/test" +) + +type memConfig struct { + be restic.Backend +} + +func newTestSuite() *test.Suite { + return &test.Suite{ + // NewConfig returns a config for a new temporary backend that will be used in tests. + NewConfig: func() (interface{}, error) { + return &memConfig{}, nil + }, + + // CreateFn is a function that creates a temporary repository for the tests. + Create: func(cfg interface{}) (restic.Backend, error) { + c := cfg.(*memConfig) + if c.be != nil { + ok, err := c.be.Test(context.TODO(), restic.Handle{Type: restic.ConfigFile}) + if err != nil { + return nil, err + } + + if ok { + return nil, errors.New("config already exists") + } + } + + c.be = mem.New() + return c.be, nil + }, + + // OpenFn is a function that opens a previously created temporary repository. + Open: func(cfg interface{}) (restic.Backend, error) { + c := cfg.(*memConfig) + if c.be == nil { + c.be = mem.New() + } + return c.be, nil + }, + + // CleanupFn removes data created during the tests. + Cleanup: func(cfg interface{}) error { + // no cleanup needed + return nil + }, + } +} + +func TestSuiteBackendMem(t *testing.T) { + newTestSuite().RunTests(t) +} + +func BenchmarkSuiteBackendMem(t *testing.B) { + newTestSuite().RunBenchmarks(t) +} diff --git a/internal/backend/paths.go b/internal/backend/paths.go new file mode 100644 index 000000000..940e9fcb9 --- /dev/null +++ b/internal/backend/paths.go @@ -0,0 +1,26 @@ +package backend + +import "os" + +// Paths contains the default paths for file-based backends (e.g. local). 
+var Paths = struct { + Data string + Snapshots string + Index string + Locks string + Keys string + Temp string + Config string +}{ + "data", + "snapshots", + "index", + "locks", + "keys", + "tmp", + "config", +} + +// Modes holds the default modes for directories and files for file-based +// backends. +var Modes = struct{ Dir, File os.FileMode }{0700, 0600} diff --git a/internal/backend/rclone/backend.go b/internal/backend/rclone/backend.go new file mode 100644 index 000000000..f54625f94 --- /dev/null +++ b/internal/backend/rclone/backend.go @@ -0,0 +1,317 @@ +package rclone + +import ( + "bufio" + "context" + "crypto/tls" + "fmt" + "io" + "math/rand" + "net" + "net/http" + "net/url" + "os" + "os/exec" + "sync" + "time" + + "github.com/restic/restic/internal/backend" + "github.com/restic/restic/internal/backend/rest" + "github.com/restic/restic/internal/debug" + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/limiter" + "golang.org/x/net/context/ctxhttp" + "golang.org/x/net/http2" +) + +// Backend is used to access data stored somewhere via rclone. +type Backend struct { + *rest.Backend + tr *http2.Transport + cmd *exec.Cmd + waitCh <-chan struct{} + waitResult error + wg *sync.WaitGroup + conn *StdioConn +} + +// run starts command with args and initializes the StdioConn. +func run(command string, args ...string) (*StdioConn, *exec.Cmd, *sync.WaitGroup, func() error, error) { + cmd := exec.Command(command, args...) + + p, err := cmd.StderrPipe() + if err != nil { + return nil, nil, nil, nil, err + } + + var wg sync.WaitGroup + + // start goroutine to add a prefix to all messages printed by to stderr by rclone + wg.Add(1) + go func() { + defer wg.Done() + sc := bufio.NewScanner(p) + for sc.Scan() { + fmt.Fprintf(os.Stderr, "rclone: %v\n", sc.Text()) + } + }() + + r, stdin, err := os.Pipe() + if err != nil { + return nil, nil, nil, nil, err + } + + stdout, w, err := os.Pipe() + if err != nil { + return nil, nil, nil, nil, err + } + + cmd.Stdin = r + cmd.Stdout = w + + bg, err := backend.StartForeground(cmd) + if err != nil { + return nil, nil, nil, nil, err + } + + c := &StdioConn{ + stdin: stdout, + stdout: stdin, + cmd: cmd, + } + + return c, cmd, &wg, bg, nil +} + +// wrappedConn adds bandwidth limiting capabilities to the StdioConn by +// wrapping the Read/Write methods. +type wrappedConn struct { + *StdioConn + io.Reader + io.Writer +} + +func (c wrappedConn) Read(p []byte) (int, error) { + return c.Reader.Read(p) +} + +func (c wrappedConn) Write(p []byte) (int, error) { + return c.Writer.Write(p) +} + +func wrapConn(c *StdioConn, lim limiter.Limiter) wrappedConn { + wc := wrappedConn{ + StdioConn: c, + Reader: c, + Writer: c, + } + if lim != nil { + wc.Reader = lim.Downstream(c) + wc.Writer = lim.UpstreamWriter(c) + } + + return wc +} + +// New initializes a Backend and starts the process. +func New(cfg Config, lim limiter.Limiter) (*Backend, error) { + var ( + args []string + err error + ) + + // build program args, start with the program + if cfg.Program != "" { + a, err := backend.SplitShellStrings(cfg.Program) + if err != nil { + return nil, err + } + args = append(args, a...) + } else { + args = append(args, "rclone") + } + + // then add the arguments + if cfg.Args != "" { + a, err := backend.SplitShellStrings(cfg.Args) + if err != nil { + return nil, err + } + + args = append(args, a...) 
+ } else { + args = append(args, + "serve", "restic", "--stdio", + "--b2-hard-delete", "--drive-use-trash=false") + } + + // finally, add the remote + args = append(args, cfg.Remote) + arg0, args := args[0], args[1:] + + debug.Log("running command: %v %v", arg0, args) + stdioConn, cmd, wg, bg, err := run(arg0, args...) + if err != nil { + return nil, err + } + + var conn net.Conn = stdioConn + if lim != nil { + conn = wrapConn(stdioConn, lim) + } + + dialCount := 0 + tr := &http2.Transport{ + AllowHTTP: true, // this is not really HTTP, just stdin/stdout + DialTLS: func(network, address string, cfg *tls.Config) (net.Conn, error) { + debug.Log("new connection requested, %v %v", network, address) + if dialCount > 0 { + panic("dial count > 0") + } + dialCount++ + return conn, nil + }, + } + + waitCh := make(chan struct{}) + be := &Backend{ + tr: tr, + cmd: cmd, + waitCh: waitCh, + conn: stdioConn, + wg: wg, + } + + wg.Add(1) + go func() { + defer wg.Done() + debug.Log("waiting for error result") + err := cmd.Wait() + debug.Log("Wait returned %v", err) + be.waitResult = err + close(waitCh) + }() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + wg.Add(1) + go func() { + defer wg.Done() + debug.Log("monitoring command to cancel first HTTP request context") + select { + case <-ctx.Done(): + debug.Log("context has been cancelled, returning") + case <-be.waitCh: + debug.Log("command has exited, cancelling context") + cancel() + } + }() + + // send an HTTP request to the base URL, see if the server is there + client := &http.Client{ + Transport: debug.RoundTripper(tr), + Timeout: 60 * time.Second, + } + + // request a random file which does not exist. we just want to test when + // rclone is able to accept HTTP requests. + url := fmt.Sprintf("http://localhost/file-%d", rand.Uint64()) + + req, err := http.NewRequest(http.MethodGet, url, nil) + if err != nil { + return nil, err + } + req.Header.Set("Accept", rest.ContentTypeV2) + req.Cancel = ctx.Done() + + res, err := ctxhttp.Do(ctx, client, req) + if err != nil { + bg() + _ = cmd.Process.Kill() + return nil, errors.Errorf("error talking HTTP to rclone: %v", err) + } + + debug.Log("HTTP status %q returned, moving instance to background", res.Status) + bg() + + return be, nil +} + +// Open starts an rclone process with the given config. +func Open(cfg Config, lim limiter.Limiter) (*Backend, error) { + be, err := New(cfg, lim) + if err != nil { + return nil, err + } + + url, err := url.Parse("http://localhost/") + if err != nil { + return nil, err + } + + restConfig := rest.Config{ + Connections: cfg.Connections, + URL: url, + } + + restBackend, err := rest.Open(restConfig, debug.RoundTripper(be.tr)) + if err != nil { + return nil, err + } + + be.Backend = restBackend + return be, nil +} + +// Create initializes a new restic repo with clone. +func Create(cfg Config) (*Backend, error) { + be, err := New(cfg, nil) + if err != nil { + return nil, err + } + + debug.Log("new backend created") + + url, err := url.Parse("http://localhost/") + if err != nil { + return nil, err + } + + restConfig := rest.Config{ + Connections: 20, + URL: url, + } + + restBackend, err := rest.Create(restConfig, debug.RoundTripper(be.tr)) + if err != nil { + _ = be.Close() + return nil, err + } + + be.Backend = restBackend + return be, nil +} + +const waitForExit = 5 * time.Second + +// Close terminates the backend. 
+func (be *Backend) Close() error { + debug.Log("exiting rclone") + be.tr.CloseIdleConnections() + + select { + case <-be.waitCh: + debug.Log("rclone exited") + case <-time.After(waitForExit): + debug.Log("timeout, closing file descriptors") + err := be.conn.Close() + if err != nil { + return err + } + } + + be.wg.Wait() + debug.Log("wait for rclone returned: %v", be.waitResult) + return be.waitResult +} diff --git a/internal/backend/rclone/backend_test.go b/internal/backend/rclone/backend_test.go new file mode 100644 index 000000000..b7f3cebb4 --- /dev/null +++ b/internal/backend/rclone/backend_test.go @@ -0,0 +1,66 @@ +package rclone_test + +import ( + "os/exec" + "testing" + + "github.com/restic/restic/internal/backend/rclone" + "github.com/restic/restic/internal/backend/test" + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/restic" + rtest "github.com/restic/restic/internal/test" +) + +func newTestSuite(t testing.TB) *test.Suite { + dir, cleanup := rtest.TempDir(t) + + return &test.Suite{ + // NewConfig returns a config for a new temporary backend that will be used in tests. + NewConfig: func() (interface{}, error) { + t.Logf("use backend at %v", dir) + cfg := rclone.NewConfig() + cfg.Remote = dir + return cfg, nil + }, + + // CreateFn is a function that creates a temporary repository for the tests. + Create: func(config interface{}) (restic.Backend, error) { + t.Logf("Create()") + cfg := config.(rclone.Config) + be, err := rclone.Create(cfg) + if e, ok := errors.Cause(err).(*exec.Error); ok && e.Err == exec.ErrNotFound { + t.Skipf("program %q not found", e.Name) + return nil, nil + } + return be, err + }, + + // OpenFn is a function that opens a previously created temporary repository. + Open: func(config interface{}) (restic.Backend, error) { + t.Logf("Open()") + cfg := config.(rclone.Config) + return rclone.Open(cfg, nil) + }, + + // CleanupFn removes data created during the tests. + Cleanup: func(config interface{}) error { + t.Logf("cleanup dir %v", dir) + cleanup() + return nil + }, + } +} + +func TestBackendRclone(t *testing.T) { + defer func() { + if t.Skipped() { + rtest.SkipDisallowed(t, "restic/backend/rclone.TestBackendRclone") + } + }() + + newTestSuite(t).RunTests(t) +} + +func BenchmarkBackendREST(t *testing.B) { + newTestSuite(t).RunBenchmarks(t) +} diff --git a/internal/backend/rclone/config.go b/internal/backend/rclone/config.go new file mode 100644 index 000000000..c2c5d88f9 --- /dev/null +++ b/internal/backend/rclone/config.go @@ -0,0 +1,39 @@ +package rclone + +import ( + "strings" + + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/options" +) + +// Config contains all configuration necessary to start rclone. +type Config struct { + Program string `option:"program" help:"path to rclone (default: rclone)"` + Args string `option:"args" help:"arguments for running rclone (default: serve restic --stdio --b2-hard-delete --drive-use-trash=false)"` + Remote string + Connections uint `option:"connections" help:"set a limit for the number of concurrent connections (default: 5)"` +} + +func init() { + options.Register("rclone", Config{}) +} + +// NewConfig returns a new Config with the default values filled in. +func NewConfig() Config { + return Config{ + Connections: 5, + } +} + +// ParseConfig parses the string s and extracts the remote server URL. 
+func ParseConfig(s string) (interface{}, error) { + if !strings.HasPrefix(s, "rclone:") { + return nil, errors.New("invalid rclone backend specification") + } + + s = s[7:] + cfg := NewConfig() + cfg.Remote = s + return cfg, nil +} diff --git a/internal/backend/rclone/config_test.go b/internal/backend/rclone/config_test.go new file mode 100644 index 000000000..a59e5fb53 --- /dev/null +++ b/internal/backend/rclone/config_test.go @@ -0,0 +1,34 @@ +package rclone + +import ( + "reflect" + "testing" +) + +func TestParseConfig(t *testing.T) { + var tests = []struct { + s string + cfg Config + }{ + { + "rclone:local:foo:/bar", + Config{ + Remote: "local:foo:/bar", + Connections: 5, + }, + }, + } + + for _, test := range tests { + t.Run("", func(t *testing.T) { + cfg, err := ParseConfig(test.s) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(cfg, test.cfg) { + t.Fatalf("wrong config, want:\n %v\ngot:\n %v", test.cfg, cfg) + } + }) + } +} diff --git a/internal/backend/rclone/stdio_conn.go b/internal/backend/rclone/stdio_conn.go new file mode 100644 index 000000000..4abbb7c9a --- /dev/null +++ b/internal/backend/rclone/stdio_conn.go @@ -0,0 +1,74 @@ +package rclone + +import ( + "net" + "os" + "os/exec" + "sync" + + "github.com/restic/restic/internal/debug" +) + +// StdioConn implements a net.Conn via stdin/stdout. +type StdioConn struct { + stdin *os.File + stdout *os.File + cmd *exec.Cmd + close sync.Once +} + +func (s *StdioConn) Read(p []byte) (int, error) { + n, err := s.stdin.Read(p) + return n, err +} + +func (s *StdioConn) Write(p []byte) (int, error) { + n, err := s.stdout.Write(p) + return n, err +} + +// Close closes both streams. +func (s *StdioConn) Close() (err error) { + s.close.Do(func() { + debug.Log("close stdio connection") + var errs []error + + for _, f := range []func() error{s.stdin.Close, s.stdout.Close} { + err := f() + if err != nil { + errs = append(errs, err) + } + } + + if len(errs) > 0 { + err = errs[0] + } + }) + + return err +} + +// LocalAddr returns nil. +func (s *StdioConn) LocalAddr() net.Addr { + return Addr{} +} + +// RemoteAddr returns nil. +func (s *StdioConn) RemoteAddr() net.Addr { + return Addr{} +} + +// make sure StdioConn implements net.Conn +var _ net.Conn = &StdioConn{} + +// Addr implements net.Addr for stdin/stdout. +type Addr struct{} + +// Network returns the network type as a string. +func (a Addr) Network() string { + return "stdio" +} + +func (a Addr) String() string { + return "stdio" +} diff --git a/internal/backend/rclone/stdio_conn_go110.go b/internal/backend/rclone/stdio_conn_go110.go new file mode 100644 index 000000000..b21f65f04 --- /dev/null +++ b/internal/backend/rclone/stdio_conn_go110.go @@ -0,0 +1,25 @@ +// +build go1.10 + +package rclone + +import "time" + +// SetDeadline sets the read/write deadline. +func (s *StdioConn) SetDeadline(t time.Time) error { + err1 := s.stdin.SetReadDeadline(t) + err2 := s.stdout.SetWriteDeadline(t) + if err1 != nil { + return err1 + } + return err2 +} + +// SetReadDeadline sets the read/write deadline. +func (s *StdioConn) SetReadDeadline(t time.Time) error { + return s.stdin.SetReadDeadline(t) +} + +// SetWriteDeadline sets the read/write deadline. 
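+// This file is only compiled on Go >= 1.10 (see the build tag above),
+// where *os.File supports deadlines; the no-op fallback for older Go
+// versions lives in stdio_conn_other.go.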
+func (s *StdioConn) SetWriteDeadline(t time.Time) error { + return s.stdout.SetWriteDeadline(t) +} diff --git a/internal/backend/rclone/stdio_conn_other.go b/internal/backend/rclone/stdio_conn_other.go new file mode 100644 index 000000000..07f85961b --- /dev/null +++ b/internal/backend/rclone/stdio_conn_other.go @@ -0,0 +1,22 @@ +// +build !go1.10 + +package rclone + +import "time" + +// On Go < 1.10, it's not possible to set read/write deadlines on files, so we just ignore that. + +// SetDeadline sets the read/write deadline. +func (s *StdioConn) SetDeadline(t time.Time) error { + return nil +} + +// SetReadDeadline sets the read/write deadline. +func (s *StdioConn) SetReadDeadline(t time.Time) error { + return nil +} + +// SetWriteDeadline sets the read/write deadline. +func (s *StdioConn) SetWriteDeadline(t time.Time) error { + return nil +} diff --git a/internal/backend/rest/config.go b/internal/backend/rest/config.go new file mode 100644 index 000000000..60c6bf92b --- /dev/null +++ b/internal/backend/rest/config.go @@ -0,0 +1,48 @@ +package rest + +import ( + "net/url" + "strings" + + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/options" +) + +// Config contains all configuration necessary to connect to a REST server. +type Config struct { + URL *url.URL + Connections uint `option:"connections" help:"set a limit for the number of concurrent connections (default: 5)"` +} + +func init() { + options.Register("rest", Config{}) +} + +// NewConfig returns a new Config with the default values filled in. +func NewConfig() Config { + return Config{ + Connections: 5, + } +} + +// ParseConfig parses the string s and extracts the REST server URL. +func ParseConfig(s string) (interface{}, error) { + if !strings.HasPrefix(s, "rest:") { + return nil, errors.New("invalid REST backend specification") + } + + s = s[5:] + if !strings.HasSuffix(s, "/") { + s += "/" + } + + u, err := url.Parse(s) + + if err != nil { + return nil, errors.Wrap(err, "url.Parse") + } + + cfg := NewConfig() + cfg.URL = u + return cfg, nil +} diff --git a/internal/backend/rest/config_test.go b/internal/backend/rest/config_test.go new file mode 100644 index 000000000..2d8e32a73 --- /dev/null +++ b/internal/backend/rest/config_test.go @@ -0,0 +1,52 @@ +package rest + +import ( + "net/url" + "reflect" + "testing" +) + +func parseURL(s string) *url.URL { + u, err := url.Parse(s) + if err != nil { + panic(err) + } + + return u +} + +var configTests = []struct { + s string + cfg Config +}{ + { + s: "rest:http://localhost:1234", + cfg: Config{ + URL: parseURL("http://localhost:1234/"), + Connections: 5, + }, + }, + { + s: "rest:http://localhost:1234/", + cfg: Config{ + URL: parseURL("http://localhost:1234/"), + Connections: 5, + }, + }, +} + +func TestParseConfig(t *testing.T) { + for _, test := range configTests { + t.Run("", func(t *testing.T) { + cfg, err := ParseConfig(test.s) + if err != nil { + t.Fatalf("%s failed: %v", test.s, err) + } + + if !reflect.DeepEqual(cfg, test.cfg) { + t.Fatalf("\ninput: %s\n wrong config, want:\n %v\ngot:\n %v", + test.s, test.cfg, cfg) + } + }) + } +} diff --git a/internal/backend/rest/rest.go b/internal/backend/rest/rest.go new file mode 100644 index 000000000..3e41265a9 --- /dev/null +++ b/internal/backend/rest/rest.go @@ -0,0 +1,463 @@ +package rest + +import ( + "context" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "path" + "strings" + + "golang.org/x/net/context/ctxhttp" + + "github.com/restic/restic/internal/debug" + 
"github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/restic" + + "github.com/restic/restic/internal/backend" +) + +// make sure the rest backend implements restic.Backend +var _ restic.Backend = &Backend{} + +// Backend uses the REST protocol to access data stored on a server. +type Backend struct { + url *url.URL + sem *backend.Semaphore + client *http.Client + backend.Layout +} + +// the REST API protocol version is decided by HTTP request headers, these are the constants. +const ( + ContentTypeV1 = "application/vnd.x.restic.rest.v1" + ContentTypeV2 = "application/vnd.x.restic.rest.v2" +) + +// Open opens the REST backend with the given config. +func Open(cfg Config, rt http.RoundTripper) (*Backend, error) { + client := &http.Client{Transport: rt} + + sem, err := backend.NewSemaphore(cfg.Connections) + if err != nil { + return nil, err + } + + // use url without trailing slash for layout + url := cfg.URL.String() + if url[len(url)-1] == '/' { + url = url[:len(url)-1] + } + + be := &Backend{ + url: cfg.URL, + client: client, + Layout: &backend.RESTLayout{URL: url, Join: path.Join}, + sem: sem, + } + + return be, nil +} + +// Create creates a new REST on server configured in config. +func Create(cfg Config, rt http.RoundTripper) (*Backend, error) { + be, err := Open(cfg, rt) + if err != nil { + return nil, err + } + + _, err = be.Stat(context.TODO(), restic.Handle{Type: restic.ConfigFile}) + if err == nil { + return nil, errors.Fatal("config file already exists") + } + + url := *cfg.URL + values := url.Query() + values.Set("create", "true") + url.RawQuery = values.Encode() + + resp, err := be.client.Post(url.String(), "binary/octet-stream", strings.NewReader("")) + if err != nil { + return nil, err + } + + if resp.StatusCode != http.StatusOK { + return nil, errors.Fatalf("server response unexpected: %v (%v)", resp.Status, resp.StatusCode) + } + + _, err = io.Copy(ioutil.Discard, resp.Body) + if err != nil { + return nil, err + } + + err = resp.Body.Close() + if err != nil { + return nil, err + } + + return be, nil +} + +// Location returns this backend's location (the server's URL). +func (b *Backend) Location() string { + return b.url.String() +} + +// Save stores data in the backend at the handle. +func (b *Backend) Save(ctx context.Context, h restic.Handle, rd restic.RewindReader) error { + if err := h.Valid(); err != nil { + return err + } + + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + // make sure that client.Post() cannot close the reader by wrapping it + req, err := http.NewRequest(http.MethodPost, b.Filename(h), ioutil.NopCloser(rd)) + if err != nil { + return errors.Wrap(err, "NewRequest") + } + req.Header.Set("Content-Type", "application/octet-stream") + req.Header.Set("Accept", ContentTypeV2) + + // explicitly set the content length, this prevents chunked encoding and + // let's the server know what's coming. + req.ContentLength = rd.Length() + + b.sem.GetToken() + resp, err := ctxhttp.Do(ctx, b.client, req) + b.sem.ReleaseToken() + + if resp != nil { + defer func() { + _, _ = io.Copy(ioutil.Discard, resp.Body) + e := resp.Body.Close() + + if err == nil { + err = errors.Wrap(e, "Close") + } + }() + } + + if err != nil { + return errors.Wrap(err, "client.Post") + } + + if resp.StatusCode != 200 { + return errors.Errorf("server response unexpected: %v (%v)", resp.Status, resp.StatusCode) + } + + return nil +} + +// ErrIsNotExist is returned whenever the requested file does not exist on the +// server. 
+type ErrIsNotExist struct { + restic.Handle +} + +func (e ErrIsNotExist) Error() string { + return fmt.Sprintf("%v does not exist", e.Handle) +} + +// IsNotExist returns true if the error was caused by a non-existing file. +func (b *Backend) IsNotExist(err error) bool { + err = errors.Cause(err) + _, ok := err.(ErrIsNotExist) + return ok +} + +// Load runs fn with a reader that yields the contents of the file at h at the +// given offset. +func (b *Backend) Load(ctx context.Context, h restic.Handle, length int, offset int64, fn func(rd io.Reader) error) error { + return backend.DefaultLoad(ctx, h, length, offset, b.openReader, fn) +} + +func (b *Backend) openReader(ctx context.Context, h restic.Handle, length int, offset int64) (io.ReadCloser, error) { + debug.Log("Load %v, length %v, offset %v", h, length, offset) + if err := h.Valid(); err != nil { + return nil, err + } + + if offset < 0 { + return nil, errors.New("offset is negative") + } + + if length < 0 { + return nil, errors.Errorf("invalid length %d", length) + } + + req, err := http.NewRequest("GET", b.Filename(h), nil) + if err != nil { + return nil, errors.Wrap(err, "http.NewRequest") + } + + byteRange := fmt.Sprintf("bytes=%d-", offset) + if length > 0 { + byteRange = fmt.Sprintf("bytes=%d-%d", offset, offset+int64(length)-1) + } + req.Header.Set("Range", byteRange) + req.Header.Set("Accept", ContentTypeV2) + debug.Log("Load(%v) send range %v", h, byteRange) + + b.sem.GetToken() + resp, err := ctxhttp.Do(ctx, b.client, req) + b.sem.ReleaseToken() + + if err != nil { + if resp != nil { + _, _ = io.Copy(ioutil.Discard, resp.Body) + _ = resp.Body.Close() + } + return nil, errors.Wrap(err, "client.Do") + } + + if resp.StatusCode == http.StatusNotFound { + _ = resp.Body.Close() + return nil, ErrIsNotExist{h} + } + + if resp.StatusCode != 200 && resp.StatusCode != 206 { + _ = resp.Body.Close() + return nil, errors.Errorf("unexpected HTTP response (%v): %v", resp.StatusCode, resp.Status) + } + + return resp.Body, nil +} + +// Stat returns information about a blob. +func (b *Backend) Stat(ctx context.Context, h restic.Handle) (restic.FileInfo, error) { + if err := h.Valid(); err != nil { + return restic.FileInfo{}, err + } + + req, err := http.NewRequest(http.MethodHead, b.Filename(h), nil) + if err != nil { + return restic.FileInfo{}, errors.Wrap(err, "NewRequest") + } + req.Header.Set("Accept", ContentTypeV2) + + b.sem.GetToken() + resp, err := ctxhttp.Do(ctx, b.client, req) + b.sem.ReleaseToken() + if err != nil { + return restic.FileInfo{}, errors.Wrap(err, "client.Head") + } + + _, _ = io.Copy(ioutil.Discard, resp.Body) + if err = resp.Body.Close(); err != nil { + return restic.FileInfo{}, errors.Wrap(err, "Close") + } + + if resp.StatusCode == http.StatusNotFound { + _ = resp.Body.Close() + return restic.FileInfo{}, ErrIsNotExist{h} + } + + if resp.StatusCode != 200 { + return restic.FileInfo{}, errors.Errorf("unexpected HTTP response (%v): %v", resp.StatusCode, resp.Status) + } + + if resp.ContentLength < 0 { + return restic.FileInfo{}, errors.New("negative content length") + } + + bi := restic.FileInfo{ + Size: resp.ContentLength, + Name: h.Name, + } + + return bi, nil +} + +// Test returns true if a blob of the given type and name exists in the backend. +func (b *Backend) Test(ctx context.Context, h restic.Handle) (bool, error) { + _, err := b.Stat(ctx, h) + if err != nil { + return false, nil + } + + return true, nil +} + +// Remove removes the blob with the given name and type. 
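+// A 404 response from the server is mapped to ErrIsNotExist, so callers
+// can distinguish a missing file from other failures via IsNotExist.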
+func (b *Backend) Remove(ctx context.Context, h restic.Handle) error { + if err := h.Valid(); err != nil { + return err + } + + req, err := http.NewRequest("DELETE", b.Filename(h), nil) + if err != nil { + return errors.Wrap(err, "http.NewRequest") + } + req.Header.Set("Accept", ContentTypeV2) + + b.sem.GetToken() + resp, err := ctxhttp.Do(ctx, b.client, req) + b.sem.ReleaseToken() + + if err != nil { + return errors.Wrap(err, "client.Do") + } + + if resp.StatusCode == http.StatusNotFound { + _ = resp.Body.Close() + return ErrIsNotExist{h} + } + + if resp.StatusCode != 200 { + return errors.Errorf("blob not removed, server response: %v (%v)", resp.Status, resp.StatusCode) + } + + _, err = io.Copy(ioutil.Discard, resp.Body) + if err != nil { + return errors.Wrap(err, "Copy") + } + + return errors.Wrap(resp.Body.Close(), "Close") +} + +// List runs fn for each file in the backend which has the type t. When an +// error occurs (or fn returns an error), List stops and returns it. +func (b *Backend) List(ctx context.Context, t restic.FileType, fn func(restic.FileInfo) error) error { + url := b.Dirname(restic.Handle{Type: t}) + if !strings.HasSuffix(url, "/") { + url += "/" + } + + req, err := http.NewRequest(http.MethodGet, url, nil) + if err != nil { + return errors.Wrap(err, "NewRequest") + } + req.Header.Set("Accept", ContentTypeV2) + + b.sem.GetToken() + resp, err := ctxhttp.Do(ctx, b.client, req) + b.sem.ReleaseToken() + + if err != nil { + return errors.Wrap(err, "List") + } + + if resp.StatusCode != 200 { + return errors.Errorf("List failed, server response: %v (%v)", resp.Status, resp.StatusCode) + } + + if resp.Header.Get("Content-Type") == ContentTypeV2 { + return b.listv2(ctx, t, resp, fn) + } + + return b.listv1(ctx, t, resp, fn) +} + +// listv1 uses the REST protocol v1, where a list HTTP request (e.g. `GET +// /data/`) only returns the names of the files, so we need to issue an HTTP +// HEAD request for each file. +func (b *Backend) listv1(ctx context.Context, t restic.FileType, resp *http.Response, fn func(restic.FileInfo) error) error { + debug.Log("parsing API v1 response") + dec := json.NewDecoder(resp.Body) + var list []string + if err := dec.Decode(&list); err != nil { + return errors.Wrap(err, "Decode") + } + + for _, m := range list { + fi, err := b.Stat(ctx, restic.Handle{Name: m, Type: t}) + if err != nil { + return err + } + + if ctx.Err() != nil { + return ctx.Err() + } + + fi.Name = m + err = fn(fi) + if err != nil { + return err + } + + if ctx.Err() != nil { + return ctx.Err() + } + } + + return ctx.Err() +} + +// listv2 uses the REST protocol v2, where a list HTTP request (e.g. `GET +// /data/`) returns the names and sizes of all files. +func (b *Backend) listv2(ctx context.Context, t restic.FileType, resp *http.Response, fn func(restic.FileInfo) error) error { + debug.Log("parsing API v2 response") + dec := json.NewDecoder(resp.Body) + + var list []struct { + Name string `json:"name"` + Size int64 `json:"size"` + } + if err := dec.Decode(&list); err != nil { + return errors.Wrap(err, "Decode") + } + + for _, item := range list { + if ctx.Err() != nil { + return ctx.Err() + } + + fi := restic.FileInfo{ + Name: item.Name, + Size: item.Size, + } + + err := fn(fi) + if err != nil { + return err + } + + if ctx.Err() != nil { + return ctx.Err() + } + } + + return ctx.Err() +} + +// Close closes all open files. +func (b *Backend) Close() error { + // this does not need to do anything, all open files are closed within the + // same function. 
+	return nil
+}
+
+// Remove keys for a specified backend type.
+func (b *Backend) removeKeys(ctx context.Context, t restic.FileType) error {
+	return b.List(ctx, t, func(fi restic.FileInfo) error {
+		return b.Remove(ctx, restic.Handle{Type: t, Name: fi.Name})
+	})
+}
+
+// Delete removes all data in the backend.
+func (b *Backend) Delete(ctx context.Context) error {
+	alltypes := []restic.FileType{
+		restic.DataFile,
+		restic.KeyFile,
+		restic.LockFile,
+		restic.SnapshotFile,
+		restic.IndexFile}
+
+	for _, t := range alltypes {
+		err := b.removeKeys(ctx, t)
+		if err != nil {
+			return err
+		}
+	}
+
+	err := b.Remove(ctx, restic.Handle{Type: restic.ConfigFile})
+	if err != nil && b.IsNotExist(err) {
+		return nil
+	}
+	return err
+}
diff --git a/internal/backend/rest/rest_int_test.go b/internal/backend/rest/rest_int_test.go
new file mode 100644
index 000000000..ea4e265fd
--- /dev/null
+++ b/internal/backend/rest/rest_int_test.go
@@ -0,0 +1,150 @@
+package rest_test
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+	"net/http/httptest"
+	"net/url"
+	"reflect"
+	"strconv"
+	"testing"
+
+	"github.com/restic/restic/internal/backend/rest"
+	"github.com/restic/restic/internal/restic"
+)
+
+func TestListAPI(t *testing.T) {
+	var tests = []struct {
+		Name string
+
+		ContentType string // response header
+		Data        string // response data
+		Requests    int
+
+		Result []restic.FileInfo
+	}{
+		{
+			Name:        "content-type-unknown",
+			ContentType: "application/octet-stream",
+			Data: `[
+				"1122e6749358b057fa1ac6b580a0fbe7a9a5fbc92e82743ee21aaf829624a985",
+				"3b6ec1af8d4f7099d0445b12fdb75b166ba19f789e5c48350c423dc3b3e68352",
+				"8271d221a60e0058e6c624f248d0080fc04f4fac07a28584a9b89d0eb69e189b"
+			]`,
+			Result: []restic.FileInfo{
+				{Name: "1122e6749358b057fa1ac6b580a0fbe7a9a5fbc92e82743ee21aaf829624a985", Size: 4386},
+				{Name: "3b6ec1af8d4f7099d0445b12fdb75b166ba19f789e5c48350c423dc3b3e68352", Size: 15214},
+				{Name: "8271d221a60e0058e6c624f248d0080fc04f4fac07a28584a9b89d0eb69e189b", Size: 33393},
+			},
+			Requests: 4,
+		},
+		{
+			Name:        "content-type-v1",
+			ContentType: "application/vnd.x.restic.rest.v1",
+			Data: `[
+				"1122e6749358b057fa1ac6b580a0fbe7a9a5fbc92e82743ee21aaf829624a985",
+				"3b6ec1af8d4f7099d0445b12fdb75b166ba19f789e5c48350c423dc3b3e68352",
+				"8271d221a60e0058e6c624f248d0080fc04f4fac07a28584a9b89d0eb69e189b"
+			]`,
+			Result: []restic.FileInfo{
+				{Name: "1122e6749358b057fa1ac6b580a0fbe7a9a5fbc92e82743ee21aaf829624a985", Size: 4386},
+				{Name: "3b6ec1af8d4f7099d0445b12fdb75b166ba19f789e5c48350c423dc3b3e68352", Size: 15214},
+				{Name: "8271d221a60e0058e6c624f248d0080fc04f4fac07a28584a9b89d0eb69e189b", Size: 33393},
+			},
+			Requests: 4,
+		},
+		{
+			Name:        "content-type-v2",
+			ContentType: "application/vnd.x.restic.rest.v2",
+			Data: `[
+				{"name": "1122e6749358b057fa1ac6b580a0fbe7a9a5fbc92e82743ee21aaf829624a985", "size": 1001},
+				{"name": "3b6ec1af8d4f7099d0445b12fdb75b166ba19f789e5c48350c423dc3b3e68352", "size": 1002},
+				{"name": "8271d221a60e0058e6c624f248d0080fc04f4fac07a28584a9b89d0eb69e189b", "size": 1003}
+			]`,
+			Result: []restic.FileInfo{
+				{Name: "1122e6749358b057fa1ac6b580a0fbe7a9a5fbc92e82743ee21aaf829624a985", Size: 1001},
+				{Name: "3b6ec1af8d4f7099d0445b12fdb75b166ba19f789e5c48350c423dc3b3e68352", Size: 1002},
+				{Name: "8271d221a60e0058e6c624f248d0080fc04f4fac07a28584a9b89d0eb69e189b", Size: 1003},
+			},
+			Requests: 1,
+		},
+	}
+
+	for _, test := range tests {
+		t.Run(test.Name, func(t *testing.T) {
+			numRequests := 0
+			srv := httptest.NewServer(http.HandlerFunc(func(res http.ResponseWriter, req *http.Request) {
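+				// This handler fakes a REST server: GET serves the canned
+				// listing with the configured Content-Type, and HEAD answers
+				// per-file size probes. HEAD is only used by the v1 protocol
+				// (and by the unknown-content-type fallback), which is why
+				// Requests is 4 (one GET plus three HEADs) there and 1 for v2.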
numRequests++ + t.Logf("req %v %v, accept: %v", req.Method, req.URL.Path, req.Header["Accept"]) + + var err error + switch { + case req.Method == "GET": + // list files in data/ + res.Header().Set("Content-Type", test.ContentType) + _, err = res.Write([]byte(test.Data)) + + if err != nil { + t.Fatal(err) + } + return + case req.Method == "HEAD": + // stat file in data/, use the first two bytes in the name + // of the file as the size :) + filename := req.URL.Path[6:] + len, err := strconv.ParseInt(filename[:4], 16, 64) + if err != nil { + t.Fatal(err) + } + + res.Header().Set("Content-Length", fmt.Sprintf("%d", len)) + res.WriteHeader(http.StatusOK) + return + } + + t.Errorf("unhandled request %v %v", req.Method, req.URL.Path) + })) + defer srv.Close() + + srvURL, err := url.Parse(srv.URL) + if err != nil { + t.Fatal(err) + } + + cfg := rest.Config{ + Connections: 5, + URL: srvURL, + } + + be, err := rest.Open(cfg, http.DefaultTransport) + if err != nil { + t.Fatal(err) + } + + var list []restic.FileInfo + err = be.List(context.TODO(), restic.DataFile, func(fi restic.FileInfo) error { + list = append(list, fi) + return nil + }) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(list, test.Result) { + t.Fatalf("wrong response returned, want:\n %v\ngot: %v", test.Result, list) + } + + if numRequests != test.Requests { + t.Fatalf("wrong number of HTTP requests executed, want %d, got %d", test.Requests, numRequests) + } + + defer func() { + err = be.Close() + if err != nil { + t.Fatal(err) + } + }() + }) + } +} diff --git a/internal/backend/rest/rest_test.go b/internal/backend/rest/rest_test.go new file mode 100644 index 000000000..486f241a9 --- /dev/null +++ b/internal/backend/rest/rest_test.go @@ -0,0 +1,154 @@ +package rest_test + +import ( + "context" + "net" + "net/url" + "os" + "os/exec" + "testing" + "time" + + "github.com/restic/restic/internal/backend" + "github.com/restic/restic/internal/backend/rest" + "github.com/restic/restic/internal/backend/test" + "github.com/restic/restic/internal/restic" + rtest "github.com/restic/restic/internal/test" +) + +func runRESTServer(ctx context.Context, t testing.TB, dir string) (*url.URL, func()) { + srv, err := exec.LookPath("rest-server") + if err != nil { + t.Skip(err) + } + + cmd := exec.CommandContext(ctx, srv, "--no-auth", "--path", dir) + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stdout + if err := cmd.Start(); err != nil { + t.Fatal(err) + } + + // wait until the TCP port is reachable + var success bool + for i := 0; i < 10; i++ { + time.Sleep(200 * time.Millisecond) + + c, err := net.Dial("tcp", "localhost:8000") + if err != nil { + continue + } + + success = true + if err := c.Close(); err != nil { + t.Fatal(err) + } + } + + if !success { + t.Fatal("unable to connect to rest server") + return nil, nil + } + + url, err := url.Parse("http://localhost:8000/restic-test") + if err != nil { + t.Fatal(err) + } + + cleanup := func() { + if err := cmd.Process.Kill(); err != nil { + t.Fatal(err) + } + + // ignore errors, we've killed the process + _ = cmd.Wait() + } + + return url, cleanup +} + +func newTestSuite(ctx context.Context, t testing.TB, url *url.URL, minimalData bool) *test.Suite { + tr, err := backend.Transport(backend.TransportOptions{}) + if err != nil { + t.Fatalf("cannot create transport for tests: %v", err) + } + + return &test.Suite{ + MinimalData: minimalData, + + // NewConfig returns a config for a new temporary backend that will be used in tests. 
+ NewConfig: func() (interface{}, error) { + cfg := rest.NewConfig() + cfg.URL = url + return cfg, nil + }, + + // CreateFn is a function that creates a temporary repository for the tests. + Create: func(config interface{}) (restic.Backend, error) { + cfg := config.(rest.Config) + return rest.Create(cfg, tr) + }, + + // OpenFn is a function that opens a previously created temporary repository. + Open: func(config interface{}) (restic.Backend, error) { + cfg := config.(rest.Config) + return rest.Open(cfg, tr) + }, + + // CleanupFn removes data created during the tests. + Cleanup: func(config interface{}) error { + return nil + }, + } +} + +func TestBackendREST(t *testing.T) { + defer func() { + if t.Skipped() { + rtest.SkipDisallowed(t, "restic/backend/rest.TestBackendREST") + } + }() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + dir, cleanup := rtest.TempDir(t) + defer cleanup() + + serverURL, cleanup := runRESTServer(ctx, t, dir) + defer cleanup() + + newTestSuite(ctx, t, serverURL, false).RunTests(t) +} + +func TestBackendRESTExternalServer(t *testing.T) { + repostr := os.Getenv("RESTIC_TEST_REST_REPOSITORY") + if repostr == "" { + t.Skipf("environment variable %v not set", "RESTIC_TEST_REST_REPOSITORY") + } + + cfg, err := rest.ParseConfig(repostr) + if err != nil { + t.Fatal(err) + } + + c := cfg.(rest.Config) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + newTestSuite(ctx, t, c.URL, true).RunTests(t) +} + +func BenchmarkBackendREST(t *testing.B) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + dir, cleanup := rtest.TempDir(t) + defer cleanup() + + serverURL, cleanup := runRESTServer(ctx, t, dir) + defer cleanup() + + newTestSuite(ctx, t, serverURL, false).RunBenchmarks(t) +} diff --git a/internal/backend/s3/config.go b/internal/backend/s3/config.go new file mode 100644 index 000000000..1760f48c2 --- /dev/null +++ b/internal/backend/s3/config.go @@ -0,0 +1,87 @@ +package s3 + +import ( + "net/url" + "path" + "strings" + + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/options" +) + +// Config contains all configuration necessary to connect to an s3 compatible +// server. +type Config struct { + Endpoint string + UseHTTP bool + KeyID, Secret string + Bucket string + Prefix string + Layout string `option:"layout" help:"use this backend layout (default: auto-detect)"` + + Connections uint `option:"connections" help:"set a limit for the number of concurrent connections (default: 5)"` + MaxRetries uint `option:"retries" help:"set the number of retries attempted"` +} + +// NewConfig returns a new Config with the default values filled in. +func NewConfig() Config { + return Config{ + Connections: 5, + } +} + +func init() { + options.Register("s3", Config{}) +} + +// ParseConfig parses the string s and extracts the s3 config. The two +// supported configuration formats are s3://host/bucketname/prefix and +// s3:host/bucketname/prefix. The host can also be a valid s3 region +// name. If no prefix is given the prefix "restic" will be used. 
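+//
+// Examples accepted by the parser below (illustrative, taken from the
+// tests in config_test.go):
+//
+//	s3://eu-central-1/bucketname/prefix/directory
+//	s3:hostname:9999/bucket
+//	s3:http://hostname:9999/bucket/prefix
+//
+// (Note: as implemented in createConfig below, an absent prefix is left
+// empty rather than defaulting to "restic".)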
+func ParseConfig(s string) (interface{}, error) { + switch { + case strings.HasPrefix(s, "s3:http"): + // assume that a URL has been specified, parse it and + // use the host as the endpoint and the path as the + // bucket name and prefix + url, err := url.Parse(s[3:]) + if err != nil { + return nil, errors.Wrap(err, "url.Parse") + } + + if url.Path == "" { + return nil, errors.New("s3: bucket name not found") + } + + path := strings.SplitN(url.Path[1:], "/", 2) + return createConfig(url.Host, path, url.Scheme == "http") + case strings.HasPrefix(s, "s3://"): + s = s[5:] + case strings.HasPrefix(s, "s3:"): + s = s[3:] + default: + return nil, errors.New("s3: invalid format") + } + // use the first entry of the path as the endpoint and the + // remainder as bucket name and prefix + path := strings.SplitN(s, "/", 3) + return createConfig(path[0], path[1:], false) +} + +func createConfig(endpoint string, p []string, useHTTP bool) (interface{}, error) { + if len(p) < 1 { + return nil, errors.New("s3: invalid format, host/region or bucket name not found") + } + + var prefix string + if len(p) > 1 && p[1] != "" { + prefix = path.Clean(p[1]) + } + + cfg := NewConfig() + cfg.Endpoint = endpoint + cfg.UseHTTP = useHTTP + cfg.Bucket = p[0] + cfg.Prefix = prefix + return cfg, nil +} diff --git a/internal/backend/s3/config_test.go b/internal/backend/s3/config_test.go new file mode 100644 index 000000000..77a31fda3 --- /dev/null +++ b/internal/backend/s3/config_test.go @@ -0,0 +1,113 @@ +package s3 + +import "testing" + +var configTests = []struct { + s string + cfg Config +}{ + {"s3://eu-central-1/bucketname", Config{ + Endpoint: "eu-central-1", + Bucket: "bucketname", + Prefix: "", + Connections: 5, + }}, + {"s3://eu-central-1/bucketname/", Config{ + Endpoint: "eu-central-1", + Bucket: "bucketname", + Prefix: "", + Connections: 5, + }}, + {"s3://eu-central-1/bucketname/prefix/directory", Config{ + Endpoint: "eu-central-1", + Bucket: "bucketname", + Prefix: "prefix/directory", + Connections: 5, + }}, + {"s3://eu-central-1/bucketname/prefix/directory/", Config{ + Endpoint: "eu-central-1", + Bucket: "bucketname", + Prefix: "prefix/directory", + Connections: 5, + }}, + {"s3:eu-central-1/foobar", Config{ + Endpoint: "eu-central-1", + Bucket: "foobar", + Prefix: "", + Connections: 5, + }}, + {"s3:eu-central-1/foobar/", Config{ + Endpoint: "eu-central-1", + Bucket: "foobar", + Prefix: "", + Connections: 5, + }}, + {"s3:eu-central-1/foobar/prefix/directory", Config{ + Endpoint: "eu-central-1", + Bucket: "foobar", + Prefix: "prefix/directory", + Connections: 5, + }}, + {"s3:eu-central-1/foobar/prefix/directory/", Config{ + Endpoint: "eu-central-1", + Bucket: "foobar", + Prefix: "prefix/directory", + Connections: 5, + }}, + {"s3:https://hostname:9999/foobar", Config{ + Endpoint: "hostname:9999", + Bucket: "foobar", + Prefix: "", + Connections: 5, + }}, + {"s3:https://hostname:9999/foobar/", Config{ + Endpoint: "hostname:9999", + Bucket: "foobar", + Prefix: "", + Connections: 5, + }}, + {"s3:http://hostname:9999/foobar", Config{ + Endpoint: "hostname:9999", + Bucket: "foobar", + Prefix: "", + UseHTTP: true, + Connections: 5, + }}, + {"s3:http://hostname:9999/foobar/", Config{ + Endpoint: "hostname:9999", + Bucket: "foobar", + Prefix: "", + UseHTTP: true, + Connections: 5, + }}, + {"s3:http://hostname:9999/bucket/prefix/directory", Config{ + Endpoint: "hostname:9999", + Bucket: "bucket", + Prefix: "prefix/directory", + UseHTTP: true, + Connections: 5, + }}, + {"s3:http://hostname:9999/bucket/prefix/directory/", 
Config{ + Endpoint: "hostname:9999", + Bucket: "bucket", + Prefix: "prefix/directory", + UseHTTP: true, + Connections: 5, + }}, +} + +func TestParseConfig(t *testing.T) { + for i, test := range configTests { + cfg, err := ParseConfig(test.s) + if err != nil { + t.Errorf("test %d:%s failed: %v", i, test.s, err) + continue + } + + if cfg != test.cfg { + t.Errorf("test %d:\ninput:\n %s\n wrong config, want:\n %v\ngot:\n %v", + i, test.s, test.cfg, cfg) + continue + } + } +} diff --git a/internal/backend/s3/s3.go b/internal/backend/s3/s3.go new file mode 100644 index 000000000..70a052868 --- /dev/null +++ b/internal/backend/s3/s3.go @@ -0,0 +1,523 @@ +package s3 + +import ( + "context" + "io" + "io/ioutil" + "net/http" + "os" + "path" + "strings" + "time" + + "github.com/restic/restic/internal/backend" + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/restic" + + "github.com/minio/minio-go" + "github.com/minio/minio-go/pkg/credentials" + + "github.com/restic/restic/internal/debug" +) + +// Backend stores data on an S3 endpoint. +type Backend struct { + client *minio.Client + sem *backend.Semaphore + cfg Config + backend.Layout +} + +// make sure that *Backend implements backend.Backend +var _ restic.Backend = &Backend{} + +const defaultLayout = "default" + +func open(cfg Config, rt http.RoundTripper) (*Backend, error) { + debug.Log("open, config %#v", cfg) + + if cfg.MaxRetries > 0 { + minio.MaxRetry = int(cfg.MaxRetries) + } + + // Chains all credential types, in the following order: + // - Static credentials provided by user + // - AWS env vars (i.e. AWS_ACCESS_KEY_ID) + // - Minio env vars (i.e. MINIO_ACCESS_KEY) + // - AWS creds file (i.e. AWS_SHARED_CREDENTIALS_FILE or ~/.aws/credentials) + // - Minio creds file (i.e. MINIO_SHARED_CREDENTIALS_FILE or ~/.mc/config.json) + // - IAM profile based credentials. (performs an HTTP + // call to a pre-defined endpoint, only valid inside + // configured ec2 instances) + creds := credentials.NewChainCredentials([]credentials.Provider{ + &credentials.EnvAWS{}, + &credentials.Static{ + Value: credentials.Value{ + AccessKeyID: cfg.KeyID, + SecretAccessKey: cfg.Secret, + }, + }, + &credentials.EnvMinio{}, + &credentials.FileAWSCredentials{}, + &credentials.FileMinioClient{}, + &credentials.IAM{ + Client: &http.Client{ + Transport: http.DefaultTransport, + }, + }, + }) + client, err := minio.NewWithCredentials(cfg.Endpoint, creds, !cfg.UseHTTP, "") + if err != nil { + return nil, errors.Wrap(err, "minio.NewWithCredentials") + } + + sem, err := backend.NewSemaphore(cfg.Connections) + if err != nil { + return nil, err + } + + be := &Backend{ + client: client, + sem: sem, + cfg: cfg, + } + + client.SetCustomTransport(rt) + + l, err := backend.ParseLayout(be, cfg.Layout, defaultLayout, cfg.Prefix) + if err != nil { + return nil, err + } + + be.Layout = l + + return be, nil +} + +// Open opens the S3 backend at bucket and region. The bucket is created if it +// does not exist yet. +func Open(cfg Config, rt http.RoundTripper) (restic.Backend, error) { + return open(cfg, rt) +} + +// Create opens the S3 backend at bucket and region and creates the bucket if +// it does not exist yet. 
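+// If the existence check itself fails with Access Denied, the bucket is
+// assumed to already exist and creation is skipped, so restricted
+// credentials that cannot probe the bucket still work.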
+func Create(cfg Config, rt http.RoundTripper) (restic.Backend, error) {
+	be, err := open(cfg, rt)
+	if err != nil {
+		return nil, errors.Wrap(err, "open")
+	}
+	found, err := be.client.BucketExists(cfg.Bucket)
+
+	if err != nil && be.IsAccessDenied(err) {
+		err = nil
+		found = true
+	}
+
+	if err != nil {
+		debug.Log("BucketExists(%v) returned err %v", cfg.Bucket, err)
+		return nil, errors.Wrap(err, "client.BucketExists")
+	}
+
+	if !found {
+		// create new bucket with default ACL in default region
+		err = be.client.MakeBucket(cfg.Bucket, "")
+		if err != nil {
+			return nil, errors.Wrap(err, "client.MakeBucket")
+		}
+	}
+
+	return be, nil
+}
+
+// IsAccessDenied returns true if the error is caused by Access Denied.
+func (be *Backend) IsAccessDenied(err error) bool {
+	debug.Log("IsAccessDenied(%T, %#v)", err, err)
+
+	if e, ok := errors.Cause(err).(minio.ErrorResponse); ok && e.Code == "AccessDenied" {
+		return true
+	}
+
+	return false
+}
+
+// IsNotExist returns true if the error is caused by a not existing file.
+func (be *Backend) IsNotExist(err error) bool {
+	debug.Log("IsNotExist(%T, %#v)", err, err)
+	if os.IsNotExist(errors.Cause(err)) {
+		return true
+	}
+
+	if e, ok := errors.Cause(err).(minio.ErrorResponse); ok && e.Code == "NoSuchKey" {
+		return true
+	}
+
+	return false
+}
+
+// Join combines path components with slashes.
+func (be *Backend) Join(p ...string) string {
+	return path.Join(p...)
+}
+
+type fileInfo struct {
+	name    string
+	size    int64
+	mode    os.FileMode
+	modTime time.Time
+	isDir   bool
+}
+
+func (fi fileInfo) Name() string       { return fi.name }    // base name of the file
+func (fi fileInfo) Size() int64        { return fi.size }    // length in bytes for regular files; system-dependent for others
+func (fi fileInfo) Mode() os.FileMode  { return fi.mode }    // file mode bits
+func (fi fileInfo) ModTime() time.Time { return fi.modTime } // modification time
+func (fi fileInfo) IsDir() bool        { return fi.isDir }   // abbreviation for Mode().IsDir()
+func (fi fileInfo) Sys() interface{}   { return nil }        // underlying data source (can return nil)
+
+// ReadDir returns the entries for a directory.
+func (be *Backend) ReadDir(dir string) (list []os.FileInfo, err error) {
+	debug.Log("ReadDir(%v)", dir)
+
+	// make sure dir ends with a slash
+	if dir[len(dir)-1] != '/' {
+		dir += "/"
+	}
+
+	done := make(chan struct{})
+	defer close(done)
+
+	for obj := range be.client.ListObjects(be.cfg.Bucket, dir, false, done) {
+		if obj.Err != nil {
+			return nil, obj.Err
+		}
+
+		if obj.Key == "" {
+			continue
+		}
+
+		name := strings.TrimPrefix(obj.Key, dir)
+		// Sometimes s3 returns an entry for the dir itself. Ignore it.
+		if name == "" {
+			continue
+		}
+		entry := fileInfo{
+			name:    name,
+			size:    obj.Size,
+			modTime: obj.LastModified,
+		}
+
+		if name[len(name)-1] == '/' {
+			entry.isDir = true
+			entry.mode = os.ModeDir | 0755
+			entry.name = name[:len(name)-1]
+		} else {
+			entry.mode = 0644
+		}
+
+		list = append(list, entry)
+	}
+
+	return list, nil
+}
+
+// Location returns this backend's location (the bucket name).
+func (be *Backend) Location() string {
+	return be.Join(be.cfg.Bucket, be.cfg.Prefix)
+}
+
+// Path returns the path in the bucket that is used for this backend.
+func (be *Backend) Path() string {
+	return be.cfg.Prefix
+}
+
+// lenForFile returns the length of the file.
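+// More precisely, it returns the number of bytes between the file's
+// current seek position and its end, computed from Stat and Seek below.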
+func lenForFile(f *os.File) (int64, error) { + fi, err := f.Stat() + if err != nil { + return 0, errors.Wrap(err, "Stat") + } + + pos, err := f.Seek(0, io.SeekCurrent) + if err != nil { + return 0, errors.Wrap(err, "Seek") + } + + size := fi.Size() - pos + return size, nil +} + +// Save stores data in the backend at the handle. +func (be *Backend) Save(ctx context.Context, h restic.Handle, rd restic.RewindReader) error { + debug.Log("Save %v", h) + + if err := h.Valid(); err != nil { + return err + } + + objName := be.Filename(h) + + be.sem.GetToken() + defer be.sem.ReleaseToken() + + opts := minio.PutObjectOptions{} + opts.ContentType = "application/octet-stream" + + debug.Log("PutObject(%v, %v, %v)", be.cfg.Bucket, objName, rd.Length()) + n, err := be.client.PutObjectWithContext(ctx, be.cfg.Bucket, objName, ioutil.NopCloser(rd), int64(rd.Length()), opts) + + debug.Log("%v -> %v bytes, err %#v: %v", objName, n, err, err) + + return errors.Wrap(err, "client.PutObject") +} + +// wrapReader wraps an io.ReadCloser to run an additional function on Close. +type wrapReader struct { + io.ReadCloser + f func() +} + +func (wr wrapReader) Close() error { + err := wr.ReadCloser.Close() + wr.f() + return err +} + +// Load runs fn with a reader that yields the contents of the file at h at the +// given offset. +func (be *Backend) Load(ctx context.Context, h restic.Handle, length int, offset int64, fn func(rd io.Reader) error) error { + return backend.DefaultLoad(ctx, h, length, offset, be.openReader, fn) +} + +func (be *Backend) openReader(ctx context.Context, h restic.Handle, length int, offset int64) (io.ReadCloser, error) { + debug.Log("Load %v, length %v, offset %v from %v", h, length, offset, be.Filename(h)) + if err := h.Valid(); err != nil { + return nil, err + } + + if offset < 0 { + return nil, errors.New("offset is negative") + } + + if length < 0 { + return nil, errors.Errorf("invalid length %d", length) + } + + objName := be.Filename(h) + opts := minio.GetObjectOptions{} + + var err error + if length > 0 { + debug.Log("range: %v-%v", offset, offset+int64(length)-1) + err = opts.SetRange(offset, offset+int64(length)-1) + } else if offset > 0 { + debug.Log("range: %v-", offset) + err = opts.SetRange(offset, 0) + } + + if err != nil { + return nil, errors.Wrap(err, "SetRange") + } + + be.sem.GetToken() + coreClient := minio.Core{Client: be.client} + rd, err := coreClient.GetObjectWithContext(ctx, be.cfg.Bucket, objName, opts) + if err != nil { + be.sem.ReleaseToken() + return nil, err + } + + closeRd := wrapReader{ + ReadCloser: rd, + f: func() { + debug.Log("Close()") + be.sem.ReleaseToken() + }, + } + + return closeRd, err +} + +// Stat returns information about a blob. +func (be *Backend) Stat(ctx context.Context, h restic.Handle) (bi restic.FileInfo, err error) { + debug.Log("%v", h) + + objName := be.Filename(h) + var obj *minio.Object + + opts := minio.GetObjectOptions{} + + be.sem.GetToken() + obj, err = be.client.GetObjectWithContext(ctx, be.cfg.Bucket, objName, opts) + if err != nil { + debug.Log("GetObject() err %v", err) + be.sem.ReleaseToken() + return restic.FileInfo{}, errors.Wrap(err, "client.GetObject") + } + + // make sure that the object is closed properly. 
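+	// (the deferred function below also releases the semaphore token and,
+	// if no other error occurred, reports the error from Close)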
+	defer func() {
+		e := obj.Close()
+		be.sem.ReleaseToken()
+		if err == nil {
+			err = errors.Wrap(e, "Close")
+		}
+	}()
+
+	fi, err := obj.Stat()
+	if err != nil {
+		debug.Log("Stat() err %v", err)
+		return restic.FileInfo{}, errors.Wrap(err, "Stat")
+	}
+
+	return restic.FileInfo{Size: fi.Size, Name: h.Name}, nil
+}
+
+// Test returns true if a blob of the given type and name exists in the backend.
+func (be *Backend) Test(ctx context.Context, h restic.Handle) (bool, error) {
+	found := false
+	objName := be.Filename(h)
+
+	be.sem.GetToken()
+	_, err := be.client.StatObject(be.cfg.Bucket, objName, minio.StatObjectOptions{})
+	be.sem.ReleaseToken()
+
+	if err == nil {
+		found = true
+	}
+
+	// If error, then not found
+	return found, nil
+}
+
+// Remove removes the blob with the given name and type.
+func (be *Backend) Remove(ctx context.Context, h restic.Handle) error {
+	objName := be.Filename(h)
+
+	be.sem.GetToken()
+	err := be.client.RemoveObject(be.cfg.Bucket, objName)
+	be.sem.ReleaseToken()
+
+	debug.Log("Remove(%v) at %v -> err %v", h, objName, err)
+
+	if be.IsNotExist(err) {
+		err = nil
+	}
+
+	return errors.Wrap(err, "client.RemoveObject")
+}
+
+// List runs fn for each file in the backend which has the type t. When an
+// error occurs (or fn returns an error), List stops and returns it.
+func (be *Backend) List(ctx context.Context, t restic.FileType, fn func(restic.FileInfo) error) error {
+	debug.Log("listing %v", t)
+
+	prefix, recursive := be.Basedir(t)
+
+	// make sure prefix ends with a slash
+	if !strings.HasSuffix(prefix, "/") {
+		prefix += "/"
+	}
+
+	ctx, cancel := context.WithCancel(ctx)
+	defer cancel()
+
+	// NB: unfortunately we can't protect this with be.sem.GetToken() here.
+	// Doing so would enable a deadlock situation (gh-1399), as ListObjects()
+	// starts its own goroutine and returns results via a channel.
+	listresp := be.client.ListObjects(be.cfg.Bucket, prefix, recursive, ctx.Done())
+
+	for obj := range listresp {
+		if obj.Err != nil {
+			return obj.Err
+		}
+
+		m := strings.TrimPrefix(obj.Key, prefix)
+		if m == "" {
+			continue
+		}
+
+		fi := restic.FileInfo{
+			Name: path.Base(m),
+			Size: obj.Size,
+		}
+
+		if ctx.Err() != nil {
+			return ctx.Err()
+		}
+
+		err := fn(fi)
+		if err != nil {
+			return err
+		}
+
+		if ctx.Err() != nil {
+			return ctx.Err()
+		}
+	}
+
+	return ctx.Err()
+}
+
+// Remove keys for a specified backend type.
+func (be *Backend) removeKeys(ctx context.Context, t restic.FileType) error {
+	return be.List(ctx, t, func(fi restic.FileInfo) error {
+		return be.Remove(ctx, restic.Handle{Type: t, Name: fi.Name})
+	})
+}
+
+// Delete removes all restic keys in the bucket. It will not remove the bucket itself.
+func (be *Backend) Delete(ctx context.Context) error {
+	alltypes := []restic.FileType{
+		restic.DataFile,
+		restic.KeyFile,
+		restic.LockFile,
+		restic.SnapshotFile,
+		restic.IndexFile}
+
+	for _, t := range alltypes {
+		err := be.removeKeys(ctx, t)
+		if err != nil {
+			return err
+		}
+	}
+
+	return be.Remove(ctx, restic.Handle{Type: restic.ConfigFile})
+}
+
+// Close does nothing
+func (be *Backend) Close() error { return nil }
+
+// Rename moves a file based on the new layout l.
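+// S3 has no native rename, so this is implemented as a server-side copy
+// followed by removing the old object; a copy that fails because the
+// source no longer exists is treated as an already-completed rename.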
+func (be *Backend) Rename(h restic.Handle, l backend.Layout) error { + debug.Log("Rename %v to %v", h, l) + oldname := be.Filename(h) + newname := l.Filename(h) + + if oldname == newname { + debug.Log(" %v is already renamed", newname) + return nil + } + + debug.Log(" %v -> %v", oldname, newname) + + src := minio.NewSourceInfo(be.cfg.Bucket, oldname, nil) + + dst, err := minio.NewDestinationInfo(be.cfg.Bucket, newname, nil, nil) + if err != nil { + return errors.Wrap(err, "NewDestinationInfo") + } + + err = be.client.CopyObject(dst, src) + if err != nil && be.IsNotExist(err) { + debug.Log("copy failed: %v, seems to already have been renamed", err) + return nil + } + + if err != nil { + debug.Log("copy failed: %v", err) + return err + } + + return be.client.RemoveObject(be.cfg.Bucket, oldname) +} diff --git a/internal/backend/s3/s3_test.go b/internal/backend/s3/s3_test.go new file mode 100644 index 000000000..35a80bf7e --- /dev/null +++ b/internal/backend/s3/s3_test.go @@ -0,0 +1,328 @@ +package s3_test + +import ( + "context" + "crypto/rand" + "encoding/hex" + "errors" + "fmt" + "io" + "net" + "net/http" + "os" + "os/exec" + "path/filepath" + "testing" + "time" + + "github.com/restic/restic/internal/backend" + "github.com/restic/restic/internal/backend/s3" + "github.com/restic/restic/internal/backend/test" + "github.com/restic/restic/internal/restic" + rtest "github.com/restic/restic/internal/test" +) + +func mkdir(t testing.TB, dir string) { + err := os.MkdirAll(dir, 0700) + if err != nil { + t.Fatal(err) + } +} + +func runMinio(ctx context.Context, t testing.TB, dir, key, secret string) func() { + mkdir(t, filepath.Join(dir, "config")) + mkdir(t, filepath.Join(dir, "root")) + + cmd := exec.CommandContext(ctx, "minio", + "server", + "--address", "127.0.0.1:9000", + "--config-dir", filepath.Join(dir, "config"), + filepath.Join(dir, "root")) + cmd.Env = append(os.Environ(), + "MINIO_ACCESS_KEY="+key, + "MINIO_SECRET_KEY="+secret, + ) + cmd.Stderr = os.Stderr + + err := cmd.Start() + if err != nil { + t.Fatal(err) + } + + // wait until the TCP port is reachable + var success bool + for i := 0; i < 100; i++ { + time.Sleep(200 * time.Millisecond) + + c, err := net.Dial("tcp", "localhost:9000") + if err == nil { + success = true + if err := c.Close(); err != nil { + t.Fatal(err) + } + break + } + } + + if !success { + t.Fatal("unable to connect to minio server") + return nil + } + + return func() { + err = cmd.Process.Kill() + if err != nil { + t.Fatal(err) + } + + // ignore errors, we've killed the process + _ = cmd.Wait() + } +} + +func newRandomCredentials(t testing.TB) (key, secret string) { + buf := make([]byte, 10) + _, err := io.ReadFull(rand.Reader, buf) + if err != nil { + t.Fatal(err) + } + key = hex.EncodeToString(buf) + + _, err = io.ReadFull(rand.Reader, buf) + if err != nil { + t.Fatal(err) + } + secret = hex.EncodeToString(buf) + + return key, secret +} + +type MinioTestConfig struct { + s3.Config + + tempdir string + removeTempdir func() + stopServer func() +} + +func createS3(t testing.TB, cfg MinioTestConfig, tr http.RoundTripper) (be restic.Backend, err error) { + for i := 0; i < 10; i++ { + be, err = s3.Create(cfg.Config, tr) + if err != nil { + t.Logf("s3 open: try %d: error %v", i, err) + time.Sleep(500 * time.Millisecond) + continue + } + + break + } + + return be, err +} + +func newMinioTestSuite(ctx context.Context, t testing.TB) *test.Suite { + tr, err := backend.Transport(backend.TransportOptions{}) + if err != nil { + t.Fatalf("cannot create transport for tests: 
%v", err) + } + + return &test.Suite{ + // NewConfig returns a config for a new temporary backend that will be used in tests. + NewConfig: func() (interface{}, error) { + cfg := MinioTestConfig{} + + cfg.tempdir, cfg.removeTempdir = rtest.TempDir(t) + key, secret := newRandomCredentials(t) + cfg.stopServer = runMinio(ctx, t, cfg.tempdir, key, secret) + + cfg.Config = s3.NewConfig() + cfg.Config.Endpoint = "localhost:9000" + cfg.Config.Bucket = "restictestbucket" + cfg.Config.Prefix = fmt.Sprintf("test-%d", time.Now().UnixNano()) + cfg.Config.UseHTTP = true + cfg.Config.KeyID = key + cfg.Config.Secret = secret + return cfg, nil + }, + + // CreateFn is a function that creates a temporary repository for the tests. + Create: func(config interface{}) (restic.Backend, error) { + cfg := config.(MinioTestConfig) + + be, err := createS3(t, cfg, tr) + if err != nil { + return nil, err + } + + exists, err := be.Test(context.TODO(), restic.Handle{Type: restic.ConfigFile}) + if err != nil { + return nil, err + } + + if exists { + return nil, errors.New("config already exists") + } + + return be, nil + }, + + // OpenFn is a function that opens a previously created temporary repository. + Open: func(config interface{}) (restic.Backend, error) { + cfg := config.(MinioTestConfig) + return s3.Open(cfg.Config, tr) + }, + + // CleanupFn removes data created during the tests. + Cleanup: func(config interface{}) error { + cfg := config.(MinioTestConfig) + if cfg.stopServer != nil { + cfg.stopServer() + } + if cfg.removeTempdir != nil { + cfg.removeTempdir() + } + return nil + }, + } +} + +func TestBackendMinio(t *testing.T) { + defer func() { + if t.Skipped() { + rtest.SkipDisallowed(t, "restic/backend/s3.TestBackendMinio") + } + }() + + // try to find a minio binary + _, err := exec.LookPath("minio") + if err != nil { + t.Skip(err) + return + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + newMinioTestSuite(ctx, t).RunTests(t) +} + +func BenchmarkBackendMinio(t *testing.B) { + // try to find a minio binary + _, err := exec.LookPath("minio") + if err != nil { + t.Skip(err) + return + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + newMinioTestSuite(ctx, t).RunBenchmarks(t) +} + +func newS3TestSuite(t testing.TB) *test.Suite { + tr, err := backend.Transport(backend.TransportOptions{}) + if err != nil { + t.Fatalf("cannot create transport for tests: %v", err) + } + + return &test.Suite{ + // do not use excessive data + MinimalData: true, + + // NewConfig returns a config for a new temporary backend that will be used in tests. + NewConfig: func() (interface{}, error) { + s3cfg, err := s3.ParseConfig(os.Getenv("RESTIC_TEST_S3_REPOSITORY")) + if err != nil { + return nil, err + } + + cfg := s3cfg.(s3.Config) + cfg.KeyID = os.Getenv("RESTIC_TEST_S3_KEY") + cfg.Secret = os.Getenv("RESTIC_TEST_S3_SECRET") + cfg.Prefix = fmt.Sprintf("test-%d", time.Now().UnixNano()) + return cfg, nil + }, + + // CreateFn is a function that creates a temporary repository for the tests. + Create: func(config interface{}) (restic.Backend, error) { + cfg := config.(s3.Config) + + be, err := s3.Create(cfg, tr) + if err != nil { + return nil, err + } + + exists, err := be.Test(context.TODO(), restic.Handle{Type: restic.ConfigFile}) + if err != nil { + return nil, err + } + + if exists { + return nil, errors.New("config already exists") + } + + return be, nil + }, + + // OpenFn is a function that opens a previously created temporary repository. 
+ Open: func(config interface{}) (restic.Backend, error) { + cfg := config.(s3.Config) + return s3.Open(cfg, tr) + }, + + // CleanupFn removes data created during the tests. + Cleanup: func(config interface{}) error { + cfg := config.(s3.Config) + + be, err := s3.Open(cfg, tr) + if err != nil { + return err + } + + return be.Delete(context.TODO()) + }, + } +} + +func TestBackendS3(t *testing.T) { + defer func() { + if t.Skipped() { + rtest.SkipDisallowed(t, "restic/backend/s3.TestBackendS3") + } + }() + + vars := []string{ + "RESTIC_TEST_S3_KEY", + "RESTIC_TEST_S3_SECRET", + "RESTIC_TEST_S3_REPOSITORY", + } + + for _, v := range vars { + if os.Getenv(v) == "" { + t.Skipf("environment variable %v not set", v) + return + } + } + + t.Logf("run tests") + newS3TestSuite(t).RunTests(t) +} + +func BenchmarkBackendS3(t *testing.B) { + vars := []string{ + "RESTIC_TEST_S3_KEY", + "RESTIC_TEST_S3_SECRET", + "RESTIC_TEST_S3_REPOSITORY", + } + + for _, v := range vars { + if os.Getenv(v) == "" { + t.Skipf("environment variable %v not set", v) + return + } + } + + t.Logf("run tests") + newS3TestSuite(t).RunBenchmarks(t) +} diff --git a/internal/backend/semaphore.go b/internal/backend/semaphore.go new file mode 100644 index 000000000..2146db2f3 --- /dev/null +++ b/internal/backend/semaphore.go @@ -0,0 +1,68 @@ +package backend + +import ( + "context" + "github.com/restic/restic/internal/errors" + "io" +) + +// Semaphore limits access to a restricted resource. +type Semaphore struct { + ch chan struct{} +} + +// NewSemaphore returns a new semaphore with capacity n. +func NewSemaphore(n uint) (*Semaphore, error) { + if n <= 0 { + return nil, errors.New("must be a positive number") + } + return &Semaphore{ + ch: make(chan struct{}, n), + }, nil +} + +// GetToken blocks until a Token is available. +func (s *Semaphore) GetToken() { + s.ch <- struct{}{} +} + +// ReleaseToken returns a token. +func (s *Semaphore) ReleaseToken() { + <-s.ch +} + +// ReleaseTokenOnClose wraps an io.ReadCloser to return a token on Close. Before returning the token, +// cancel, if provided, will be run to free up context resources. +func (s *Semaphore) ReleaseTokenOnClose(rc io.ReadCloser, cancel context.CancelFunc) io.ReadCloser { + return &wrapReader{rc, false, func() { + if cancel != nil { + cancel() + } + s.ReleaseToken() + }} +} + +// wrapReader wraps an io.ReadCloser to run an additional function on Close. +type wrapReader struct { + io.ReadCloser + eofSeen bool + f func() +} + +func (wr *wrapReader) Read(p []byte) (int, error) { + if wr.eofSeen { + return 0, io.EOF + } + + n, err := wr.ReadCloser.Read(p) + if err == io.EOF { + wr.eofSeen = true + } + return n, err +} + +func (wr *wrapReader) Close() error { + err := wr.ReadCloser.Close() + wr.f() + return err +} diff --git a/internal/backend/sftp/config.go b/internal/backend/sftp/config.go new file mode 100644 index 000000000..90fe52c39 --- /dev/null +++ b/internal/backend/sftp/config.go @@ -0,0 +1,78 @@ +package sftp + +import ( + "net/url" + "path" + "strings" + + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/options" +) + +// Config collects all information required to connect to an sftp server. 
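+// If Command is empty, the backend assembles an "ssh" command line itself
+// (see buildSSHCommand in sftp.go); setting it overrides how the sftp
+// connection is established.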
+type Config struct { + User, Host, Path string + Layout string `option:"layout" help:"use this backend directory layout (default: auto-detect)"` + Command string `option:"command" help:"specify command to create sftp connection"` +} + +func init() { + options.Register("sftp", Config{}) +} + +// ParseConfig parses the string s and extracts the sftp config. The +// supported configuration formats are sftp://user@host/directory +// and sftp:user@host:directory. The directory will be path Cleaned and can +// be an absolute path if it starts with a '/' (e.g. +// sftp://user@host//absolute and sftp:user@host:/absolute). +func ParseConfig(s string) (interface{}, error) { + var user, host, dir string + switch { + case strings.HasPrefix(s, "sftp://"): + // parse the "sftp://user@host/path" url format + url, err := url.Parse(s) + if err != nil { + return nil, errors.Wrap(err, "url.Parse") + } + if url.User != nil { + user = url.User.Username() + } + host = url.Host + dir = url.Path + if dir == "" { + return nil, errors.Errorf("invalid backend %q, no directory specified", s) + } + + dir = dir[1:] + case strings.HasPrefix(s, "sftp:"): + // parse the sftp:user@host:path format, which means we'll get + // "user@host:path" in s + s = s[5:] + // split user@host and path at the colon + data := strings.SplitN(s, ":", 2) + if len(data) < 2 { + return nil, errors.New("sftp: invalid format, hostname or path not found") + } + host = data[0] + dir = data[1] + // split user and host at the "@" + data = strings.SplitN(host, "@", 2) + if len(data) == 2 { + user = data[0] + host = data[1] + } + default: + return nil, errors.New(`invalid format, does not start with "sftp:"`) + } + + p := path.Clean(dir) + if strings.HasPrefix(p, "~") { + return nil, errors.Fatal("sftp path starts with the tilde (~) character, that fails for most sftp servers.\nUse a relative directory, most servers interpret this as relative to the user's home directory.") + } + + return Config{ + User: user, + Host: host, + Path: p, + }, nil +} diff --git a/internal/backend/sftp/config_test.go b/internal/backend/sftp/config_test.go new file mode 100644 index 000000000..44439005e --- /dev/null +++ b/internal/backend/sftp/config_test.go @@ -0,0 +1,90 @@ +package sftp + +import "testing" + +var configTests = []struct { + in string + cfg Config +}{ + // first form, user specified sftp://user@host/dir + { + "sftp://user@host/dir/subdir", + Config{User: "user", Host: "host", Path: "dir/subdir"}, + }, + { + "sftp://host/dir/subdir", + Config{Host: "host", Path: "dir/subdir"}, + }, + { + "sftp://host//dir/subdir", + Config{Host: "host", Path: "/dir/subdir"}, + }, + { + "sftp://host:10022//dir/subdir", + Config{Host: "host:10022", Path: "/dir/subdir"}, + }, + { + "sftp://user@host:10022//dir/subdir", + Config{User: "user", Host: "host:10022", Path: "/dir/subdir"}, + }, + { + "sftp://user@host/dir/subdir/../other", + Config{User: "user", Host: "host", Path: "dir/other"}, + }, + { + "sftp://user@host/dir///subdir", + Config{User: "user", Host: "host", Path: "dir/subdir"}, + }, + + // second form, user specified sftp:user@host:/dir + { + "sftp:user@host:/dir/subdir", + Config{User: "user", Host: "host", Path: "/dir/subdir"}, + }, + { + "sftp:host:../dir/subdir", + Config{Host: "host", Path: "../dir/subdir"}, + }, + { + "sftp:user@host:dir/subdir:suffix", + Config{User: "user", Host: "host", Path: "dir/subdir:suffix"}, + }, + { + "sftp:user@host:dir/subdir/../other", + Config{User: "user", Host: "host", Path: "dir/other"}, + }, + { + 
"sftp:user@host:dir///subdir", + Config{User: "user", Host: "host", Path: "dir/subdir"}, + }, +} + +func TestParseConfig(t *testing.T) { + for i, test := range configTests { + cfg, err := ParseConfig(test.in) + if err != nil { + t.Errorf("test %d:%s failed: %v", i, test.in, err) + continue + } + + if cfg != test.cfg { + t.Errorf("test %d:\ninput:\n %s\n wrong config, want:\n %v\ngot:\n %v", + i, test.in, test.cfg, cfg) + continue + } + } +} + +var configTestsInvalid = []string{ + "sftp://host:dir", +} + +func TestParseConfigInvalid(t *testing.T) { + for i, test := range configTestsInvalid { + _, err := ParseConfig(test) + if err == nil { + t.Errorf("test %d: invalid config %s did not return an error", i, test) + continue + } + } +} diff --git a/internal/backend/sftp/doc.go b/internal/backend/sftp/doc.go new file mode 100644 index 000000000..cd4a3a99b --- /dev/null +++ b/internal/backend/sftp/doc.go @@ -0,0 +1,3 @@ +// Package sftp implements repository storage in a directory on a remote server +// via the sftp protocol. +package sftp diff --git a/internal/backend/sftp/layout_test.go b/internal/backend/sftp/layout_test.go new file mode 100644 index 000000000..81e5f3240 --- /dev/null +++ b/internal/backend/sftp/layout_test.go @@ -0,0 +1,89 @@ +package sftp_test + +import ( + "context" + "fmt" + "path/filepath" + "testing" + + "github.com/restic/restic/internal/backend/sftp" + "github.com/restic/restic/internal/restic" + rtest "github.com/restic/restic/internal/test" +) + +func TestLayout(t *testing.T) { + if sftpServer == "" { + t.Skip("sftp server binary not available") + } + + path, cleanup := rtest.TempDir(t) + defer cleanup() + + var tests = []struct { + filename string + layout string + failureExpected bool + datafiles map[string]bool + }{ + {"repo-layout-default.tar.gz", "", false, map[string]bool{ + "aa464e9fd598fe4202492ee317ffa728e82fa83a1de1a61996e5bd2d6651646c": false, + "fc919a3b421850f6fa66ad22ebcf91e433e79ffef25becf8aef7c7b1eca91683": false, + "c089d62788da14f8b7cbf77188305c0874906f0b73d3fce5a8869050e8d0c0e1": false, + }}, + {"repo-layout-s3legacy.tar.gz", "", false, map[string]bool{ + "fc919a3b421850f6fa66ad22ebcf91e433e79ffef25becf8aef7c7b1eca91683": false, + "c089d62788da14f8b7cbf77188305c0874906f0b73d3fce5a8869050e8d0c0e1": false, + "aa464e9fd598fe4202492ee317ffa728e82fa83a1de1a61996e5bd2d6651646c": false, + }}, + } + + for _, test := range tests { + t.Run(test.filename, func(t *testing.T) { + rtest.SetupTarTestFixture(t, path, filepath.Join("..", "testdata", test.filename)) + + repo := filepath.Join(path, "repo") + be, err := sftp.Open(sftp.Config{ + Command: fmt.Sprintf("%q -e", sftpServer), + Path: repo, + Layout: test.layout, + }) + if err != nil { + t.Fatal(err) + } + + if be == nil { + t.Fatalf("Open() returned nil but no error") + } + + datafiles := make(map[string]bool) + err = be.List(context.TODO(), restic.DataFile, func(fi restic.FileInfo) error { + datafiles[fi.Name] = false + return nil + }) + + if len(datafiles) == 0 { + t.Errorf("List() returned zero data files") + } + + for id := range test.datafiles { + if _, ok := datafiles[id]; !ok { + t.Errorf("datafile with id %v not found", id) + } + + datafiles[id] = true + } + + for id, v := range datafiles { + if !v { + t.Errorf("unexpected id %v found", id) + } + } + + if err = be.Close(); err != nil { + t.Errorf("Close() returned error %v", err) + } + + rtest.RemoveAll(t, filepath.Join(path, "repo")) + }) + } +} diff --git a/internal/backend/sftp/sftp.go b/internal/backend/sftp/sftp.go new file mode 100644 index 
000000000..5ac60da82 --- /dev/null +++ b/internal/backend/sftp/sftp.go @@ -0,0 +1,537 @@ +package sftp + +import ( + "bufio" + "context" + "fmt" + "io" + "os" + "os/exec" + "path" + "strings" + "time" + + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/restic" + + "github.com/restic/restic/internal/backend" + "github.com/restic/restic/internal/debug" + + "github.com/pkg/sftp" +) + +// SFTP is a backend in a directory accessed via SFTP. +type SFTP struct { + c *sftp.Client + p string + + cmd *exec.Cmd + result <-chan error + + backend.Layout + Config +} + +var _ restic.Backend = &SFTP{} + +const defaultLayout = "default" + +func startClient(program string, args ...string) (*SFTP, error) { + debug.Log("start client %v %v", program, args) + // Connect to a remote host and request the sftp subsystem via the 'ssh' + // command. This assumes that passwordless login is correctly configured. + cmd := exec.Command(program, args...) + + // prefix the errors with the program name + stderr, err := cmd.StderrPipe() + if err != nil { + return nil, errors.Wrap(err, "cmd.StderrPipe") + } + + go func() { + sc := bufio.NewScanner(stderr) + for sc.Scan() { + fmt.Fprintf(os.Stderr, "subprocess %v: %v\n", program, sc.Text()) + } + }() + + // get stdin and stdout + wr, err := cmd.StdinPipe() + if err != nil { + return nil, errors.Wrap(err, "cmd.StdinPipe") + } + rd, err := cmd.StdoutPipe() + if err != nil { + return nil, errors.Wrap(err, "cmd.StdoutPipe") + } + + bg, err := backend.StartForeground(cmd) + if err != nil { + return nil, errors.Wrap(err, "cmd.Start") + } + + // wait in a different goroutine + ch := make(chan error, 1) + go func() { + err := cmd.Wait() + debug.Log("ssh command exited, err %v", err) + for { + ch <- errors.Wrap(err, "ssh command exited") + } + }() + + // open the SFTP session + client, err := sftp.NewClientPipe(rd, wr) + if err != nil { + return nil, errors.Errorf("unable to start the sftp session, error: %v", err) + } + + err = bg() + if err != nil { + return nil, errors.Wrap(err, "bg") + } + + return &SFTP{c: client, cmd: cmd, result: ch}, nil +} + +// clientError returns an error if the client has exited. Otherwise, nil is +// returned immediately. +func (r *SFTP) clientError() error { + select { + case err := <-r.result: + debug.Log("client has exited with err %v", err) + return err + default: + } + + return nil +} + +// Open opens an sftp backend as described by the config by running +// "ssh" with the appropriate arguments (or cfg.Command, if set). The function +// preExec is run just before, postExec just after starting a program. +func Open(cfg Config) (*SFTP, error) { + debug.Log("open backend with config %#v", cfg) + + cmd, args, err := buildSSHCommand(cfg) + if err != nil { + return nil, err + } + + sftp, err := startClient(cmd, args...) + if err != nil { + debug.Log("unable to start program: %v", err) + return nil, err + } + + sftp.Layout, err = backend.ParseLayout(sftp, cfg.Layout, defaultLayout, cfg.Path) + if err != nil { + return nil, err + } + + debug.Log("layout: %v\n", sftp.Layout) + + sftp.Config = cfg + sftp.p = cfg.Path + return sftp, nil +} + +func (r *SFTP) mkdirAllDataSubdirs() error { + for _, d := range r.Paths() { + err := r.mkdirAll(d, backend.Modes.Dir) + debug.Log("mkdirAll %v -> %v", d, err) + if err != nil { + return err + } + } + + return nil +} + +// Join combines path components with slashes (according to the sftp spec). +func (r *SFTP) Join(p ...string) string { + return path.Join(p...) 
+} + +// ReadDir returns the entries for a directory. +func (r *SFTP) ReadDir(dir string) ([]os.FileInfo, error) { + fi, err := r.c.ReadDir(dir) + + // sftp client does not specify dir name on error, so add it here + err = errors.Wrapf(err, "(%v)", dir) + + return fi, err +} + +// IsNotExist returns true if the error is caused by a not existing file. +func (r *SFTP) IsNotExist(err error) bool { + err = errors.Cause(err) + + if os.IsNotExist(err) { + return true + } + + statusError, ok := err.(*sftp.StatusError) + if !ok { + return false + } + + return statusError.Error() == `sftp: "No such file" (SSH_FX_NO_SUCH_FILE)` +} + +func buildSSHCommand(cfg Config) (cmd string, args []string, err error) { + if cfg.Command != "" { + args, err := backend.SplitShellStrings(cfg.Command) + if err != nil { + return "", nil, err + } + + return args[0], args[1:], nil + } + + cmd = "ssh" + + hostport := strings.Split(cfg.Host, ":") + args = []string{hostport[0]} + if len(hostport) > 1 { + args = append(args, "-p", hostport[1]) + } + if cfg.User != "" { + args = append(args, "-l") + args = append(args, cfg.User) + } + args = append(args, "-s") + args = append(args, "sftp") + return cmd, args, nil +} + +// Create creates an sftp backend as described by the config by running "ssh" +// with the appropriate arguments (or cfg.Command, if set). The function +// preExec is run just before, postExec just after starting a program. +func Create(cfg Config) (*SFTP, error) { + cmd, args, err := buildSSHCommand(cfg) + if err != nil { + return nil, err + } + + sftp, err := startClient(cmd, args...) + if err != nil { + debug.Log("unable to start program: %v", err) + return nil, err + } + + sftp.Layout, err = backend.ParseLayout(sftp, cfg.Layout, defaultLayout, cfg.Path) + if err != nil { + return nil, err + } + + // test if config file already exists + _, err = sftp.c.Lstat(Join(cfg.Path, backend.Paths.Config)) + if err == nil { + return nil, errors.New("config file already exists") + } + + // create paths for data and refs + if err = sftp.mkdirAllDataSubdirs(); err != nil { + return nil, err + } + + err = sftp.Close() + if err != nil { + return nil, errors.Wrap(err, "Close") + } + + // open backend + return Open(cfg) +} + +// Location returns this backend's location (the directory name). +func (r *SFTP) Location() string { + return r.p +} + +func (r *SFTP) mkdirAll(dir string, mode os.FileMode) error { + // check if directory already exists + fi, err := r.c.Lstat(dir) + if err == nil { + if fi.IsDir() { + return nil + } + + return errors.Errorf("mkdirAll(%s): entry exists but is not a directory", dir) + } + + // create parent directories + errMkdirAll := r.mkdirAll(path.Dir(dir), backend.Modes.Dir) + + // create directory + errMkdir := r.c.Mkdir(dir) + + // test if directory was created successfully + fi, err = r.c.Lstat(dir) + if err != nil { + // return previous errors + return errors.Errorf("mkdirAll(%s): unable to create directories: %v, %v", dir, errMkdirAll, errMkdir) + } + + if !fi.IsDir() { + return errors.Errorf("mkdirAll(%s): entry exists but is not a directory", dir) + } + + // set mode + return r.c.Chmod(dir, mode) +} + +// Join joins the given paths and cleans them afterwards. This always uses +// forward slashes, which is required by sftp. +func Join(parts ...string) string { + return path.Clean(path.Join(parts...)) +} + +// Save stores data in the backend at the handle. 
+func (r *SFTP) Save(ctx context.Context, h restic.Handle, rd restic.RewindReader) error { + debug.Log("Save %v", h) + if err := r.clientError(); err != nil { + return err + } + + if err := h.Valid(); err != nil { + return err + } + + filename := r.Filename(h) + + // create new file + f, err := r.c.OpenFile(filename, os.O_CREATE|os.O_EXCL|os.O_WRONLY) + + if r.IsNotExist(err) { + // error is caused by a missing directory, try to create it + mkdirErr := r.mkdirAll(r.Dirname(h), backend.Modes.Dir) + if mkdirErr != nil { + debug.Log("error creating dir %v: %v", r.Dirname(h), mkdirErr) + } else { + // try again + f, err = r.c.OpenFile(filename, os.O_CREATE|os.O_EXCL|os.O_WRONLY) + } + } + + if err != nil { + return errors.Wrap(err, "OpenFile") + } + + // save data + _, err = io.Copy(f, rd) + if err != nil { + _ = f.Close() + return errors.Wrap(err, "Write") + } + + err = f.Close() + if err != nil { + return errors.Wrap(err, "Close") + } + + return errors.Wrap(r.c.Chmod(filename, backend.Modes.File), "Chmod") +} + +// Load runs fn with a reader that yields the contents of the file at h at the +// given offset. +func (r *SFTP) Load(ctx context.Context, h restic.Handle, length int, offset int64, fn func(rd io.Reader) error) error { + return backend.DefaultLoad(ctx, h, length, offset, r.openReader, fn) +} + +func (r *SFTP) openReader(ctx context.Context, h restic.Handle, length int, offset int64) (io.ReadCloser, error) { + debug.Log("Load %v, length %v, offset %v", h, length, offset) + if err := h.Valid(); err != nil { + return nil, err + } + + if offset < 0 { + return nil, errors.New("offset is negative") + } + + f, err := r.c.Open(r.Filename(h)) + if err != nil { + return nil, err + } + + if offset > 0 { + _, err = f.Seek(offset, 0) + if err != nil { + _ = f.Close() + return nil, err + } + } + + if length > 0 { + return backend.LimitReadCloser(f, int64(length)), nil + } + + return f, nil +} + +// Stat returns information about a blob. +func (r *SFTP) Stat(ctx context.Context, h restic.Handle) (restic.FileInfo, error) { + debug.Log("Stat(%v)", h) + if err := r.clientError(); err != nil { + return restic.FileInfo{}, err + } + + if err := h.Valid(); err != nil { + return restic.FileInfo{}, err + } + + fi, err := r.c.Lstat(r.Filename(h)) + if err != nil { + return restic.FileInfo{}, errors.Wrap(err, "Lstat") + } + + return restic.FileInfo{Size: fi.Size(), Name: h.Name}, nil +} + +// Test returns true if a blob of the given type and name exists in the backend. +func (r *SFTP) Test(ctx context.Context, h restic.Handle) (bool, error) { + debug.Log("Test(%v)", h) + if err := r.clientError(); err != nil { + return false, err + } + + _, err := r.c.Lstat(r.Filename(h)) + if os.IsNotExist(errors.Cause(err)) { + return false, nil + } + + if err != nil { + return false, errors.Wrap(err, "Lstat") + } + + return true, nil +} + +// Remove removes the content stored at name. +func (r *SFTP) Remove(ctx context.Context, h restic.Handle) error { + debug.Log("Remove(%v)", h) + if err := r.clientError(); err != nil { + return err + } + + return r.c.Remove(r.Filename(h)) +} + +// List runs fn for each file in the backend which has the type t. When an +// error occurs (or fn returns an error), List stops and returns it. 
+func (r *SFTP) List(ctx context.Context, t restic.FileType, fn func(restic.FileInfo) error) error { + debug.Log("List %v", t) + + basedir, subdirs := r.Basedir(t) + walker := r.c.Walk(basedir) + for walker.Step() { + if walker.Err() != nil { + if r.IsNotExist(walker.Err()) { + debug.Log("ignoring non-existing directory") + return nil + } + return walker.Err() + } + + if walker.Path() == basedir { + continue + } + + if walker.Stat().IsDir() && !subdirs { + walker.SkipDir() + continue + } + + fi := walker.Stat() + if !fi.Mode().IsRegular() { + continue + } + + debug.Log("send %v\n", path.Base(walker.Path())) + + rfi := restic.FileInfo{ + Name: path.Base(walker.Path()), + Size: fi.Size(), + } + + if ctx.Err() != nil { + return ctx.Err() + } + + err := fn(rfi) + if err != nil { + return err + } + + if ctx.Err() != nil { + return ctx.Err() + } + } + + return ctx.Err() +} + +var closeTimeout = 2 * time.Second + +// Close closes the sftp connection and terminates the underlying command. +func (r *SFTP) Close() error { + debug.Log("Close") + if r == nil { + return nil + } + + err := r.c.Close() + debug.Log("Close returned error %v", err) + + // wait for closeTimeout before killing the process + select { + case err := <-r.result: + return err + case <-time.After(closeTimeout): + } + + if err := r.cmd.Process.Kill(); err != nil { + return err + } + + // get the error, but ignore it + <-r.result + return nil +} + +func (r *SFTP) deleteRecursive(name string) error { + entries, err := r.ReadDir(name) + if err != nil { + return errors.Wrap(err, "ReadDir") + } + + for _, fi := range entries { + itemName := r.Join(name, fi.Name()) + if fi.IsDir() { + err := r.deleteRecursive(itemName) + if err != nil { + return errors.Wrap(err, "ReadDir") + } + + err = r.c.RemoveDirectory(itemName) + if err != nil { + return errors.Wrap(err, "RemoveDirectory") + } + + continue + } + + err := r.c.Remove(itemName) + if err != nil { + return errors.Wrap(err, "ReadDir") + } + } + + return nil +} + +// Delete removes all data in the backend. +func (r *SFTP) Delete(context.Context) error { + return r.deleteRecursive(r.p) +} diff --git a/internal/backend/sftp/sftp_test.go b/internal/backend/sftp/sftp_test.go new file mode 100644 index 000000000..f32e04499 --- /dev/null +++ b/internal/backend/sftp/sftp_test.go @@ -0,0 +1,95 @@ +package sftp_test + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strings" + "testing" + + "github.com/restic/restic/internal/backend/sftp" + "github.com/restic/restic/internal/backend/test" + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/restic" + rtest "github.com/restic/restic/internal/test" +) + +func findSFTPServerBinary() string { + for _, dir := range strings.Split(rtest.TestSFTPPath, ":") { + testpath := filepath.Join(dir, "sftp-server") + _, err := os.Stat(testpath) + if !os.IsNotExist(errors.Cause(err)) { + return testpath + } + } + + return "" +} + +var sftpServer = findSFTPServerBinary() + +func newTestSuite(t testing.TB) *test.Suite { + return &test.Suite{ + // NewConfig returns a config for a new temporary backend that will be used in tests. + NewConfig: func() (interface{}, error) { + dir, err := ioutil.TempDir(rtest.TestTempDir, "restic-test-sftp-") + if err != nil { + t.Fatal(err) + } + + t.Logf("create new backend at %v", dir) + + cfg := sftp.Config{ + Path: dir, + Command: fmt.Sprintf("%q -e", sftpServer), + } + return cfg, nil + }, + + // CreateFn is a function that creates a temporary repository for the tests. 
+ Create: func(config interface{}) (restic.Backend, error) { + cfg := config.(sftp.Config) + return sftp.Create(cfg) + }, + + // OpenFn is a function that opens a previously created temporary repository. + Open: func(config interface{}) (restic.Backend, error) { + cfg := config.(sftp.Config) + return sftp.Open(cfg) + }, + + // CleanupFn removes data created during the tests. + Cleanup: func(config interface{}) error { + cfg := config.(sftp.Config) + if !rtest.TestCleanupTempDirs { + t.Logf("leaving test backend dir at %v", cfg.Path) + } + + rtest.RemoveAll(t, cfg.Path) + return nil + }, + } +} + +func TestBackendSFTP(t *testing.T) { + defer func() { + if t.Skipped() { + rtest.SkipDisallowed(t, "restic/backend/sftp.TestBackendSFTP") + } + }() + + if sftpServer == "" { + t.Skip("sftp server binary not found") + } + + newTestSuite(t).RunTests(t) +} + +func BenchmarkBackendSFTP(t *testing.B) { + if sftpServer == "" { + t.Skip("sftp server binary not found") + } + + newTestSuite(t).RunBenchmarks(t) +} diff --git a/internal/backend/sftp/sshcmd_test.go b/internal/backend/sftp/sshcmd_test.go new file mode 100644 index 000000000..dea811a35 --- /dev/null +++ b/internal/backend/sftp/sshcmd_test.go @@ -0,0 +1,52 @@ +package sftp + +import ( + "reflect" + "testing" +) + +var sshcmdTests = []struct { + cfg Config + cmd string + args []string +}{ + { + Config{User: "user", Host: "host", Path: "dir/subdir"}, + "ssh", + []string{"host", "-l", "user", "-s", "sftp"}, + }, + { + Config{Host: "host", Path: "dir/subdir"}, + "ssh", + []string{"host", "-s", "sftp"}, + }, + { + Config{Host: "host:10022", Path: "/dir/subdir"}, + "ssh", + []string{"host", "-p", "10022", "-s", "sftp"}, + }, + { + Config{User: "user", Host: "host:10022", Path: "/dir/subdir"}, + "ssh", + []string{"host", "-p", "10022", "-l", "user", "-s", "sftp"}, + }, +} + +func TestBuildSSHCommand(t *testing.T) { + for _, test := range sshcmdTests { + t.Run("", func(t *testing.T) { + cmd, args, err := buildSSHCommand(test.cfg) + if err != nil { + t.Fatal(err) + } + + if cmd != test.cmd { + t.Fatalf("cmd: want %v, got %v", test.cmd, cmd) + } + + if !reflect.DeepEqual(test.args, args) { + t.Fatalf("wrong args, want:\n %v\ngot:\n %v", test.args, args) + } + }) + } +} diff --git a/internal/backend/shell_split.go b/internal/backend/shell_split.go new file mode 100644 index 000000000..eff527616 --- /dev/null +++ b/internal/backend/shell_split.go @@ -0,0 +1,76 @@ +package backend + +import ( + "unicode" + + "github.com/restic/restic/internal/errors" +) + +// shellSplitter splits a command string into separater arguments. It supports +// single and double quoted strings. +type shellSplitter struct { + quote rune + lastChar rune +} + +func (s *shellSplitter) isSplitChar(c rune) bool { + // only test for quotes if the last char was not a backslash + if s.lastChar != '\\' { + + // quote ended + if s.quote != 0 && c == s.quote { + s.quote = 0 + return true + } + + // quote starts + if s.quote == 0 && (c == '"' || c == '\'') { + s.quote = c + return true + } + } + + s.lastChar = c + + // within quote + if s.quote != 0 { + return false + } + + // outside quote + return c == '\\' || unicode.IsSpace(c) +} + +// SplitShellStrings returns the list of shell strings from a shell command string. +func SplitShellStrings(data string) (strs []string, err error) { + s := &shellSplitter{} + + // derived from strings.SplitFunc + fieldStart := -1 // Set to -1 when looking for start of field. 
+ for i, rune := range data { + if s.isSplitChar(rune) { + if fieldStart >= 0 { + strs = append(strs, data[fieldStart:i]) + fieldStart = -1 + } + } else if fieldStart == -1 { + fieldStart = i + } + } + if fieldStart >= 0 { // Last field might end at EOF. + strs = append(strs, data[fieldStart:]) + } + + switch s.quote { + case '\'': + return nil, errors.New("single-quoted string not terminated") + case '"': + return nil, errors.New("double-quoted string not terminated") + } + + if len(strs) == 0 { + return nil, errors.New("command string is empty") + } + + return strs, nil +} diff --git a/internal/backend/shell_split_test.go b/internal/backend/shell_split_test.go new file mode 100644 index 000000000..40ae84c63 --- /dev/null +++ b/internal/backend/shell_split_test.go @@ -0,0 +1,105 @@ +package backend + +import ( + "reflect" + "testing" +) + +func TestShellSplitter(t *testing.T) { + var tests = []struct { + data string + args []string + }{ + { + `foo`, + []string{"foo"}, + }, + { + `'foo'`, + []string{"foo"}, + }, + { + `foo bar baz`, + []string{"foo", "bar", "baz"}, + }, + { + `foo 'bar' baz`, + []string{"foo", "bar", "baz"}, + }, + { + `'bar box' baz`, + []string{"bar box", "baz"}, + }, + { + `"bar 'box'" baz`, + []string{"bar 'box'", "baz"}, + }, + { + `'bar "box"' baz`, + []string{`bar "box"`, "baz"}, + }, + { + `\"bar box baz`, + []string{`"bar`, "box", "baz"}, + }, + { + `"bar/foo/x" "box baz"`, + []string{"bar/foo/x", "box baz"}, + }, + } + + for _, test := range tests { + t.Run("", func(t *testing.T) { + args, err := SplitShellStrings(test.data) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(args, test.args) { + t.Fatalf("wrong args returned, want:\n %#v\ngot:\n %#v", + test.args, args) + } + }) + } +} + +func TestShellSplitterInvalid(t *testing.T) { + var tests = []struct { + data string + err string + }{ + { + "foo'", + "single-quoted string not terminated", + }, + { + `foo"`, + "double-quoted string not terminated", + }, + { + "foo 'bar", + "single-quoted string not terminated", + }, + { + `foo "bar`, + "double-quoted string not terminated", + }, + } + + for _, test := range tests { + t.Run("", func(t *testing.T) { + args, err := SplitShellStrings(test.data) + if err == nil { + t.Fatalf("expected error not found: %v", test.err) + } + + if err.Error() != test.err { + t.Fatalf("expected error not found, want:\n %q\ngot:\n %q", test.err, err.Error()) + } + + if len(args) > 0 { + t.Fatalf("splitter returned fields from invalid data: %v", args) + } + }) + } +} diff --git a/internal/backend/swift/config.go b/internal/backend/swift/config.go new file mode 100644 index 000000000..9c152707e --- /dev/null +++ b/internal/backend/swift/config.go @@ -0,0 +1,110 @@ +package swift + +import ( + "os" + "strings" + + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/options" +) + +// Config contains basic configuration needed to specify swift location for a swift server +type Config struct { + UserName string + Domain string + APIKey string + AuthURL string + Region string + Tenant string + TenantID string + TenantDomain string + TrustID string + + StorageURL string + AuthToken string + + Container string + Prefix string + DefaultContainerPolicy string + + Connections uint `option:"connections" help:"set a limit for the number of concurrent connections (default: 5)"` +} + +func init() { + options.Register("swift", Config{}) +} + +// NewConfig returns a new config with the default values filled in. 
+func NewConfig() Config { + return Config{ + Connections: 5, + } +} + +// ParseConfig parses the string s and extract swift's container name and prefix. +func ParseConfig(s string) (interface{}, error) { + data := strings.SplitN(s, ":", 3) + if len(data) != 3 { + return nil, errors.New("invalid URL, expected: swift:container-name:/[prefix]") + } + + scheme, container, prefix := data[0], data[1], data[2] + if scheme != "swift" { + return nil, errors.Errorf("unexpected prefix: %s", data[0]) + } + + if len(prefix) == 0 { + return nil, errors.Errorf("prefix is empty") + } + + if prefix[0] != '/' { + return nil, errors.Errorf("prefix does not start with slash (/)") + } + prefix = prefix[1:] + + cfg := NewConfig() + cfg.Container = container + cfg.Prefix = prefix + + return cfg, nil +} + +// ApplyEnvironment saves values from the environment to the config. +func ApplyEnvironment(prefix string, cfg interface{}) error { + c := cfg.(*Config) + for _, val := range []struct { + s *string + env string + }{ + // v2/v3 specific + {&c.UserName, prefix + "OS_USERNAME"}, + {&c.APIKey, prefix + "OS_PASSWORD"}, + {&c.Region, prefix + "OS_REGION_NAME"}, + {&c.AuthURL, prefix + "OS_AUTH_URL"}, + + // v3 specific + {&c.Domain, prefix + "OS_USER_DOMAIN_NAME"}, + {&c.Tenant, prefix + "OS_PROJECT_NAME"}, + {&c.TenantDomain, prefix + "OS_PROJECT_DOMAIN_NAME"}, + + // v2 specific + {&c.TenantID, prefix + "OS_TENANT_ID"}, + {&c.Tenant, prefix + "OS_TENANT_NAME"}, + + // v1 specific + {&c.AuthURL, prefix + "ST_AUTH"}, + {&c.UserName, prefix + "ST_USER"}, + {&c.APIKey, prefix + "ST_KEY"}, + + // Manual authentication + {&c.StorageURL, prefix + "OS_STORAGE_URL"}, + {&c.AuthToken, prefix + "OS_AUTH_TOKEN"}, + + {&c.DefaultContainerPolicy, prefix + "SWIFT_DEFAULT_CONTAINER_POLICY"}, + } { + if *val.s == "" { + *val.s = os.Getenv(val.env) + } + } + return nil +} diff --git a/internal/backend/swift/config_test.go b/internal/backend/swift/config_test.go new file mode 100644 index 000000000..35f091a9b --- /dev/null +++ b/internal/backend/swift/config_test.go @@ -0,0 +1,72 @@ +package swift + +import "testing" + +var configTests = []struct { + s string + cfg Config +}{ + { + "swift:cnt1:/", + Config{ + Container: "cnt1", + Prefix: "", + Connections: 5, + }, + }, + { + "swift:cnt2:/prefix", + Config{Container: "cnt2", + Prefix: "prefix", + Connections: 5, + }, + }, + { + "swift:cnt3:/prefix/longer", + Config{Container: "cnt3", + Prefix: "prefix/longer", + Connections: 5, + }, + }, +} + +func TestParseConfig(t *testing.T) { + for _, test := range configTests { + t.Run("", func(t *testing.T) { + v, err := ParseConfig(test.s) + if err != nil { + t.Fatalf("parsing %q failed: %v", test.s, err) + } + + cfg, ok := v.(Config) + if !ok { + t.Fatalf("wrong type returned, want Config, got %T", cfg) + } + + if cfg != test.cfg { + t.Fatalf("wrong output for %q, want:\n %#v\ngot:\n %#v", + test.s, test.cfg, cfg) + } + }) + } +} + +var configTestsInvalid = []string{ + "swift://hostname/container", + "swift:////", + "swift://", + "swift:////prefix", + "swift:container", + "swift:container:", + "swift:container/prefix", +} + +func TestParseConfigInvalid(t *testing.T) { + for i, test := range configTestsInvalid { + _, err := ParseConfig(test) + if err == nil { + t.Errorf("test %d: invalid config %s did not return an error", i, test) + continue + } + } +} diff --git a/internal/backend/swift/swift.go b/internal/backend/swift/swift.go new file mode 100644 index 000000000..8a17450d8 --- /dev/null +++ b/internal/backend/swift/swift.go @@ -0,0 +1,322 
@@ +package swift + +import ( + "context" + "fmt" + "io" + "net/http" + "path" + "strings" + "time" + + "github.com/restic/restic/internal/backend" + "github.com/restic/restic/internal/debug" + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/restic" + + "github.com/ncw/swift" +) + +const connLimit = 10 + +// beSwift is a backend which stores the data on a swift endpoint. +type beSwift struct { + conn *swift.Connection + sem *backend.Semaphore + container string // Container name + prefix string // Prefix of object names in the container + backend.Layout +} + +// ensure statically that *beSwift implements restic.Backend. +var _ restic.Backend = &beSwift{} + +// Open opens the swift backend at a container in region. The container is +// created if it does not exist yet. +func Open(cfg Config, rt http.RoundTripper) (restic.Backend, error) { + debug.Log("config %#v", cfg) + + sem, err := backend.NewSemaphore(cfg.Connections) + if err != nil { + return nil, err + } + + be := &beSwift{ + conn: &swift.Connection{ + UserName: cfg.UserName, + Domain: cfg.Domain, + ApiKey: cfg.APIKey, + AuthUrl: cfg.AuthURL, + Region: cfg.Region, + Tenant: cfg.Tenant, + TenantId: cfg.TenantID, + TenantDomain: cfg.TenantDomain, + TrustId: cfg.TrustID, + StorageUrl: cfg.StorageURL, + AuthToken: cfg.AuthToken, + ConnectTimeout: time.Minute, + Timeout: time.Minute, + + Transport: rt, + }, + sem: sem, + container: cfg.Container, + prefix: cfg.Prefix, + Layout: &backend.DefaultLayout{ + Path: cfg.Prefix, + Join: path.Join, + }, + } + + // Authenticate if needed + if !be.conn.Authenticated() { + if err := be.conn.Authenticate(); err != nil { + return nil, errors.Wrap(err, "conn.Authenticate") + } + } + + // Ensure container exists + switch _, _, err := be.conn.Container(be.container); err { + case nil: + // Container exists + + case swift.ContainerNotFound: + err = be.createContainer(cfg.DefaultContainerPolicy) + if err != nil { + return nil, errors.Wrap(err, "beSwift.createContainer") + } + + default: + return nil, errors.Wrap(err, "conn.Container") + } + + return be, nil +} + +func (be *beSwift) createContainer(policy string) error { + var h swift.Headers + if policy != "" { + h = swift.Headers{ + "X-Storage-Policy": policy, + } + } + + return be.conn.ContainerCreate(be.container, h) +} + +// Location returns this backend's location (the container name). +func (be *beSwift) Location() string { + return be.container +} + +// Load runs fn with a reader that yields the contents of the file at h at the +// given offset. 
+func (be *beSwift) Load(ctx context.Context, h restic.Handle, length int, offset int64, fn func(rd io.Reader) error) error { + return backend.DefaultLoad(ctx, h, length, offset, be.openReader, fn) +} + +func (be *beSwift) openReader(ctx context.Context, h restic.Handle, length int, offset int64) (io.ReadCloser, error) { + debug.Log("Load %v, length %v, offset %v", h, length, offset) + if err := h.Valid(); err != nil { + return nil, err + } + + if offset < 0 { + return nil, errors.New("offset is negative") + } + + if length < 0 { + return nil, errors.Errorf("invalid length %d", length) + } + + objName := be.Filename(h) + + headers := swift.Headers{} + if offset > 0 { + headers["Range"] = fmt.Sprintf("bytes=%d-", offset) + } + + if length > 0 { + headers["Range"] = fmt.Sprintf("bytes=%d-%d", offset, offset+int64(length)-1) + } + + if _, ok := headers["Range"]; ok { + debug.Log("Load(%v) send range %v", h, headers["Range"]) + } + + be.sem.GetToken() + obj, _, err := be.conn.ObjectOpen(be.container, objName, false, headers) + if err != nil { + debug.Log(" err %v", err) + be.sem.ReleaseToken() + return nil, errors.Wrap(err, "conn.ObjectOpen") + } + + return be.sem.ReleaseTokenOnClose(obj, nil), nil +} + +// Save stores data in the backend at the handle. +func (be *beSwift) Save(ctx context.Context, h restic.Handle, rd restic.RewindReader) error { + if err := h.Valid(); err != nil { + return err + } + + objName := be.Filename(h) + + debug.Log("Save %v at %v", h, objName) + + be.sem.GetToken() + defer be.sem.ReleaseToken() + + encoding := "binary/octet-stream" + + debug.Log("PutObject(%v, %v, %v)", be.container, objName, encoding) + _, err := be.conn.ObjectPut(be.container, objName, rd, true, "", encoding, nil) + debug.Log("%v, err %#v", objName, err) + + return errors.Wrap(err, "client.PutObject") +} + +// Stat returns information about a blob. +func (be *beSwift) Stat(ctx context.Context, h restic.Handle) (bi restic.FileInfo, err error) { + debug.Log("%v", h) + + objName := be.Filename(h) + + be.sem.GetToken() + defer be.sem.ReleaseToken() + + obj, _, err := be.conn.Object(be.container, objName) + if err != nil { + debug.Log("Object() err %v", err) + return restic.FileInfo{}, errors.Wrap(err, "conn.Object") + } + + return restic.FileInfo{Size: obj.Bytes, Name: h.Name}, nil +} + +// Test returns true if a blob of the given type and name exists in the backend. +func (be *beSwift) Test(ctx context.Context, h restic.Handle) (bool, error) { + objName := be.Filename(h) + + be.sem.GetToken() + defer be.sem.ReleaseToken() + + switch _, _, err := be.conn.Object(be.container, objName); err { + case nil: + return true, nil + + case swift.ObjectNotFound: + return false, nil + + default: + return false, errors.Wrap(err, "conn.Object") + } +} + +// Remove removes the blob with the given name and type. +func (be *beSwift) Remove(ctx context.Context, h restic.Handle) error { + objName := be.Filename(h) + + be.sem.GetToken() + defer be.sem.ReleaseToken() + + err := be.conn.ObjectDelete(be.container, objName) + debug.Log("Remove(%v) -> err %v", h, err) + return errors.Wrap(err, "conn.ObjectDelete") +} + +// List runs fn for each file in the backend which has the type t. When an +// error occurs (or fn returns an error), List stops and returns it. 
+func (be *beSwift) List(ctx context.Context, t restic.FileType, fn func(restic.FileInfo) error) error { + debug.Log("listing %v", t) + + prefix, _ := be.Basedir(t) + prefix += "/" + + err := be.conn.ObjectsWalk(be.container, &swift.ObjectsOpts{Prefix: prefix}, + func(opts *swift.ObjectsOpts) (interface{}, error) { + be.sem.GetToken() + newObjects, err := be.conn.Objects(be.container, opts) + be.sem.ReleaseToken() + + if err != nil { + return nil, errors.Wrap(err, "conn.ObjectNames") + } + for _, obj := range newObjects { + m := path.Base(strings.TrimPrefix(obj.Name, prefix)) + if m == "" { + continue + } + + fi := restic.FileInfo{ + Name: m, + Size: obj.Bytes, + } + + if ctx.Err() != nil { + return nil, ctx.Err() + } + + err := fn(fi) + if err != nil { + return nil, err + } + + if ctx.Err() != nil { + return nil, ctx.Err() + } + } + return newObjects, nil + }) + + if err != nil { + return err + } + + return ctx.Err() +} + +// Remove keys for a specified backend type. +func (be *beSwift) removeKeys(ctx context.Context, t restic.FileType) error { + return be.List(ctx, t, func(fi restic.FileInfo) error { + return be.Remove(ctx, restic.Handle{Type: t, Name: fi.Name}) + }) +} + +// IsNotExist returns true if the error is caused by a not existing file. +func (be *beSwift) IsNotExist(err error) bool { + if e, ok := errors.Cause(err).(*swift.Error); ok { + return e.StatusCode == http.StatusNotFound + } + + return false +} + +// Delete removes all restic objects in the container. +// It will not remove the container itself. +func (be *beSwift) Delete(ctx context.Context) error { + alltypes := []restic.FileType{ + restic.DataFile, + restic.KeyFile, + restic.LockFile, + restic.SnapshotFile, + restic.IndexFile} + + for _, t := range alltypes { + err := be.removeKeys(ctx, t) + if err != nil { + return nil + } + } + + err := be.Remove(ctx, restic.Handle{Type: restic.ConfigFile}) + if err != nil && !be.IsNotExist(err) { + return err + } + + return nil +} + +// Close does nothing +func (be *beSwift) Close() error { return nil } diff --git a/internal/backend/swift/swift_test.go b/internal/backend/swift/swift_test.go new file mode 100644 index 000000000..2c4781554 --- /dev/null +++ b/internal/backend/swift/swift_test.go @@ -0,0 +1,125 @@ +package swift_test + +import ( + "context" + "fmt" + "os" + "testing" + "time" + + "github.com/restic/restic/internal/backend" + "github.com/restic/restic/internal/backend/swift" + "github.com/restic/restic/internal/backend/test" + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/restic" + rtest "github.com/restic/restic/internal/test" +) + +func newSwiftTestSuite(t testing.TB) *test.Suite { + tr, err := backend.Transport(backend.TransportOptions{}) + if err != nil { + t.Fatalf("cannot create transport for tests: %v", err) + } + + return &test.Suite{ + // do not use excessive data + MinimalData: true, + + // wait for removals for at least 5m + WaitForDelayedRemoval: 5 * time.Minute, + + ErrorHandler: func(t testing.TB, be restic.Backend, err error) error { + if err == nil { + return nil + } + + if be.IsNotExist(err) { + t.Logf("swift: ignoring error %v", err) + return nil + } + + return err + }, + + // NewConfig returns a config for a new temporary backend that will be used in tests. 
+ NewConfig: func() (interface{}, error) { + swiftcfg, err := swift.ParseConfig(os.Getenv("RESTIC_TEST_SWIFT")) + if err != nil { + return nil, err + } + + cfg := swiftcfg.(swift.Config) + if err = swift.ApplyEnvironment("RESTIC_TEST_", &cfg); err != nil { + return nil, err + } + cfg.Prefix += fmt.Sprintf("/test-%d", time.Now().UnixNano()) + t.Logf("using prefix %v", cfg.Prefix) + return cfg, nil + }, + + // CreateFn is a function that creates a temporary repository for the tests. + Create: func(config interface{}) (restic.Backend, error) { + cfg := config.(swift.Config) + + be, err := swift.Open(cfg, tr) + if err != nil { + return nil, err + } + + exists, err := be.Test(context.TODO(), restic.Handle{Type: restic.ConfigFile}) + if err != nil { + return nil, err + } + + if exists { + return nil, errors.New("config already exists") + } + + return be, nil + }, + + // OpenFn is a function that opens a previously created temporary repository. + Open: func(config interface{}) (restic.Backend, error) { + cfg := config.(swift.Config) + return swift.Open(cfg, tr) + }, + + // CleanupFn removes data created during the tests. + Cleanup: func(config interface{}) error { + cfg := config.(swift.Config) + + be, err := swift.Open(cfg, tr) + if err != nil { + return err + } + + return be.Delete(context.TODO()) + }, + } +} + +func TestBackendSwift(t *testing.T) { + defer func() { + if t.Skipped() { + rtest.SkipDisallowed(t, "restic/backend/swift.TestBackendSwift") + } + }() + + if os.Getenv("RESTIC_TEST_SWIFT") == "" { + t.Skip("RESTIC_TEST_SWIFT unset, skipping test") + return + } + + t.Logf("run tests") + newSwiftTestSuite(t).RunTests(t) +} + +func BenchmarkBackendSwift(t *testing.B) { + if os.Getenv("RESTIC_TEST_SWIFT") == "" { + t.Skip("RESTIC_TEST_SWIFT unset, skipping test") + return + } + + t.Logf("run tests") + newSwiftTestSuite(t).RunBenchmarks(t) +} diff --git a/internal/backend/test/benchmarks.go b/internal/backend/test/benchmarks.go new file mode 100644 index 000000000..302768f2e --- /dev/null +++ b/internal/backend/test/benchmarks.go @@ -0,0 +1,165 @@ +package test + +import ( + "bytes" + "context" + "io" + "testing" + + "github.com/restic/restic/internal/restic" + "github.com/restic/restic/internal/test" +) + +func saveRandomFile(t testing.TB, be restic.Backend, length int) ([]byte, restic.Handle) { + data := test.Random(23, length) + id := restic.Hash(data) + handle := restic.Handle{Type: restic.DataFile, Name: id.String()} + err := be.Save(context.TODO(), handle, restic.NewByteReader(data)) + if err != nil { + t.Fatalf("Save() error: %+v", err) + } + return data, handle +} + +func remove(t testing.TB, be restic.Backend, h restic.Handle) { + if err := be.Remove(context.TODO(), h); err != nil { + t.Fatalf("Remove() returned error: %v", err) + } +} + +// BenchmarkLoadFile benchmarks the Load() method of a backend by +// loading a complete file. 
+func (s *Suite) BenchmarkLoadFile(t *testing.B) { + be := s.open(t) + defer s.close(t, be) + + length := 1<<24 + 2123 + data, handle := saveRandomFile(t, be, length) + defer remove(t, be, handle) + + buf := make([]byte, length) + + t.SetBytes(int64(length)) + t.ResetTimer() + + for i := 0; i < t.N; i++ { + var n int + err := be.Load(context.TODO(), handle, 0, 0, func(rd io.Reader) (ierr error) { + n, ierr = io.ReadFull(rd, buf) + return ierr + }) + if err != nil { + t.Fatal(err) + } + + if n != length { + t.Fatalf("wrong number of bytes read: want %v, got %v", length, n) + } + + if !bytes.Equal(data, buf) { + t.Fatalf("wrong bytes returned") + } + } +} + +// BenchmarkLoadPartialFile benchmarks the Load() method of a backend by +// loading the remainder of a file starting at a given offset. +func (s *Suite) BenchmarkLoadPartialFile(t *testing.B) { + be := s.open(t) + defer s.close(t, be) + + datalength := 1<<24 + 2123 + data, handle := saveRandomFile(t, be, datalength) + defer remove(t, be, handle) + + testLength := datalength/4 + 555 + + buf := make([]byte, testLength) + + t.SetBytes(int64(testLength)) + t.ResetTimer() + + for i := 0; i < t.N; i++ { + var n int + err := be.Load(context.TODO(), handle, testLength, 0, func(rd io.Reader) (ierr error) { + n, ierr = io.ReadFull(rd, buf) + return ierr + }) + if err != nil { + t.Fatal(err) + } + + if n != testLength { + t.Fatalf("wrong number of bytes read: want %v, got %v", testLength, n) + } + + if !bytes.Equal(data[:testLength], buf) { + t.Fatalf("wrong bytes returned") + } + + } +} + +// BenchmarkLoadPartialFileOffset benchmarks the Load() method of a +// backend by loading a number of bytes of a file starting at a given offset. +func (s *Suite) BenchmarkLoadPartialFileOffset(t *testing.B) { + be := s.open(t) + defer s.close(t, be) + + datalength := 1<<24 + 2123 + data, handle := saveRandomFile(t, be, datalength) + defer remove(t, be, handle) + + testLength := datalength/4 + 555 + testOffset := 8273 + + buf := make([]byte, testLength) + + t.SetBytes(int64(testLength)) + t.ResetTimer() + + for i := 0; i < t.N; i++ { + var n int + err := be.Load(context.TODO(), handle, testLength, int64(testOffset), func(rd io.Reader) (ierr error) { + n, ierr = io.ReadFull(rd, buf) + return ierr + }) + if err != nil { + t.Fatal(err) + } + + if n != testLength { + t.Fatalf("wrong number of bytes read: want %v, got %v", testLength, n) + } + + if !bytes.Equal(data[testOffset:testOffset+testLength], buf) { + t.Fatalf("wrong bytes returned") + } + + } +} + +// BenchmarkSave benchmarks the Save() method of a backend. +func (s *Suite) BenchmarkSave(t *testing.B) { + be := s.open(t) + defer s.close(t, be) + + length := 1<<24 + 2123 + data := test.Random(23, length) + id := restic.Hash(data) + handle := restic.Handle{Type: restic.DataFile, Name: id.String()} + + rd := restic.NewByteReader(data) + t.SetBytes(int64(length)) + t.ResetTimer() + + for i := 0; i < t.N; i++ { + if err := be.Save(context.TODO(), handle, rd); err != nil { + t.Fatal(err) + } + + if err := be.Remove(context.TODO(), handle); err != nil { + t.Fatal(err) + } + } +} diff --git a/internal/backend/test/doc.go b/internal/backend/test/doc.go new file mode 100644 index 000000000..c1704d2c9 --- /dev/null +++ b/internal/backend/test/doc.go @@ -0,0 +1,42 @@ +// Package test contains a test suite with benchmarks for restic backends. +// +// Overview +// +// For the test suite to work a few functions need to be implemented to create +// new config, create a backend, open it and run cleanup tasks afterwards. 
The +// Suite struct has fields for each function. +// +// So for a new backend, a Suite needs to be built with callback functions, +// then the methods RunTests() and RunBenchmarks() can be used to run the +// individual tests and benchmarks as subtests/subbenchmarks. +// +// Example +// +// Assuming a *Suite is returned by newTestSuite(), the tests and benchmarks +// can be run like this: +// func newTestSuite(t testing.TB) *test.Suite { +// return &test.Suite{ +// Create: func(cfg interface{}) (restic.Backend, error) { +// [...] +// }, +// [...] +// } +// } +// +// func TestSuiteBackendMem(t *testing.T) { +// newTestSuite(t).RunTests(t) +// } +// +// func BenchmarkSuiteBackendMem(b *testing.B) { +// newTestSuite(b).RunBenchmarks(b) +// } +// +// The functions are run in alphabetical order. +// +// Add new tests +// +// A new test or benchmark can be added by implementing a method on *Suite +// with the name starting with "Test" and a single *testing.T parameter for +// test. For benchmarks, the name must start with "Benchmark" and the parameter +// is a *testing.B +package test diff --git a/internal/backend/test/suite.go b/internal/backend/test/suite.go new file mode 100644 index 000000000..342ac38e4 --- /dev/null +++ b/internal/backend/test/suite.go @@ -0,0 +1,185 @@ +package test + +import ( + "reflect" + "strings" + "testing" + "time" + + "github.com/restic/restic/internal/restic" + "github.com/restic/restic/internal/test" +) + +// Suite implements a test suite for restic backends. +type Suite struct { + // Config should be used to configure the backend. + Config interface{} + + // NewConfig returns a config for a new temporary backend that will be used in tests. + NewConfig func() (interface{}, error) + + // CreateFn is a function that creates a temporary repository for the tests. + Create func(cfg interface{}) (restic.Backend, error) + + // OpenFn is a function that opens a previously created temporary repository. + Open func(cfg interface{}) (restic.Backend, error) + + // CleanupFn removes data created during the tests. + Cleanup func(cfg interface{}) error + + // MinimalData instructs the tests to not use excessive data. + MinimalData bool + + // WaitForDelayedRemoval is set to a non-zero value to instruct the test + // suite to wait for this amount of time until a file that was removed + // really disappeared. + WaitForDelayedRemoval time.Duration + + // ErrorHandler allows ignoring certain errors. + ErrorHandler func(testing.TB, restic.Backend, error) error +} + +// RunTests executes all defined tests as subtests of t. 
+func (s *Suite) RunTests(t *testing.T) { + var err error + s.Config, err = s.NewConfig() + if err != nil { + t.Fatal(err) + } + + // test create/open functions first + be := s.create(t) + s.close(t, be) + + for _, test := range s.testFuncs(t) { + t.Run(test.Name, test.Fn) + } + + if !test.TestCleanupTempDirs { + t.Logf("not cleaning up backend") + return + } + + if err = s.Cleanup(s.Config); err != nil { + t.Fatal(err) + } +} + +type testFunction struct { + Name string + Fn func(*testing.T) +} + +func (s *Suite) testFuncs(t testing.TB) (funcs []testFunction) { + tpe := reflect.TypeOf(s) + v := reflect.ValueOf(s) + + for i := 0; i < tpe.NumMethod(); i++ { + methodType := tpe.Method(i) + name := methodType.Name + + // discard functions which do not have the right name + if !strings.HasPrefix(name, "Test") { + continue + } + + iface := v.Method(i).Interface() + f, ok := iface.(func(*testing.T)) + if !ok { + t.Logf("warning: function %v of *Suite has the wrong signature for a test function\nwant: func(*testing.T),\nhave: %T", + name, iface) + continue + } + + funcs = append(funcs, testFunction{ + Name: name, + Fn: f, + }) + } + + return funcs +} + +type benchmarkFunction struct { + Name string + Fn func(*testing.B) +} + +func (s *Suite) benchmarkFuncs(t testing.TB) (funcs []benchmarkFunction) { + tpe := reflect.TypeOf(s) + v := reflect.ValueOf(s) + + for i := 0; i < tpe.NumMethod(); i++ { + methodType := tpe.Method(i) + name := methodType.Name + + // discard functions which do not have the right name + if !strings.HasPrefix(name, "Benchmark") { + continue + } + + iface := v.Method(i).Interface() + f, ok := iface.(func(*testing.B)) + if !ok { + t.Logf("warning: function %v of *Suite has the wrong signature for a test function\nwant: func(*testing.T),\nhave: %T", + name, iface) + continue + } + + funcs = append(funcs, benchmarkFunction{ + Name: name, + Fn: f, + }) + } + + return funcs +} + +// RunBenchmarks executes all defined benchmarks as subtests of b. 
+func (s *Suite) RunBenchmarks(b *testing.B) { + var err error + s.Config, err = s.NewConfig() + if err != nil { + b.Fatal(err) + } + + // test create/open functions first + be := s.create(b) + s.close(b, be) + + for _, test := range s.benchmarkFuncs(b) { + b.Run(test.Name, test.Fn) + } + + if !test.TestCleanupTempDirs { + b.Logf("not cleaning up backend") + return + } + + if err = s.Cleanup(s.Config); err != nil { + b.Fatal(err) + } +} + +func (s *Suite) create(t testing.TB) restic.Backend { + be, err := s.Create(s.Config) + if err != nil { + t.Fatal(err) + } + return be +} + +func (s *Suite) open(t testing.TB) restic.Backend { + be, err := s.Open(s.Config) + if err != nil { + t.Fatal(err) + } + return be +} + +func (s *Suite) close(t testing.TB, be restic.Backend) { + err := be.Close() + if err != nil { + t.Fatal(err) + } +} diff --git a/internal/backend/test/tests.go b/internal/backend/test/tests.go new file mode 100644 index 000000000..dec1e0bee --- /dev/null +++ b/internal/backend/test/tests.go @@ -0,0 +1,827 @@ +package test + +import ( + "bytes" + "context" + "fmt" + "io" + "io/ioutil" + "math/rand" + "os" + "reflect" + "sort" + "testing" + "time" + + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/restic" + + "github.com/restic/restic/internal/test" + + "github.com/restic/restic/internal/backend" +) + +func seedRand(t testing.TB) { + seed := time.Now().UnixNano() + rand.Seed(seed) + t.Logf("rand initialized with seed %d", seed) +} + +// TestCreateWithConfig tests that creating a backend in a location which already +// has a config file fails. +func (s *Suite) TestCreateWithConfig(t *testing.T) { + b := s.open(t) + defer s.close(t, b) + + // remove a config if present + cfgHandle := restic.Handle{Type: restic.ConfigFile} + cfgPresent, err := b.Test(context.TODO(), cfgHandle) + if err != nil { + t.Fatalf("unable to test for config: %+v", err) + } + + if cfgPresent { + remove(t, b, cfgHandle) + } + + // save a config + store(t, b, restic.ConfigFile, []byte("test config")) + + // now create the backend again, this must fail + _, err = s.Create(s.Config) + if err == nil { + t.Fatalf("expected error not found for creating a backend with an existing config file") + } + + // remove config + err = b.Remove(context.TODO(), restic.Handle{Type: restic.ConfigFile, Name: ""}) + if err != nil { + t.Fatalf("unexpected error removing config: %+v", err) + } +} + +// TestLocation tests that a location string is returned. +func (s *Suite) TestLocation(t *testing.T) { + b := s.open(t) + defer s.close(t, b) + + l := b.Location() + if l == "" { + t.Fatalf("invalid location string %q", l) + } +} + +// TestConfig saves and loads a config from the backend. 
+func (s *Suite) TestConfig(t *testing.T) { + b := s.open(t) + defer s.close(t, b) + + var testString = "Config" + + // create config and read it back + _, err := backend.LoadAll(context.TODO(), b, restic.Handle{Type: restic.ConfigFile}) + if err == nil { + t.Fatalf("did not get expected error for non-existing config") + } + + err = b.Save(context.TODO(), restic.Handle{Type: restic.ConfigFile}, restic.NewByteReader([]byte(testString))) + if err != nil { + t.Fatalf("Save() error: %+v", err) + } + + // try accessing the config with different names, should all return the + // same config + for _, name := range []string{"", "foo", "bar", "0000000000000000000000000000000000000000000000000000000000000000"} { + h := restic.Handle{Type: restic.ConfigFile, Name: name} + buf, err := backend.LoadAll(context.TODO(), b, h) + if err != nil { + t.Fatalf("unable to read config with name %q: %+v", name, err) + } + + if string(buf) != testString { + t.Fatalf("wrong data returned, want %q, got %q", testString, string(buf)) + } + } + + // remove the config + remove(t, b, restic.Handle{Type: restic.ConfigFile}) +} + +// TestLoad tests the backend's Load function. +func (s *Suite) TestLoad(t *testing.T) { + seedRand(t) + + b := s.open(t) + defer s.close(t, b) + + noop := func(rd io.Reader) error { + return nil + } + + err := b.Load(context.TODO(), restic.Handle{}, 0, 0, noop) + if err == nil { + t.Fatalf("Load() did not return an error for invalid handle") + } + + err = testLoad(b, restic.Handle{Type: restic.DataFile, Name: "foobar"}, 0, 0) + if err == nil { + t.Fatalf("Load() did not return an error for non-existing blob") + } + + length := rand.Intn(1<<24) + 2000 + + data := test.Random(23, length) + id := restic.Hash(data) + + handle := restic.Handle{Type: restic.DataFile, Name: id.String()} + err = b.Save(context.TODO(), handle, restic.NewByteReader(data)) + if err != nil { + t.Fatalf("Save() error: %+v", err) + } + + t.Logf("saved %d bytes as %v", length, handle) + + err = b.Load(context.TODO(), handle, 100, -1, noop) + if err == nil { + t.Fatalf("Load() returned no error for negative offset!") + } + + err = b.Load(context.TODO(), handle, 0, 0, func(rd io.Reader) error { + _, err := io.Copy(ioutil.Discard, rd) + if err != nil { + t.Fatal(err) + } + return errors.Errorf("deliberate error") + }) + if err == nil { + t.Fatalf("Load() did not propagate consumer error!") + } + if err.Error() != "deliberate error" { + t.Fatalf("Load() did not correctly propagate consumer error!") + } + + loadTests := 50 + if s.MinimalData { + loadTests = 10 + } + + for i := 0; i < loadTests; i++ { + l := rand.Intn(length + 2000) + o := rand.Intn(length + 2000) + + d := data + if o < len(d) { + d = d[o:] + } else { + t.Logf("offset == length, skipping test") + continue + } + + getlen := l + if l >= len(d) && rand.Float32() >= 0.5 { + getlen = 0 + } + + if l > 0 && l < len(d) { + d = d[:l] + } + + var buf []byte + err := b.Load(context.TODO(), handle, getlen, int64(o), func(rd io.Reader) (ierr error) { + buf, ierr = ioutil.ReadAll(rd) + return ierr + }) + if err != nil { + t.Logf("Load, l %v, o %v, len(d) %v, getlen %v", l, o, len(d), getlen) + t.Errorf("Load(%d, %d) returned unexpected error: %+v", l, o, err) + continue + } + + if l == 0 && len(buf) != len(d) { + t.Logf("Load, l %v, o %v, len(d) %v, getlen %v", l, o, len(d), getlen) + t.Errorf("Load(%d, %d) wrong number of bytes read: want %d, got %d", l, o, len(d), len(buf)) + continue + } + + if l > 0 && l <= len(d) && len(buf) != l { + t.Logf("Load, l %v, o %v, len(d) %v, 
getlen %v", l, o, len(d), getlen) + t.Errorf("Load(%d, %d) wrong number of bytes read: want %d, got %d", l, o, l, len(buf)) + continue + } + + if l > len(d) && len(buf) != len(d) { + t.Logf("Load, l %v, o %v, len(d) %v, getlen %v", l, o, len(d), getlen) + t.Errorf("Load(%d, %d) wrong number of bytes read for overlong read: want %d, got %d", l, o, l, len(buf)) + continue + } + + if !bytes.Equal(buf, d) { + t.Logf("Load, l %v, o %v, len(d) %v, getlen %v", l, o, len(d), getlen) + t.Errorf("Load(%d, %d) returned wrong bytes", l, o) + continue + } + } + + test.OK(t, b.Remove(context.TODO(), handle)) +} + +// TestList makes sure that the backend implements List() pagination correctly. +func (s *Suite) TestList(t *testing.T) { + seedRand(t) + + numTestFiles := rand.Intn(20) + 20 + + b := s.open(t) + defer s.close(t, b) + + // Check that the backend is empty to start with + var found []string + err := b.List(context.TODO(), restic.DataFile, func(fi restic.FileInfo) error { + found = append(found, fi.Name) + return nil + }) + if err != nil { + t.Fatalf("List returned error %v", err) + } + if found != nil { + t.Fatalf("backend not empty at start of test - contains: %v", found) + } + + list1 := make(map[restic.ID]int64) + + for i := 0; i < numTestFiles; i++ { + data := test.Random(rand.Int(), rand.Intn(100)+55) + id := restic.Hash(data) + h := restic.Handle{Type: restic.DataFile, Name: id.String()} + err := b.Save(context.TODO(), h, restic.NewByteReader(data)) + if err != nil { + t.Fatal(err) + } + list1[id] = int64(len(data)) + } + + t.Logf("wrote %v files", len(list1)) + + var tests = []struct { + maxItems int + }{ + {11}, {23}, {numTestFiles}, {numTestFiles + 10}, {numTestFiles + 1123}, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("max-%v", test.maxItems), func(t *testing.T) { + list2 := make(map[restic.ID]int64) + + type setter interface { + SetListMaxItems(int) + } + + if s, ok := b.(setter); ok { + t.Logf("setting max list items to %d", test.maxItems) + s.SetListMaxItems(test.maxItems) + } + + err := b.List(context.TODO(), restic.DataFile, func(fi restic.FileInfo) error { + id, err := restic.ParseID(fi.Name) + if err != nil { + t.Fatal(err) + } + list2[id] = fi.Size + return nil + }) + + if err != nil { + t.Fatalf("List returned error %v", err) + } + + t.Logf("loaded %v IDs from backend", len(list2)) + + for id, size := range list1 { + size2, ok := list2[id] + if !ok { + t.Errorf("id %v not returned by List()", id.Str()) + } + + if size != size2 { + t.Errorf("wrong size for id %v returned: want %v, got %v", id.Str(), size, size2) + } + } + + for id := range list2 { + _, ok := list1[id] + if !ok { + t.Errorf("extra id %v returned by List()", id.Str()) + } + } + }) + } + + t.Logf("remove %d files", numTestFiles) + handles := make([]restic.Handle, 0, len(list1)) + for id := range list1 { + handles = append(handles, restic.Handle{Type: restic.DataFile, Name: id.String()}) + } + + err = s.delayedRemove(t, b, handles...) + if err != nil { + t.Fatal(err) + } +} + +// TestListCancel tests that the context is respected and the error is returned by List. 
+func (s *Suite) TestListCancel(t *testing.T) { + seedRand(t) + + numTestFiles := 5 + + b := s.open(t) + defer s.close(t, b) + + testFiles := make([]restic.Handle, 0, numTestFiles) + + for i := 0; i < numTestFiles; i++ { + data := []byte(fmt.Sprintf("random test blob %v", i)) + id := restic.Hash(data) + h := restic.Handle{Type: restic.DataFile, Name: id.String()} + err := b.Save(context.TODO(), h, restic.NewByteReader(data)) + if err != nil { + t.Fatal(err) + } + testFiles = append(testFiles, h) + } + + t.Run("Cancelled", func(t *testing.T) { + ctx, cancel := context.WithCancel(context.TODO()) + cancel() + + // pass in a cancelled context + err := b.List(ctx, restic.DataFile, func(fi restic.FileInfo) error { + t.Errorf("got FileInfo %v for cancelled context", fi) + return nil + }) + + if errors.Cause(err) != context.Canceled { + t.Fatalf("expected error not found, want %v, got %v", context.Canceled, errors.Cause(err)) + } + }) + + t.Run("First", func(t *testing.T) { + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + + i := 0 + err := b.List(ctx, restic.DataFile, func(fi restic.FileInfo) error { + i++ + // cancel the context on the first file + if i == 1 { + cancel() + } + return nil + }) + + if errors.Cause(err) != context.Canceled { + t.Fatalf("expected error not found, want %v, got %v", context.Canceled, err) + } + + if i != 1 { + t.Fatalf("wrong number of files returned by List, want %v, got %v", 1, i) + } + }) + + t.Run("Last", func(t *testing.T) { + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + + i := 0 + err := b.List(ctx, restic.DataFile, func(fi restic.FileInfo) error { + // cancel the context at the last file + i++ + if i == numTestFiles { + cancel() + } + return nil + }) + + if errors.Cause(err) != context.Canceled { + t.Fatalf("expected error not found, want %v, got %v", context.Canceled, err) + } + + if i != numTestFiles { + t.Fatalf("wrong number of files returned by List, want %v, got %v", numTestFiles, i) + } + }) + + t.Run("Timeout", func(t *testing.T) { + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + + // rather large timeout, let's try to get at least one item + timeout := time.Second + + ctxTimeout, _ := context.WithTimeout(ctx, timeout) + + i := 0 + // pass in a context with a timeout + err := b.List(ctxTimeout, restic.DataFile, func(fi restic.FileInfo) error { + i++ + + // wait until the context is cancelled + <-ctxTimeout.Done() + return nil + }) + + if errors.Cause(err) != context.DeadlineExceeded { + t.Fatalf("expected error not found, want %#v, got %#v", context.DeadlineExceeded, err) + } + + if i > 2 { + t.Fatalf("wrong number of files returned by List, want <= 2, got %v", i) + } + }) + + err := s.delayedRemove(t, b, testFiles...) + if err != nil { + t.Fatal(err) + } +} + +type errorCloser struct { + io.ReadSeeker + l int64 + t testing.TB +} + +func (ec errorCloser) Close() error { + ec.t.Error("forbidden method close was called") + return errors.New("forbidden method close was called") +} + +func (ec errorCloser) Length() int64 { + return ec.l +} + +func (ec errorCloser) Rewind() error { + _, err := ec.ReadSeeker.Seek(0, io.SeekStart) + return err +} + +// TestSave tests saving data in the backend. 
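+//
+// Aside (illustrative, not upstream): the errorCloser wrapper above exists
+// because Save must treat the reader as borrowed and never close it; the
+// caller owns the underlying file. TestSave wraps a tempfile like this:
+//
+//	rd := errorCloser{t: t, l: int64(length), ReadSeeker: tmpfile}
+//	err := b.Save(context.TODO(), h, rd) // any rd.Close() fails the test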
+func (s *Suite) TestSave(t *testing.T) { + seedRand(t) + + b := s.open(t) + defer s.close(t, b) + var id restic.ID + + saveTests := 10 + if s.MinimalData { + saveTests = 2 + } + + for i := 0; i < saveTests; i++ { + length := rand.Intn(1<<23) + 200000 + data := test.Random(23, length) + // use the first 32 byte as the ID + copy(id[:], data) + + h := restic.Handle{ + Type: restic.DataFile, + Name: fmt.Sprintf("%s-%d", id, i), + } + err := b.Save(context.TODO(), h, restic.NewByteReader(data)) + test.OK(t, err) + + buf, err := backend.LoadAll(context.TODO(), b, h) + test.OK(t, err) + if len(buf) != len(data) { + t.Fatalf("number of bytes does not match, want %v, got %v", len(data), len(buf)) + } + + if !bytes.Equal(buf, data) { + t.Fatalf("data not equal") + } + + fi, err := b.Stat(context.TODO(), h) + test.OK(t, err) + + if fi.Name != h.Name { + t.Errorf("Stat() returned wrong name, want %q, got %q", h.Name, fi.Name) + } + + if fi.Size != int64(len(data)) { + t.Errorf("Stat() returned different size, want %q, got %d", len(data), fi.Size) + } + + err = b.Remove(context.TODO(), h) + if err != nil { + t.Fatalf("error removing item: %+v", err) + } + } + + // test saving from a tempfile + tmpfile, err := ioutil.TempFile("", "restic-backend-save-test-") + if err != nil { + t.Fatal(err) + } + + length := rand.Intn(1<<23) + 200000 + data := test.Random(23, length) + copy(id[:], data) + + if _, err = tmpfile.Write(data); err != nil { + t.Fatal(err) + } + + if _, err = tmpfile.Seek(0, io.SeekStart); err != nil { + t.Fatal(err) + } + + h := restic.Handle{Type: restic.DataFile, Name: id.String()} + + // wrap the tempfile in an errorCloser, so we can detect if the backend + // closes the reader + err = b.Save(context.TODO(), h, errorCloser{t: t, l: int64(length), ReadSeeker: tmpfile}) + if err != nil { + t.Fatal(err) + } + + err = s.delayedRemove(t, b, h) + if err != nil { + t.Fatalf("error removing item: %+v", err) + } + + if err = tmpfile.Close(); err != nil { + t.Fatal(err) + } + + if err = os.Remove(tmpfile.Name()); err != nil { + t.Fatal(err) + } +} + +var filenameTests = []struct { + name string + data string +}{ + {"1dfc6bc0f06cb255889e9ea7860a5753e8eb9665c9a96627971171b444e3113e", "x"}, + {"f00b4r", "foobar"}, + { + "1dfc6bc0f06cb255889e9ea7860a5753e8eb9665c9a96627971171b444e3113e4bf8f2d9144cc5420a80f04a4880ad6155fc58903a4fb6457c476c43541dcaa6-5", + "foobar content of data blob", + }, +} + +// TestSaveFilenames tests saving data with various file names in the backend. 
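+//
+// Note (illustrative): filenameTests above deliberately mixes a plain hex ID,
+// a short non-hash name ("f00b4r") and an oversized name with a "-5" suffix;
+// a conforming backend must round-trip all of them byte for byte:
+//
+//	h := restic.Handle{Type: restic.DataFile, Name: "f00b4r"}
+//	_ = b.Save(context.TODO(), h, restic.NewByteReader([]byte("foobar")))
+//	buf, _ := backend.LoadAll(context.TODO(), b, h) // buf == []byte("foobar")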
+func (s *Suite) TestSaveFilenames(t *testing.T) { + b := s.open(t) + defer s.close(t, b) + + for i, test := range filenameTests { + h := restic.Handle{Name: test.name, Type: restic.DataFile} + err := b.Save(context.TODO(), h, restic.NewByteReader([]byte(test.data))) + if err != nil { + t.Errorf("test %d failed: Save() returned %+v", i, err) + continue + } + + buf, err := backend.LoadAll(context.TODO(), b, h) + if err != nil { + t.Errorf("test %d failed: Load() returned %+v", i, err) + continue + } + + if !bytes.Equal(buf, []byte(test.data)) { + t.Errorf("test %d: returned wrong bytes", i) + } + + err = b.Remove(context.TODO(), h) + if err != nil { + t.Errorf("test %d failed: Remove() returned %+v", i, err) + continue + } + } +} + +var testStrings = []struct { + id string + data string +}{ + {"c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2", "foobar"}, + {"248d6a61d20638b8e5c026930c3e6039a33ce45964ff2167f6ecedd419db06c1", "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq"}, + {"cc5d46bdb4991c6eae3eb739c9c8a7a46fe9654fab79c47b4fe48383b5b25e1c", "foo/bar"}, + {"4e54d2c721cbdb730f01b10b62dec622962b36966ec685880effa63d71c808f2", "foo/../../baz"}, +} + +func store(t testing.TB, b restic.Backend, tpe restic.FileType, data []byte) restic.Handle { + id := restic.Hash(data) + h := restic.Handle{Name: id.String(), Type: tpe} + err := b.Save(context.TODO(), h, restic.NewByteReader([]byte(data))) + test.OK(t, err) + return h +} + +// testLoad loads a blob (but discards its contents). +func testLoad(b restic.Backend, h restic.Handle, length int, offset int64) error { + return b.Load(context.TODO(), h, 0, 0, func(rd io.Reader) (ierr error) { + _, ierr = io.Copy(ioutil.Discard, rd) + return ierr + }) +} + +func (s *Suite) delayedRemove(t testing.TB, be restic.Backend, handles ...restic.Handle) error { + // Some backend (swift, I'm looking at you) may implement delayed + // removal of data. Let's wait a bit if this happens. + + for _, h := range handles { + err := be.Remove(context.TODO(), h) + if s.ErrorHandler != nil { + err = s.ErrorHandler(t, be, err) + } + if err != nil { + return err + } + } + + for _, h := range handles { + start := time.Now() + attempt := 0 + var found bool + var err error + for time.Since(start) <= s.WaitForDelayedRemoval { + found, err = be.Test(context.TODO(), h) + if s.ErrorHandler != nil { + err = s.ErrorHandler(t, be, err) + } + if err != nil { + return err + } + + if !found { + break + } + + time.Sleep(2 * time.Second) + attempt++ + } + + if found { + t.Fatalf("removed blob %v still present after %v (%d attempts)", h, time.Since(start), attempt) + } + } + + return nil +} + +func delayedList(t testing.TB, b restic.Backend, tpe restic.FileType, max int, maxwait time.Duration) restic.IDs { + list := restic.NewIDSet() + start := time.Now() + for i := 0; i < max; i++ { + err := b.List(context.TODO(), tpe, func(fi restic.FileInfo) error { + id := restic.TestParseID(fi.Name) + list.Insert(id) + return nil + }) + + if err != nil { + t.Fatal(err) + } + + if len(list) < max && time.Since(start) < maxwait { + time.Sleep(500 * time.Millisecond) + } + } + + return list.List() +} + +// TestBackend tests all functions of the backend. 
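+//
+// Aside (illustrative): delayedRemove and delayedList above accommodate
+// eventually-consistent backends (Swift, for instance), which may keep
+// reporting a removed file for a while. The polling pattern, bounded by the
+// suite's WaitForDelayedRemoval, boils down to:
+//
+//	for time.Since(start) <= s.WaitForDelayedRemoval {
+//		if found, _ := be.Test(context.TODO(), h); !found {
+//			break
+//		}
+//		time.Sleep(2 * time.Second)
+//	}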
+func (s *Suite) TestBackend(t *testing.T) { + b := s.open(t) + defer s.close(t, b) + + for _, tpe := range []restic.FileType{ + restic.DataFile, restic.KeyFile, restic.LockFile, + restic.SnapshotFile, restic.IndexFile, + } { + // detect non-existing files + for _, ts := range testStrings { + id, err := restic.ParseID(ts.id) + test.OK(t, err) + + // test if blob is already in repository + h := restic.Handle{Type: tpe, Name: id.String()} + ret, err := b.Test(context.TODO(), h) + test.OK(t, err) + test.Assert(t, !ret, "blob was found to exist before creating") + + // try to stat a not existing blob + _, err = b.Stat(context.TODO(), h) + test.Assert(t, err != nil, "blob data could be extracted before creation") + + // try to read not existing blob + err = testLoad(b, h, 0, 0) + test.Assert(t, err != nil, "blob could be read before creation") + + // try to get string out, should fail + ret, err = b.Test(context.TODO(), h) + test.OK(t, err) + test.Assert(t, !ret, "id %q was found (but should not have)", ts.id) + } + + // add files + for _, ts := range testStrings { + store(t, b, tpe, []byte(ts.data)) + + // test Load() + h := restic.Handle{Type: tpe, Name: ts.id} + buf, err := backend.LoadAll(context.TODO(), b, h) + test.OK(t, err) + test.Equals(t, ts.data, string(buf)) + + // try to read it out with an offset and a length + start := 1 + end := len(ts.data) - 2 + length := end - start + + buf2 := make([]byte, length) + var n int + err = b.Load(context.TODO(), h, len(buf2), int64(start), func(rd io.Reader) (ierr error) { + n, ierr = io.ReadFull(rd, buf2) + return ierr + }) + test.OK(t, err) + test.OK(t, err) + test.Equals(t, len(buf2), n) + test.Equals(t, ts.data[start:end], string(buf2)) + } + + // test adding the first file again + ts := testStrings[0] + h := restic.Handle{Type: tpe, Name: ts.id} + + // remove and recreate + err := s.delayedRemove(t, b, h) + test.OK(t, err) + + // test that the blob is gone + ok, err := b.Test(context.TODO(), h) + test.OK(t, err) + test.Assert(t, !ok, "removed blob still present") + + // create blob + err = b.Save(context.TODO(), h, restic.NewByteReader([]byte(ts.data))) + test.OK(t, err) + + // list items + IDs := restic.IDs{} + + for _, ts := range testStrings { + id, err := restic.ParseID(ts.id) + test.OK(t, err) + IDs = append(IDs, id) + } + + list := delayedList(t, b, tpe, len(IDs), s.WaitForDelayedRemoval) + if len(IDs) != len(list) { + t.Fatalf("wrong number of IDs returned: want %d, got %d", len(IDs), len(list)) + } + + sort.Sort(IDs) + sort.Sort(list) + + if !reflect.DeepEqual(IDs, list) { + t.Fatalf("lists aren't equal, want:\n %v\n got:\n%v\n", IDs, list) + } + + var handles []restic.Handle + for _, ts := range testStrings { + id, err := restic.ParseID(ts.id) + test.OK(t, err) + + h := restic.Handle{Type: tpe, Name: id.String()} + + found, err := b.Test(context.TODO(), h) + test.OK(t, err) + test.Assert(t, found, fmt.Sprintf("id %q not found", id)) + + handles = append(handles, h) + } + + test.OK(t, s.delayedRemove(t, b, handles...)) + } +} + +// TestZZZDelete tests the Delete function. The name ensures that this test is executed last. 
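+//
+// Aside (illustrative): Delete wipes the entire backend, so no other suite
+// test may run after it; once
+//
+//	err := b.Delete(context.TODO())
+//
+// succeeds, the backend is expected to be empty (or gone entirely).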
+func (s *Suite) TestZZZDelete(t *testing.T) {
+	if !test.TestCleanupTempDirs {
+		t.Skipf("not removing backend, TestCleanupTempDirs is false")
+	}
+
+	b := s.open(t)
+	defer s.close(t, b)
+
+	err := b.Delete(context.TODO())
+	if err != nil {
+		t.Fatalf("error deleting backend: %+v", err)
+	}
+}
diff --git a/internal/backend/test/tests_test.go b/internal/backend/test/tests_test.go
new file mode 100644
index 000000000..de1663cbc
--- /dev/null
+++ b/internal/backend/test/tests_test.go
@@ -0,0 +1,68 @@
+package test_test
+
+import (
+	"context"
+	"testing"
+
+	"github.com/restic/restic/internal/errors"
+	"github.com/restic/restic/internal/restic"
+
+	"github.com/restic/restic/internal/backend/mem"
+	"github.com/restic/restic/internal/backend/test"
+)
+
+//go:generate go run generate_test_list.go
+
+type memConfig struct {
+	be restic.Backend
+}
+
+func newTestSuite(t testing.TB) *test.Suite {
+	return &test.Suite{
+		// NewConfig returns a config for a new temporary backend that will be used in tests.
+		NewConfig: func() (interface{}, error) {
+			return &memConfig{}, nil
+		},
+
+		// Create creates a temporary repository for the tests.
+		Create: func(cfg interface{}) (restic.Backend, error) {
+			c := cfg.(*memConfig)
+			if c.be != nil {
+				ok, err := c.be.Test(context.TODO(), restic.Handle{Type: restic.ConfigFile})
+				if err != nil {
+					return nil, err
+				}
+
+				if ok {
+					return nil, errors.New("config already exists")
+				}
+			}
+
+			c.be = mem.New()
+			return c.be, nil
+		},
+
+		// Open opens a previously created temporary repository.
+		Open: func(cfg interface{}) (restic.Backend, error) {
+			c := cfg.(*memConfig)
+			if c.be == nil {
+				c.be = mem.New()
+			}
+			return c.be, nil
+		},
+
+		// Cleanup removes data created during the tests.
+		Cleanup: func(cfg interface{}) error {
+			// no cleanup needed
+			return nil
+		},
+	}
+}
+
+func TestSuiteBackendMem(t *testing.T) {
+	newTestSuite(t).RunTests(t)
+}
+
+func BenchmarkSuiteBackendMem(b *testing.B) {
+	newTestSuite(b).RunBenchmarks(b)
+}
diff --git a/internal/backend/testdata/repo-layout-default.tar.gz b/internal/backend/testdata/repo-layout-default.tar.gz
new file mode 100644
index 000000000..e38deb54b
Binary files /dev/null and b/internal/backend/testdata/repo-layout-default.tar.gz differ
diff --git a/internal/backend/testdata/repo-layout-s3legacy.tar.gz b/internal/backend/testdata/repo-layout-s3legacy.tar.gz
new file mode 100644
index 000000000..2b7d852cc
Binary files /dev/null and b/internal/backend/testdata/repo-layout-s3legacy.tar.gz differ
diff --git a/internal/backend/utils.go b/internal/backend/utils.go
new file mode 100644
index 000000000..222f210e5
--- /dev/null
+++ b/internal/backend/utils.go
@@ -0,0 +1,51 @@
+package backend
+
+import (
+	"context"
+	"io"
+	"io/ioutil"
+
+	"github.com/restic/restic/internal/restic"
+)
+
+// LoadAll reads all data stored in the backend for the handle.
+func LoadAll(ctx context.Context, be restic.Backend, h restic.Handle) (buf []byte, err error) {
+	err = be.Load(ctx, h, 0, 0, func(rd io.Reader) (ierr error) {
+		buf, ierr = ioutil.ReadAll(rd)
+		return ierr
+	})
+	return buf, err
+}
+
+// LimitedReadCloser wraps io.LimitedReader and exposes the Close() method.
+type LimitedReadCloser struct {
+	io.ReadCloser
+	io.Reader
+}
+
+// Read reads data from the limited reader.
+func (l *LimitedReadCloser) Read(p []byte) (int, error) {
+	return l.Reader.Read(p)
+}
+
+// LimitReadCloser returns a new reader that wraps r in an io.LimitReader, but
+// also exposes the Close() method.
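+//
+// Illustrative use (not upstream): a backend whose HTTP layer hands back a
+// raw body can cap a partial read while keeping Close() working; "resp" is a
+// hypothetical *http.Response here:
+//
+//	rd := LimitReadCloser(resp.Body, int64(length))
+//	defer rd.Close()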
+func LimitReadCloser(r io.ReadCloser, n int64) *LimitedReadCloser { + return &LimitedReadCloser{ReadCloser: r, Reader: io.LimitReader(r, n)} +} + +// DefaultLoad implements Backend.Load using lower-level openReader func +func DefaultLoad(ctx context.Context, h restic.Handle, length int, offset int64, + openReader func(ctx context.Context, h restic.Handle, length int, offset int64) (io.ReadCloser, error), + fn func(rd io.Reader) error) error { + rd, err := openReader(ctx, h, length, offset) + if err != nil { + return err + } + err = fn(rd) + if err != nil { + rd.Close() // ignore secondary errors closing the reader + return err + } + return rd.Close() +} diff --git a/internal/backend/utils_test.go b/internal/backend/utils_test.go new file mode 100644 index 000000000..74929fd0b --- /dev/null +++ b/internal/backend/utils_test.go @@ -0,0 +1,147 @@ +package backend_test + +import ( + "bytes" + "context" + "io" + "math/rand" + "testing" + + "github.com/restic/restic/internal/backend" + "github.com/restic/restic/internal/backend/mem" + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/restic" + rtest "github.com/restic/restic/internal/test" +) + +const KiB = 1 << 10 +const MiB = 1 << 20 + +func TestLoadAll(t *testing.T) { + b := mem.New() + + for i := 0; i < 20; i++ { + data := rtest.Random(23+i, rand.Intn(MiB)+500*KiB) + + id := restic.Hash(data) + h := restic.Handle{Name: id.String(), Type: restic.DataFile} + err := b.Save(context.TODO(), h, restic.NewByteReader(data)) + rtest.OK(t, err) + + buf, err := backend.LoadAll(context.TODO(), b, restic.Handle{Type: restic.DataFile, Name: id.String()}) + rtest.OK(t, err) + + if len(buf) != len(data) { + t.Errorf("length of returned buffer does not match, want %d, got %d", len(data), len(buf)) + continue + } + + if !bytes.Equal(buf, data) { + t.Errorf("wrong data returned") + continue + } + } +} + +func TestLoadSmallBuffer(t *testing.T) { + b := mem.New() + + for i := 0; i < 20; i++ { + data := rtest.Random(23+i, rand.Intn(MiB)+500*KiB) + + id := restic.Hash(data) + h := restic.Handle{Name: id.String(), Type: restic.DataFile} + err := b.Save(context.TODO(), h, restic.NewByteReader(data)) + rtest.OK(t, err) + + buf, err := backend.LoadAll(context.TODO(), b, restic.Handle{Type: restic.DataFile, Name: id.String()}) + rtest.OK(t, err) + + if len(buf) != len(data) { + t.Errorf("length of returned buffer does not match, want %d, got %d", len(data), len(buf)) + continue + } + + if !bytes.Equal(buf, data) { + t.Errorf("wrong data returned") + continue + } + } +} + +func TestLoadLargeBuffer(t *testing.T) { + b := mem.New() + + for i := 0; i < 20; i++ { + data := rtest.Random(23+i, rand.Intn(MiB)+500*KiB) + + id := restic.Hash(data) + h := restic.Handle{Name: id.String(), Type: restic.DataFile} + err := b.Save(context.TODO(), h, restic.NewByteReader(data)) + rtest.OK(t, err) + + buf, err := backend.LoadAll(context.TODO(), b, restic.Handle{Type: restic.DataFile, Name: id.String()}) + rtest.OK(t, err) + + if len(buf) != len(data) { + t.Errorf("length of returned buffer does not match, want %d, got %d", len(data), len(buf)) + continue + } + + if !bytes.Equal(buf, data) { + t.Errorf("wrong data returned") + continue + } + } +} + +type mockReader struct { + closed bool +} + +func (rd *mockReader) Read(p []byte) (n int, err error) { + return 0, nil +} +func (rd *mockReader) Close() error { + rd.closed = true + return nil +} + +func TestDefaultLoad(t *testing.T) { + + h := restic.Handle{Name: "id", Type: restic.DataFile} + rd := 
&mockReader{} + + // happy case, assert correct parameters are passed around and content stream is closed + err := backend.DefaultLoad(context.TODO(), h, 10, 11, func(ctx context.Context, ih restic.Handle, length int, offset int64) (io.ReadCloser, error) { + rtest.Equals(t, h, ih) + rtest.Equals(t, int(10), length) + rtest.Equals(t, int64(11), offset) + + return rd, nil + }, func(ird io.Reader) error { + rtest.Equals(t, rd, ird) + return nil + }) + rtest.OK(t, err) + rtest.Equals(t, true, rd.closed) + + // unhappy case, assert producer errors are handled correctly + err = backend.DefaultLoad(context.TODO(), h, 10, 11, func(ctx context.Context, ih restic.Handle, length int, offset int64) (io.ReadCloser, error) { + return nil, errors.Errorf("producer error") + }, func(ird io.Reader) error { + t.Fatalf("unexpected consumer invocation") + return nil + }) + rtest.Equals(t, "producer error", err.Error()) + + // unhappy case, assert consumer errors are handled correctly + rd = &mockReader{} + err = backend.DefaultLoad(context.TODO(), h, 10, 11, func(ctx context.Context, ih restic.Handle, length int, offset int64) (io.ReadCloser, error) { + return rd, nil + }, func(ird io.Reader) error { + return errors.Errorf("consumer error") + }) + rtest.Equals(t, true, rd.closed) + rtest.Equals(t, "consumer error", err.Error()) +} diff --git a/internal/cache/backend.go b/internal/cache/backend.go new file mode 100644 index 000000000..824a35753 --- /dev/null +++ b/internal/cache/backend.go @@ -0,0 +1,231 @@ +package cache + +import ( + "context" + "io" + "sync" + + "github.com/restic/restic/internal/debug" + "github.com/restic/restic/internal/restic" +) + +// Backend wraps a restic.Backend and adds a cache. +type Backend struct { + restic.Backend + *Cache + + // inProgress contains the handle for all files that are currently + // downloaded. The channel in the value is closed as soon as the download + // is finished. + inProgressMutex sync.Mutex + inProgress map[restic.Handle]chan struct{} +} + +// ensure cachedBackend implements restic.Backend +var _ restic.Backend = &Backend{} + +func newBackend(be restic.Backend, c *Cache) *Backend { + return &Backend{ + Backend: be, + Cache: c, + inProgress: make(map[restic.Handle]chan struct{}), + } +} + +// Remove deletes a file from the backend and the cache if it has been cached. +func (b *Backend) Remove(ctx context.Context, h restic.Handle) error { + debug.Log("cache Remove(%v)", h) + err := b.Backend.Remove(ctx, h) + if err != nil { + return err + } + + return b.Cache.Remove(h) +} + +var autoCacheTypes = map[restic.FileType]struct{}{ + restic.IndexFile: {}, + restic.SnapshotFile: {}, +} + +// Save stores a new file in the backend and the cache. 
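+//
+// Aside (illustrative): only the types listed in autoCacheTypes (index and
+// snapshot files) are written through to the cache; everything else goes
+// straight to the wrapped backend. Writing the data twice is why Save takes
+// a restic.RewindReader, which can be rewound between the two writes:
+//
+//	rd := restic.NewByteReader(data) // rewindable; "data" is a placeholder
+//	err := cached.Save(ctx, restic.Handle{Type: restic.IndexFile, Name: name}, rd)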
+func (b *Backend) Save(ctx context.Context, h restic.Handle, rd restic.RewindReader) error { + if _, ok := autoCacheTypes[h.Type]; !ok { + return b.Backend.Save(ctx, h, rd) + } + + debug.Log("Save(%v): auto-store in the cache", h) + + // make sure the reader is at the start + err := rd.Rewind() + if err != nil { + return err + } + + // first, save in the backend + err = b.Backend.Save(ctx, h, rd) + if err != nil { + return err + } + + // next, save in the cache + err = rd.Rewind() + if err != nil { + return err + } + + err = b.Cache.Save(h, rd) + if err != nil { + debug.Log("unable to save %v to cache: %v", h, err) + _ = b.Cache.Remove(h) + return nil + } + + return nil +} + +var autoCacheFiles = map[restic.FileType]bool{ + restic.IndexFile: true, + restic.SnapshotFile: true, +} + +func (b *Backend) cacheFile(ctx context.Context, h restic.Handle) error { + finish := make(chan struct{}) + + b.inProgressMutex.Lock() + other, alreadyDownloading := b.inProgress[h] + if !alreadyDownloading { + b.inProgress[h] = finish + } + b.inProgressMutex.Unlock() + + if alreadyDownloading { + debug.Log("readahead %v is already performed by somebody else, delegating...", h) + <-other + debug.Log("download %v finished", h) + return nil + } + + // test again, maybe the file was cached in the meantime + if !b.Cache.Has(h) { + + // nope, it's still not in the cache, pull it from the repo and save it + + err := b.Backend.Load(ctx, h, 0, 0, func(rd io.Reader) error { + return b.Cache.Save(h, rd) + }) + if err != nil { + // try to remove from the cache, ignore errors + _ = b.Cache.Remove(h) + } + } + + // signal other waiting goroutines that the file may now be cached + close(finish) + + // remove the finish channel from the map + b.inProgressMutex.Lock() + delete(b.inProgress, h) + b.inProgressMutex.Unlock() + + return nil +} + +// loadFromCacheOrDelegate will try to load the file from the cache, and fall +// back to the backend if that fails. +func (b *Backend) loadFromCacheOrDelegate(ctx context.Context, h restic.Handle, length int, offset int64, consumer func(rd io.Reader) error) error { + rd, err := b.Cache.Load(h, length, offset) + if err != nil { + debug.Log("error caching %v: %v, falling back to backend", h, err) + return b.Backend.Load(ctx, h, length, offset, consumer) + } + + err = consumer(rd) + if err != nil { + _ = rd.Close() // ignore secondary errors + return err + } + return rd.Close() +} + +// Load loads a file from the cache or the backend. 
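+//
+// The read path below, summarized (illustrative):
+//
+//	wait for any in-flight download of h, then
+//	1. cache hit               -> serve from the cache
+//	2. partial request         -> optional readahead, else delegate
+//	3. auto-cached file type   -> download into the cache, then serve
+//	4. anything else           -> delegate to the wrapped backend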
+func (b *Backend) Load(ctx context.Context, h restic.Handle, length int, offset int64, consumer func(rd io.Reader) error) error { + b.inProgressMutex.Lock() + waitForFinish, inProgress := b.inProgress[h] + b.inProgressMutex.Unlock() + + if inProgress { + debug.Log("downloading %v is already in progress, waiting for finish", h) + <-waitForFinish + debug.Log("downloading %v finished", h) + } + + if b.Cache.Has(h) { + debug.Log("Load(%v, %v, %v) from cache", h, length, offset) + rd, err := b.Cache.Load(h, length, offset) + if err == nil { + err = consumer(rd) + if err != nil { + rd.Close() // ignore secondary errors + return err + } + return rd.Close() + } + debug.Log("error loading %v from cache: %v", h, err) + } + + // partial file requested + if offset != 0 || length != 0 { + if b.Cache.PerformReadahead(h) { + debug.Log("performing readahead for %v", h) + + err := b.cacheFile(ctx, h) + if err == nil { + return b.loadFromCacheOrDelegate(ctx, h, length, offset, consumer) + } + + debug.Log("error caching %v: %v", h, err) + } + + debug.Log("Load(%v, %v, %v): partial file requested, delegating to backend", h, length, offset) + return b.Backend.Load(ctx, h, length, offset, consumer) + } + + // if we don't automatically cache this file type, fall back to the backend + if _, ok := autoCacheFiles[h.Type]; !ok { + debug.Log("Load(%v, %v, %v): delegating to backend", h, length, offset) + return b.Backend.Load(ctx, h, length, offset, consumer) + } + + debug.Log("auto-store %v in the cache", h) + err := b.cacheFile(ctx, h) + if err == nil { + return b.loadFromCacheOrDelegate(ctx, h, length, offset, consumer) + } + + debug.Log("error caching %v: %v, falling back to backend", h, err) + return b.Backend.Load(ctx, h, length, offset, consumer) +} + +// Stat tests whether the backend has a file. If it does not exist but still +// exists in the cache, it is removed from the cache. +func (b *Backend) Stat(ctx context.Context, h restic.Handle) (restic.FileInfo, error) { + debug.Log("cache Stat(%v)", h) + + fi, err := b.Backend.Stat(ctx, h) + if err != nil { + if b.Backend.IsNotExist(err) { + // try to remove from the cache, ignore errors + _ = b.Cache.Remove(h) + } + + return fi, err + } + + return fi, err +} + +// IsNotExist returns true if the error is caused by a non-existing file. 
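+//
+// Aside (illustrative): together with Stat above, this lets the wrapper
+// self-heal: once the backend reports a file as gone, the stale cache entry
+// is dropped. Obtaining the wrapper in the first place is simply:
+//
+//	cached := c.Wrap(be) // c is a *Cache, be any restic.Backend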
+func (b *Backend) IsNotExist(err error) bool { + return b.Backend.IsNotExist(err) +} diff --git a/internal/cache/backend_test.go b/internal/cache/backend_test.go new file mode 100644 index 000000000..b4cc431ac --- /dev/null +++ b/internal/cache/backend_test.go @@ -0,0 +1,174 @@ +package cache + +import ( + "bytes" + "context" + "io" + "math/rand" + "sync" + "testing" + "time" + + "github.com/pkg/errors" + "github.com/restic/restic/internal/backend" + "github.com/restic/restic/internal/backend/mem" + "github.com/restic/restic/internal/restic" + "github.com/restic/restic/internal/test" +) + +func loadAndCompare(t testing.TB, be restic.Backend, h restic.Handle, data []byte) { + buf, err := backend.LoadAll(context.TODO(), be, h) + if err != nil { + t.Fatal(err) + } + + if len(buf) != len(data) { + t.Fatalf("wrong number of bytes read, want %v, got %v", len(data), len(buf)) + } + + if !bytes.Equal(buf, data) { + t.Fatalf("wrong data returned, want:\n %02x\ngot:\n %02x", data[:16], buf[:16]) + } +} + +func save(t testing.TB, be restic.Backend, h restic.Handle, data []byte) { + err := be.Save(context.TODO(), h, restic.NewByteReader(data)) + if err != nil { + t.Fatal(err) + } +} + +func remove(t testing.TB, be restic.Backend, h restic.Handle) { + err := be.Remove(context.TODO(), h) + if err != nil { + t.Fatal(err) + } +} + +func randomData(n int) (restic.Handle, []byte) { + data := test.Random(rand.Int(), n) + id := restic.Hash(data) + copy(id[:], data) + h := restic.Handle{ + Type: restic.IndexFile, + Name: id.String(), + } + return h, data +} + +func TestBackend(t *testing.T) { + be := mem.New() + + c, cleanup := TestNewCache(t) + defer cleanup() + + wbe := c.Wrap(be) + + h, data := randomData(5234142) + + // save directly in backend + save(t, be, h, data) + if c.Has(h) { + t.Errorf("cache has file too early") + } + + // load data via cache + loadAndCompare(t, wbe, h, data) + if !c.Has(h) { + t.Errorf("cache doesn't have file after load") + } + + // remove via cache + remove(t, wbe, h) + if c.Has(h) { + t.Errorf("cache has file after remove") + } + + // save via cache + save(t, wbe, h, data) + if !c.Has(h) { + t.Errorf("cache doesn't have file after load") + } + + // load data directly from backend + loadAndCompare(t, be, h, data) + + // load data via cache + loadAndCompare(t, be, h, data) + + // remove directly + remove(t, be, h) + if !c.Has(h) { + t.Errorf("file not in cache any more") + } + + // run stat + _, err := wbe.Stat(context.TODO(), h) + if err == nil { + t.Errorf("expected error for removed file not found, got nil") + } + + if !wbe.IsNotExist(err) { + t.Errorf("Stat() returned error that does not match IsNotExist(): %v", err) + } + + if c.Has(h) { + t.Errorf("removed file still in cache after stat") + } +} + +type loadErrorBackend struct { + restic.Backend + loadError error +} + +func (be loadErrorBackend) Load(ctx context.Context, h restic.Handle, length int, offset int64, fn func(rd io.Reader) error) error { + time.Sleep(10 * time.Millisecond) + return be.loadError +} + +func TestErrorBackend(t *testing.T) { + be := mem.New() + + c, cleanup := TestNewCache(t) + defer cleanup() + + h, data := randomData(5234142) + + // save directly in backend + save(t, be, h, data) + + testErr := errors.New("test error") + errBackend := loadErrorBackend{ + Backend: be, + loadError: testErr, + } + + loadTest := func(wg *sync.WaitGroup, be restic.Backend) { + defer wg.Done() + + buf, err := backend.LoadAll(context.TODO(), be, h) + if err == testErr { + return + } + + if err != nil { + t.Error(err) + 
return + } + + if !bytes.Equal(buf, data) { + t.Errorf("data does not match") + } + time.Sleep(time.Millisecond) + } + + wrappedBE := c.Wrap(errBackend) + var wg sync.WaitGroup + for i := 0; i < 5; i++ { + wg.Add(1) + go loadTest(&wg, wrappedBE) + } + + wg.Wait() +} diff --git a/internal/cache/cache.go b/internal/cache/cache.go new file mode 100644 index 000000000..fd5743b94 --- /dev/null +++ b/internal/cache/cache.go @@ -0,0 +1,284 @@ +package cache + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "regexp" + "strconv" + "time" + + "github.com/pkg/errors" + "github.com/restic/restic/internal/debug" + "github.com/restic/restic/internal/fs" + "github.com/restic/restic/internal/restic" +) + +// Cache manages a local cache. +type Cache struct { + Path string + Base string + Created bool + PerformReadahead func(restic.Handle) bool +} + +const dirMode = 0700 +const fileMode = 0600 + +func readVersion(dir string) (v uint, err error) { + buf, err := ioutil.ReadFile(filepath.Join(dir, "version")) + if os.IsNotExist(err) { + return 0, nil + } + + if err != nil { + return 0, errors.Wrap(err, "ReadFile") + } + + ver, err := strconv.ParseUint(string(buf), 10, 32) + if err != nil { + return 0, errors.Wrap(err, "ParseUint") + } + + return uint(ver), nil +} + +const cacheVersion = 1 + +// ensure Cache implements restic.Cache +var _ restic.Cache = &Cache{} + +var cacheLayoutPaths = map[restic.FileType]string{ + restic.DataFile: "data", + restic.SnapshotFile: "snapshots", + restic.IndexFile: "index", +} + +const cachedirTagSignature = "Signature: 8a477f597d28d172789f06886806bc55\n" + +func writeCachedirTag(dir string) error { + if err := fs.MkdirAll(dir, dirMode); err != nil { + return err + } + + tagfile := filepath.Join(dir, "CACHEDIR.TAG") + _, err := fs.Lstat(tagfile) + if err != nil && !os.IsNotExist(err) { + return errors.Wrap(err, "Lstat") + } + + f, err := fs.OpenFile(tagfile, os.O_CREATE|os.O_EXCL|os.O_WRONLY, 0644) + if err != nil { + if os.IsExist(errors.Cause(err)) { + return nil + } + + return errors.Wrap(err, "OpenFile") + } + + debug.Log("Create CACHEDIR.TAG at %v", dir) + if _, err := f.Write([]byte(cachedirTagSignature)); err != nil { + _ = f.Close() + return errors.Wrap(err, "Write") + } + + return f.Close() +} + +// New returns a new cache for the repo ID at basedir. If basedir is the empty +// string, the default cache location (according to the XDG standard) is used. +// +// For partial files, the complete file is loaded and stored in the cache when +// performReadahead returns true. 
+func New(id string, basedir string) (c *Cache, err error) { + if basedir == "" { + basedir, err = DefaultDir() + if err != nil { + return nil, err + } + } + + created, err := mkdirCacheDir(basedir) + if err != nil { + return nil, err + } + + // create base dir and tag it as a cache directory + if err = writeCachedirTag(basedir); err != nil { + return nil, err + } + + cachedir := filepath.Join(basedir, id) + debug.Log("using cache dir %v", cachedir) + + v, err := readVersion(cachedir) + if err != nil { + return nil, err + } + + if v > cacheVersion { + return nil, errors.New("cache version is newer") + } + + // create the repo cache dir if it does not exist yet + _, err = fs.Lstat(cachedir) + if os.IsNotExist(err) { + err = fs.MkdirAll(cachedir, dirMode) + if err != nil { + return nil, err + } + created = true + } + + // update the timestamp so that we can detect old cache dirs + err = updateTimestamp(cachedir) + if err != nil { + return nil, err + } + + if v < cacheVersion { + err = ioutil.WriteFile(filepath.Join(cachedir, "version"), []byte(fmt.Sprintf("%d", cacheVersion)), 0644) + if err != nil { + return nil, errors.Wrap(err, "WriteFile") + } + } + + for _, p := range cacheLayoutPaths { + if err = fs.MkdirAll(filepath.Join(cachedir, p), dirMode); err != nil { + return nil, err + } + } + + c = &Cache{ + Path: cachedir, + Base: basedir, + Created: created, + PerformReadahead: func(restic.Handle) bool { + // do not perform readahead by default + return false + }, + } + + return c, nil +} + +// updateTimestamp sets the modification timestamp (mtime and atime) for the +// directory d to the current time. +func updateTimestamp(d string) error { + t := time.Now() + return fs.Chtimes(d, t, t) +} + +// MaxCacheAge is the default age (30 days) after which cache directories are considered old. +const MaxCacheAge = 30 * 24 * time.Hour + +func validCacheDirName(s string) bool { + r := regexp.MustCompile(`^[a-fA-F0-9]{64}$`) + if !r.MatchString(s) { + return false + } + + return true +} + +// listCacheDirs returns the list of cache directories. +func listCacheDirs(basedir string) ([]os.FileInfo, error) { + f, err := fs.Open(basedir) + if err != nil && os.IsNotExist(errors.Cause(err)) { + return nil, nil + } + + if err != nil { + return nil, err + } + + entries, err := f.Readdir(-1) + if err != nil { + return nil, err + } + + err = f.Close() + if err != nil { + return nil, err + } + + result := make([]os.FileInfo, 0, len(entries)) + for _, entry := range entries { + if !entry.IsDir() { + continue + } + + if !validCacheDirName(entry.Name()) { + continue + } + + result = append(result, entry) + } + + return result, nil +} + +// All returns a list of cache directories. +func All(basedir string) (dirs []os.FileInfo, err error) { + return listCacheDirs(basedir) +} + +// OlderThan returns the list of cache directories older than max. +func OlderThan(basedir string, max time.Duration) ([]os.FileInfo, error) { + entries, err := listCacheDirs(basedir) + if err != nil { + return nil, err + } + + var oldCacheDirs []os.FileInfo + for _, fi := range entries { + if !IsOld(fi.ModTime(), max) { + continue + } + + oldCacheDirs = append(oldCacheDirs, fi) + } + + debug.Log("%d old cache dirs found", len(oldCacheDirs)) + + return oldCacheDirs, nil +} + +// Old returns a list of cache directories with a modification time of more +// than 30 days ago. +func Old(basedir string) ([]os.FileInfo, error) { + return OlderThan(basedir, MaxCacheAge) +} + +// IsOld returns true if the timestamp is considered old. 
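+//
+// Aside (illustrative): Old and OlderThan above feed a typical pruning loop,
+// sketched here with this file's imports:
+//
+//	old, err := Old(basedir) // dirs untouched for more than MaxCacheAge
+//	if err == nil {
+//		for _, fi := range old {
+//			_ = os.RemoveAll(filepath.Join(basedir, fi.Name()))
+//		}
+//	}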
+func IsOld(t time.Time, maxAge time.Duration) bool {
+	oldest := time.Now().Add(-maxAge)
+	return t.Before(oldest)
+}
+
+// errNoSuchFile is returned when a file is not cached.
+type errNoSuchFile struct {
+	Type string
+	Name string
+}
+
+func (e errNoSuchFile) Error() string {
+	return fmt.Sprintf("file %v (%v) is not cached", e.Name, e.Type)
+}
+
+// IsNotExist returns true if the error was caused by a non-existing file.
+func (c *Cache) IsNotExist(err error) bool {
+	_, ok := errors.Cause(err).(errNoSuchFile)
+	return ok
+}
+
+// Wrap returns a backend with a cache.
+func (c *Cache) Wrap(be restic.Backend) restic.Backend {
+	return newBackend(be, c)
+}
+
+// BaseDir returns the base directory.
+func (c *Cache) BaseDir() string {
+	return c.Base
+}
diff --git a/internal/cache/dir.go b/internal/cache/dir.go
new file mode 100644
index 000000000..0b427b8e3
--- /dev/null
+++ b/internal/cache/dir.go
@@ -0,0 +1,98 @@
+package cache
+
+import (
+	"os"
+	"path/filepath"
+	"runtime"
+
+	"github.com/pkg/errors"
+	"github.com/restic/restic/internal/debug"
+	"github.com/restic/restic/internal/fs"
+)
+
+// xdgCacheDir returns the cache directory according to XDG basedir spec, see
+// http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html
+func xdgCacheDir() (string, error) {
+	xdgcache := os.Getenv("XDG_CACHE_HOME")
+	home := os.Getenv("HOME")
+
+	if xdgcache != "" {
+		return filepath.Join(xdgcache, "restic"), nil
+	} else if home != "" {
+		return filepath.Join(home, ".cache", "restic"), nil
+	}
+
+	return "", errors.New("unable to locate cache directory (XDG_CACHE_HOME and HOME unset)")
+}
+
+// windowsCacheDir returns the cache directory for Windows.
+//
+// Uses LOCALAPPDATA, where application data that is not synchronized between
+// machines is stored (browser caches are kept here, for example).
+func windowsCacheDir() (string, error) {
+	appdata := os.Getenv("LOCALAPPDATA")
+	if appdata == "" {
+		return "", errors.New("unable to locate cache directory (LOCALAPPDATA unset)")
+	}
+	return filepath.Join(appdata, "restic"), nil
+}
+
+// darwinCacheDir returns the cache directory for darwin.
+//
+// Uses ~/Library/Caches/, which is recommended by Apple, see
+// https://developer.apple.com/library/content/documentation/FileManagement/Conceptual/FileSystemProgrammingGuide/MacOSXDirectories/MacOSXDirectories.html
+func darwinCacheDir() (string, error) {
+	home := os.Getenv("HOME")
+	if home == "" {
+		return "", errors.New("unable to locate cache directory (HOME unset)")
+	}
+	return filepath.Join(home, "Library", "Caches", "restic"), nil
+}
+
+// DefaultDir returns the default cache directory for the current OS.
+func DefaultDir() (cachedir string, err error) {
+	switch runtime.GOOS {
+	case "darwin":
+		cachedir, err = darwinCacheDir()
+	case "windows":
+		cachedir, err = windowsCacheDir()
+	default:
+		// Default to XDG for Linux and any other OSes.
+		cachedir, err = xdgCacheDir()
+	}
+
+	if err != nil {
+		return "", err
+	}
+
+	return cachedir, nil
+}
+
+// mkdirCacheDir ensures that the cache directory exists. If it did not exist
+// before, created is set to true.
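+//
+// Illustrative wiring (not upstream): New in cache.go ties these two pieces
+// together, roughly
+//
+//	dir, _ := DefaultDir()             // per-OS default, e.g. ~/.cache/restic
+//	created, err := mkdirCacheDir(dir) // created reports a fresh directory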
+func mkdirCacheDir(cachedir string) (created bool, err error) { + var newCacheDir bool + + fi, err := fs.Stat(cachedir) + if os.IsNotExist(errors.Cause(err)) { + err = fs.MkdirAll(cachedir, 0700) + if err != nil { + return true, errors.Wrap(err, "MkdirAll") + } + + fi, err = fs.Stat(cachedir) + debug.Log("create cache dir %v", cachedir) + + newCacheDir = true + } + + if err != nil { + return newCacheDir, errors.Wrap(err, "Stat") + } + + if !fi.IsDir() { + return newCacheDir, errors.Errorf("cache dir %v is not a directory", cachedir) + } + + return newCacheDir, nil +} diff --git a/internal/cache/file.go b/internal/cache/file.go new file mode 100644 index 000000000..36c38ba97 --- /dev/null +++ b/internal/cache/file.go @@ -0,0 +1,222 @@ +package cache + +import ( + "io" + "os" + "path/filepath" + + "github.com/pkg/errors" + "github.com/restic/restic/internal/crypto" + "github.com/restic/restic/internal/debug" + "github.com/restic/restic/internal/fs" + "github.com/restic/restic/internal/restic" +) + +func (c *Cache) filename(h restic.Handle) string { + if len(h.Name) < 2 { + panic("Name is empty or too short") + } + subdir := h.Name[:2] + return filepath.Join(c.Path, cacheLayoutPaths[h.Type], subdir, h.Name) +} + +func (c *Cache) canBeCached(t restic.FileType) bool { + if c == nil { + return false + } + + if _, ok := cacheLayoutPaths[t]; !ok { + return false + } + + return true +} + +type readCloser struct { + io.Reader + io.Closer +} + +// Load returns a reader that yields the contents of the file with the +// given handle. rd must be closed after use. If an error is returned, the +// ReadCloser is nil. +func (c *Cache) Load(h restic.Handle, length int, offset int64) (io.ReadCloser, error) { + debug.Log("Load from cache: %v", h) + if !c.canBeCached(h.Type) { + return nil, errors.New("cannot be cached") + } + + f, err := fs.Open(c.filename(h)) + if err != nil { + return nil, errors.Wrap(err, "Open") + } + + fi, err := f.Stat() + if err != nil { + _ = f.Close() + return nil, errors.Wrap(err, "Stat") + } + + if fi.Size() <= crypto.Extension { + _ = f.Close() + _ = c.Remove(h) + return nil, errors.Errorf("cached file %v is truncated, removing", h) + } + + if fi.Size() < offset+int64(length) { + _ = f.Close() + _ = c.Remove(h) + return nil, errors.Errorf("cached file %v is too small, removing", h) + } + + if offset > 0 { + if _, err = f.Seek(offset, io.SeekStart); err != nil { + _ = f.Close() + return nil, err + } + } + + rd := readCloser{Reader: f, Closer: f} + if length > 0 { + rd.Reader = io.LimitReader(f, int64(length)) + } + + return rd, nil +} + +// SaveWriter returns a writer for the cache object h. It must be closed after writing is finished. +func (c *Cache) SaveWriter(h restic.Handle) (io.WriteCloser, error) { + debug.Log("Save to cache: %v", h) + if !c.canBeCached(h.Type) { + return nil, errors.New("cannot be cached") + } + + p := c.filename(h) + err := fs.MkdirAll(filepath.Dir(p), 0700) + if err != nil { + return nil, errors.Wrap(err, "MkdirAll") + } + + f, err := fs.OpenFile(p, os.O_CREATE|os.O_EXCL|os.O_WRONLY, 0400) + if err != nil { + return nil, errors.Wrap(err, "Create") + } + + return f, err +} + +// Save saves a file in the cache. 
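+//
+// Aside (illustrative): filename above shards entries by the first two hex
+// characters of the name, so the on-disk layout looks like
+//
+//	<cache>/data/1d/1dfc6bc0...
+//
+// which keeps individual directories small even for large repositories.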
+func (c *Cache) Save(h restic.Handle, rd io.Reader) error {
+	debug.Log("Save to cache: %v", h)
+	if rd == nil {
+		return errors.New("Save() called with nil reader")
+	}
+
+	f, err := c.SaveWriter(h)
+	if err != nil {
+		return err
+	}
+
+	n, err := io.Copy(f, rd)
+	if err != nil {
+		_ = f.Close()
+		_ = c.Remove(h)
+		return errors.Wrap(err, "Copy")
+	}
+
+	if n <= crypto.Extension {
+		_ = f.Close()
+		_ = c.Remove(h)
+		debug.Log("trying to cache truncated file %v, removing", h)
+		return nil
+	}
+
+	if err = f.Close(); err != nil {
+		_ = c.Remove(h)
+		return errors.Wrap(err, "Close")
+	}
+
+	return nil
+}
+
+// Remove deletes a file. If the file is not in the cache, no error is returned.
+func (c *Cache) Remove(h restic.Handle) error {
+	if !c.Has(h) {
+		return nil
+	}
+
+	return fs.Remove(c.filename(h))
+}
+
+// Clear removes all files of type t from the cache that are not contained in
+// the set valid.
+func (c *Cache) Clear(t restic.FileType, valid restic.IDSet) error {
+	debug.Log("Clearing cache for %v: %v valid files", t, len(valid))
+	if !c.canBeCached(t) {
+		return nil
+	}
+
+	list, err := c.list(t)
+	if err != nil {
+		return err
+	}
+
+	for id := range list {
+		if valid.Has(id) {
+			continue
+		}
+
+		if err = fs.Remove(c.filename(restic.Handle{Type: t, Name: id.String()})); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func isFile(fi os.FileInfo) bool {
+	return fi.Mode()&(os.ModeType|os.ModeCharDevice) == 0
+}
+
+// list returns a list of all files of type t in the cache.
+func (c *Cache) list(t restic.FileType) (restic.IDSet, error) {
+	if !c.canBeCached(t) {
+		return nil, errors.New("cannot be cached")
+	}
+
+	list := restic.NewIDSet()
+	dir := filepath.Join(c.Path, cacheLayoutPaths[t])
+	err := filepath.Walk(dir, func(name string, fi os.FileInfo, err error) error {
+		if err != nil {
+			return errors.Wrap(err, "Walk")
+		}
+
+		if !isFile(fi) {
+			return nil
+		}
+
+		id, err := restic.ParseID(filepath.Base(name))
+		if err != nil {
+			return nil
+		}
+
+		list.Insert(id)
+		return nil
+	})
+
+	return list, err
+}
+
+// Has returns true if the file is cached.
+func (c *Cache) Has(h restic.Handle) bool {
+	if !c.canBeCached(h.Type) {
+		return false
+	}
+
+	_, err := fs.Stat(c.filename(h))
+	if err == nil {
+		return true
+	}
+
+	return false
+}
diff --git a/internal/cache/file_test.go b/internal/cache/file_test.go
new file mode 100644
index 000000000..cf275c84e
--- /dev/null
+++ b/internal/cache/file_test.go
@@ -0,0 +1,259 @@
+package cache
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"math/rand"
+	"testing"
+	"time"
+
+	"github.com/restic/restic/internal/restic"
+	"github.com/restic/restic/internal/test"
+)
+
+func generateRandomFiles(t testing.TB, tpe restic.FileType, c *Cache) restic.IDSet {
+	ids := restic.NewIDSet()
+	for i := 0; i < rand.Intn(15)+10; i++ {
+		buf := test.Random(rand.Int(), 1<<19)
+		id := restic.Hash(buf)
+		h := restic.Handle{Type: tpe, Name: id.String()}
+
+		if c.Has(h) {
+			t.Errorf("index %v present before save", id)
+		}
+
+		err := c.Save(h, bytes.NewReader(buf))
+		if err != nil {
+			t.Fatal(err)
+		}
+		ids.Insert(id)
+	}
+	return ids
+}
+
+// randomID returns a random ID from s.
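+//
+// Aside (illustrative): Clear above is the synchronization point with the
+// repository; a typical caller lists what still exists and evicts the rest:
+//
+//	valid := restic.NewIDSet( /* IDs still present in the repo */ )
+//	_ = c.Clear(restic.IndexFile, valid)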
+func randomID(s restic.IDSet) restic.ID { + for id := range s { + return id + } + panic("set is empty") +} + +func load(t testing.TB, c *Cache, h restic.Handle) []byte { + rd, err := c.Load(h, 0, 0) + if err != nil { + t.Fatal(err) + } + + if rd == nil { + t.Fatalf("Load() returned nil reader") + } + + buf, err := ioutil.ReadAll(rd) + if err != nil { + t.Fatal(err) + } + + if err = rd.Close(); err != nil { + t.Fatal(err) + } + + return buf +} + +func listFiles(t testing.TB, c *Cache, tpe restic.FileType) restic.IDSet { + list, err := c.list(tpe) + if err != nil { + t.Errorf("listing failed: %v", err) + } + + return list +} + +func clearFiles(t testing.TB, c *Cache, tpe restic.FileType, valid restic.IDSet) { + if err := c.Clear(tpe, valid); err != nil { + t.Error(err) + } +} + +func TestFiles(t *testing.T) { + seed := time.Now().Unix() + t.Logf("seed is %v", seed) + rand.Seed(seed) + + c, cleanup := TestNewCache(t) + defer cleanup() + + var tests = []restic.FileType{ + restic.SnapshotFile, + restic.DataFile, + restic.IndexFile, + } + + for _, tpe := range tests { + t.Run(fmt.Sprintf("%v", tpe), func(t *testing.T) { + ids := generateRandomFiles(t, tpe, c) + id := randomID(ids) + + h := restic.Handle{Type: tpe, Name: id.String()} + id2 := restic.Hash(load(t, c, h)) + + if !id.Equal(id2) { + t.Errorf("wrong data returned, want %v, got %v", id.Str(), id2.Str()) + } + + if !c.Has(h) { + t.Errorf("cache thinks index %v isn't present", id.Str()) + } + + list := listFiles(t, c, tpe) + if !ids.Equals(list) { + t.Errorf("wrong list of index IDs returned, want:\n %v\ngot:\n %v", ids, list) + } + + clearFiles(t, c, tpe, restic.NewIDSet(id)) + list2 := listFiles(t, c, tpe) + ids.Delete(id) + want := restic.NewIDSet(id) + if !list2.Equals(want) { + t.Errorf("ClearIndexes removed indexes, want:\n %v\ngot:\n %v", list2, want) + } + + clearFiles(t, c, tpe, restic.NewIDSet()) + want = restic.NewIDSet() + list3 := listFiles(t, c, tpe) + if !list3.Equals(want) { + t.Errorf("ClearIndexes returned a wrong list, want:\n %v\ngot:\n %v", want, list3) + } + }) + } +} + +func TestFileSaveWriter(t *testing.T) { + seed := time.Now().Unix() + t.Logf("seed is %v", seed) + rand.Seed(seed) + + c, cleanup := TestNewCache(t) + defer cleanup() + + // save about 5 MiB of data in the cache + data := test.Random(rand.Int(), 5234142) + id := restic.ID{} + copy(id[:], data) + h := restic.Handle{ + Type: restic.DataFile, + Name: id.String(), + } + + wr, err := c.SaveWriter(h) + if err != nil { + t.Fatal(err) + } + + n, err := io.Copy(wr, bytes.NewReader(data)) + if err != nil { + t.Fatal(err) + } + + if n != int64(len(data)) { + t.Fatalf("wrong number of bytes written, want %v, got %v", len(data), n) + } + + if err = wr.Close(); err != nil { + t.Fatal(err) + } + + rd, err := c.Load(h, 0, 0) + if err != nil { + t.Fatal(err) + } + + buf, err := ioutil.ReadAll(rd) + if err != nil { + t.Fatal(err) + } + + if len(buf) != len(data) { + t.Fatalf("wrong number of bytes read, want %v, got %v", len(data), len(buf)) + } + + if !bytes.Equal(buf, data) { + t.Fatalf("wrong data returned, want:\n %02x\ngot:\n %02x", data[:16], buf[:16]) + } + + if err = rd.Close(); err != nil { + t.Fatal(err) + } +} + +func TestFileLoad(t *testing.T) { + seed := time.Now().Unix() + t.Logf("seed is %v", seed) + rand.Seed(seed) + + c, cleanup := TestNewCache(t) + defer cleanup() + + // save about 5 MiB of data in the cache + data := test.Random(rand.Int(), 5234142) + id := restic.ID{} + copy(id[:], data) + h := restic.Handle{ + Type: restic.DataFile, + Name: 
id.String(), + } + if err := c.Save(h, bytes.NewReader(data)); err != nil { + t.Fatalf("Save() returned error: %v", err) + } + + var tests = []struct { + offset int64 + length int + }{ + {0, 0}, + {5, 0}, + {32*1024 + 5, 0}, + {0, 123}, + {0, 64*1024 + 234}, + {100, 5234142 - 100}, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("%v/%v", test.length, test.offset), func(t *testing.T) { + rd, err := c.Load(h, test.length, test.offset) + if err != nil { + t.Fatal(err) + } + + buf, err := ioutil.ReadAll(rd) + if err != nil { + t.Fatal(err) + } + + if err = rd.Close(); err != nil { + t.Fatal(err) + } + + o := int(test.offset) + l := test.length + if test.length == 0 { + l = len(data) - o + } + + if l > len(data)-o { + l = len(data) - o + } + + if len(buf) != l { + t.Fatalf("wrong number of bytes returned: want %d, got %d", l, len(buf)) + } + + if !bytes.Equal(buf, data[o:o+l]) { + t.Fatalf("wrong data returned, want:\n %02x\ngot:\n %02x", data[o:o+16], buf[:16]) + } + }) + } +} diff --git a/internal/cache/testing.go b/internal/cache/testing.go new file mode 100644 index 000000000..b3156374d --- /dev/null +++ b/internal/cache/testing.go @@ -0,0 +1,20 @@ +package cache + +import ( + "testing" + + "github.com/restic/restic/internal/restic" + "github.com/restic/restic/internal/test" +) + +// TestNewCache returns a cache in a temporary directory which is removed when +// cleanup is called. +func TestNewCache(t testing.TB) (*Cache, func()) { + dir, cleanup := test.TempDir(t) + t.Logf("created new cache at %v", dir) + cache, err := New(restic.NewRandomID().String(), dir) + if err != nil { + t.Fatal(err) + } + return cache, cleanup +} diff --git a/internal/checker/checker.go b/internal/checker/checker.go new file mode 100644 index 000000000..7255e990d --- /dev/null +++ b/internal/checker/checker.go @@ -0,0 +1,785 @@ +package checker + +import ( + "context" + "fmt" + "io" + "os" + "sync" + + "github.com/restic/restic/internal/debug" + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/pack" + "github.com/restic/restic/internal/repository" + "github.com/restic/restic/internal/restic" + "golang.org/x/sync/errgroup" +) + +// Checker runs various checks on a repository. It is advisable to create an +// exclusive Lock in the repository before running any checks. +// +// A Checker only tests for internal errors within the data structures of the +// repository (e.g. missing blobs), and needs a valid Repository to work on. +type Checker struct { + packs restic.IDSet + blobs restic.IDSet + blobRefs struct { + sync.Mutex + M map[restic.ID]uint + } + indexes map[restic.ID]*repository.Index + + masterIndex *repository.MasterIndex + + repo restic.Repository +} + +// New returns a new checker which runs on repo. +func New(repo restic.Repository) *Checker { + c := &Checker{ + packs: restic.NewIDSet(), + blobs: restic.NewIDSet(), + masterIndex: repository.NewMasterIndex(), + indexes: make(map[restic.ID]*repository.Index), + repo: repo, + } + + c.blobRefs.M = make(map[restic.ID]uint) + + return c +} + +const defaultParallelism = 40 + +// ErrDuplicatePacks is returned when a pack is found in more than one index. +type ErrDuplicatePacks struct { + PackID restic.ID + Indexes restic.IDSet +} + +func (e ErrDuplicatePacks) Error() string { + return fmt.Sprintf("pack %v contained in several indexes: %v", e.PackID.Str(), e.Indexes) +} + +// ErrOldIndexFormat is returned when an index with the old format is +// found. 
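+//
+// Aside (illustrative): this is a hint rather than a hard failure; LoadIndex
+// below retries such indexes with the legacy decoder, roughly:
+//
+//	idx, err := repository.LoadIndexWithDecoder(ctx, c.repo, id, repository.DecodeIndex)
+//	if errors.Cause(err) == repository.ErrOldIndexFormat {
+//		idx, err = repository.LoadIndexWithDecoder(ctx, c.repo, id, repository.DecodeOldIndex)
+//	}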
+type ErrOldIndexFormat struct { + restic.ID +} + +func (err ErrOldIndexFormat) Error() string { + return fmt.Sprintf("index %v has old format", err.ID.Str()) +} + +// LoadIndex loads all index files. +func (c *Checker) LoadIndex(ctx context.Context) (hints []error, errs []error) { + debug.Log("Start") + type indexRes struct { + Index *repository.Index + err error + ID string + } + + indexCh := make(chan indexRes) + + worker := func(ctx context.Context, id restic.ID) error { + debug.Log("worker got index %v", id) + idx, err := repository.LoadIndexWithDecoder(ctx, c.repo, id, repository.DecodeIndex) + if errors.Cause(err) == repository.ErrOldIndexFormat { + debug.Log("index %v has old format", id) + hints = append(hints, ErrOldIndexFormat{id}) + + idx, err = repository.LoadIndexWithDecoder(ctx, c.repo, id, repository.DecodeOldIndex) + } + + err = errors.Wrapf(err, "error loading index %v", id.Str()) + + select { + case indexCh <- indexRes{Index: idx, ID: id.String(), err: err}: + case <-ctx.Done(): + } + + return nil + } + + go func() { + defer close(indexCh) + debug.Log("start loading indexes in parallel") + err := repository.FilesInParallel(ctx, c.repo.Backend(), restic.IndexFile, defaultParallelism, + repository.ParallelWorkFuncParseID(worker)) + debug.Log("loading indexes finished, error: %v", err) + if err != nil { + panic(err) + } + }() + + done := make(chan struct{}) + defer close(done) + + packToIndex := make(map[restic.ID]restic.IDSet) + + for res := range indexCh { + debug.Log("process index %v, err %v", res.ID, res.err) + + if res.err != nil { + errs = append(errs, res.err) + continue + } + + idxID, err := restic.ParseID(res.ID) + if err != nil { + errs = append(errs, errors.Errorf("unable to parse as index ID: %v", res.ID)) + continue + } + + c.indexes[idxID] = res.Index + c.masterIndex.Insert(res.Index) + + debug.Log("process blobs") + cnt := 0 + for blob := range res.Index.Each(ctx) { + c.packs.Insert(blob.PackID) + c.blobs.Insert(blob.ID) + c.blobRefs.M[blob.ID] = 0 + cnt++ + + if _, ok := packToIndex[blob.PackID]; !ok { + packToIndex[blob.PackID] = restic.NewIDSet() + } + packToIndex[blob.PackID].Insert(idxID) + } + + debug.Log("%d blobs processed", cnt) + } + + debug.Log("checking for duplicate packs") + for packID := range c.packs { + debug.Log(" check pack %v: contained in %d indexes", packID, len(packToIndex[packID])) + if len(packToIndex[packID]) > 1 { + hints = append(hints, ErrDuplicatePacks{ + PackID: packID, + Indexes: packToIndex[packID], + }) + } + } + + err := c.repo.SetIndex(c.masterIndex) + if err != nil { + debug.Log("SetIndex returned error: %v", err) + errs = append(errs, err) + } + + return hints, errs +} + +// PackError describes an error with a specific pack. +type PackError struct { + ID restic.ID + Orphaned bool + Err error +} + +func (e PackError) Error() string { + return "pack " + e.ID.Str() + ": " + e.Err.Error() +} + +// IsOrphanedPack returns true if the error describes a pack which is not +// contained in any index. +func IsOrphanedPack(err error) bool { + if e, ok := errors.Cause(err).(PackError); ok && e.Orphaned { + return true + } + + return false +} + +// Packs checks that all packs referenced in the index are still available and +// there are no packs that aren't in an index. errChan is closed after all +// packs have been checked. 
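+//
+// Aside (illustrative): the two failure modes below are plain set
+// differences, and callers can tell them apart via IsOrphanedPack:
+//
+//	repoPacks.Sub(c.packs) // on disk but in no index -> orphaned
+//	c.packs.Sub(repoPacks) // indexed but not on disk -> missing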
+func (c *Checker) Packs(ctx context.Context, errChan chan<- error) { + defer close(errChan) + + debug.Log("checking for %d packs", len(c.packs)) + + debug.Log("listing repository packs") + repoPacks := restic.NewIDSet() + + err := c.repo.List(ctx, restic.DataFile, func(id restic.ID, size int64) error { + repoPacks.Insert(id) + return nil + }) + + if err != nil { + errChan <- err + } + + // orphaned: present in the repo but not in c.packs + for orphanID := range repoPacks.Sub(c.packs) { + select { + case <-ctx.Done(): + return + case errChan <- PackError{ID: orphanID, Orphaned: true, Err: errors.New("not referenced in any index")}: + } + } + + // missing: present in c.packs but not in the repo + for missingID := range c.packs.Sub(repoPacks) { + select { + case <-ctx.Done(): + return + case errChan <- PackError{ID: missingID, Err: errors.New("does not exist")}: + } + } +} + +// Error is an error that occurred while checking a repository. +type Error struct { + TreeID restic.ID + BlobID restic.ID + Err error +} + +func (e Error) Error() string { + if !e.BlobID.IsNull() && !e.TreeID.IsNull() { + msg := "tree " + e.TreeID.Str() + msg += ", blob " + e.BlobID.Str() + msg += ": " + e.Err.Error() + return msg + } + + if !e.TreeID.IsNull() { + return "tree " + e.TreeID.Str() + ": " + e.Err.Error() + } + + return e.Err.Error() +} + +func loadTreeFromSnapshot(ctx context.Context, repo restic.Repository, id restic.ID) (restic.ID, error) { + sn, err := restic.LoadSnapshot(ctx, repo, id) + if err != nil { + debug.Log("error loading snapshot %v: %v", id, err) + return restic.ID{}, err + } + + if sn.Tree == nil { + debug.Log("snapshot %v has no tree", id) + return restic.ID{}, errors.Errorf("snapshot %v has no tree", id) + } + + return *sn.Tree, nil +} + +// loadSnapshotTreeIDs loads all snapshots from backend and returns the tree IDs. +func loadSnapshotTreeIDs(ctx context.Context, repo restic.Repository) (restic.IDs, []error) { + var trees struct { + IDs restic.IDs + sync.Mutex + } + + var errs struct { + errs []error + sync.Mutex + } + + snapshotWorker := func(ctx context.Context, strID string) error { + id, err := restic.ParseID(strID) + if err != nil { + return err + } + + debug.Log("load snapshot %v", id) + + treeID, err := loadTreeFromSnapshot(ctx, repo, id) + if err != nil { + errs.Lock() + errs.errs = append(errs.errs, err) + errs.Unlock() + return nil + } + + debug.Log("snapshot %v has tree %v", id, treeID) + trees.Lock() + trees.IDs = append(trees.IDs, treeID) + trees.Unlock() + + return nil + } + + err := repository.FilesInParallel(ctx, repo.Backend(), restic.SnapshotFile, defaultParallelism, snapshotWorker) + if err != nil { + errs.errs = append(errs.errs, err) + } + + return trees.IDs, errs.errs +} + +// TreeError collects several errors that occurred while processing a tree. +type TreeError struct { + ID restic.ID + Errors []error +} + +func (e TreeError) Error() string { + return fmt.Sprintf("tree %v: %v", e.ID.Str(), e.Errors) +} + +type treeJob struct { + restic.ID + error + *restic.Tree +} + +// loadTreeWorker loads trees from repo and sends them to out. 
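+//
+// Aside (illustrative): the workers below rely on the nil-channel idiom: a
+// select case on a nil channel never fires, so setting outCh = nil disables
+// sending until a job is ready, and setting inCh = nil pauses receiving
+// while a result waits to be delivered. In miniature:
+//
+//	case job = <-inCh:   // got work: enable send, pause receive
+//		outCh = out; inCh = nil
+//	case outCh <- job:   // delivered: pause send, resume receive
+//		outCh = nil; inCh = in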
+func loadTreeWorker(ctx context.Context, repo restic.Repository, + in <-chan restic.ID, out chan<- treeJob, + wg *sync.WaitGroup) { + + defer func() { + debug.Log("exiting") + wg.Done() + }() + + var ( + inCh = in + outCh = out + job treeJob + ) + + outCh = nil + for { + select { + case <-ctx.Done(): + return + + case treeID, ok := <-inCh: + if !ok { + return + } + debug.Log("load tree %v", treeID) + + tree, err := repo.LoadTree(ctx, treeID) + debug.Log("load tree %v (%v) returned err: %v", tree, treeID, err) + job = treeJob{ID: treeID, error: err, Tree: tree} + outCh = out + inCh = nil + + case outCh <- job: + debug.Log("sent tree %v", job.ID) + outCh = nil + inCh = in + } + } +} + +// checkTreeWorker checks the trees received and sends out errors to errChan. +func (c *Checker) checkTreeWorker(ctx context.Context, in <-chan treeJob, out chan<- error, wg *sync.WaitGroup) { + defer func() { + debug.Log("exiting") + wg.Done() + }() + + var ( + inCh = in + outCh = out + treeError TreeError + ) + + outCh = nil + for { + select { + case <-ctx.Done(): + debug.Log("done channel closed, exiting") + return + + case job, ok := <-inCh: + if !ok { + debug.Log("input channel closed, exiting") + return + } + + id := job.ID + alreadyChecked := false + c.blobRefs.Lock() + if c.blobRefs.M[id] > 0 { + alreadyChecked = true + } + c.blobRefs.M[id]++ + debug.Log("tree %v refcount %d", job.ID, c.blobRefs.M[id]) + c.blobRefs.Unlock() + + if alreadyChecked { + continue + } + + debug.Log("check tree %v (tree %v, err %v)", job.ID, job.Tree, job.error) + + var errs []error + if job.error != nil { + errs = append(errs, job.error) + } else { + errs = c.checkTree(job.ID, job.Tree) + } + + if len(errs) > 0 { + debug.Log("checked tree %v: %v errors", job.ID, len(errs)) + treeError = TreeError{ID: job.ID, Errors: errs} + outCh = out + inCh = nil + } + + case outCh <- treeError: + debug.Log("tree %v: sent %d errors", treeError.ID, len(treeError.Errors)) + outCh = nil + inCh = in + } + } +} + +func filterTrees(ctx context.Context, backlog restic.IDs, loaderChan chan<- restic.ID, in <-chan treeJob, out chan<- treeJob) { + defer func() { + debug.Log("closing output channels") + close(loaderChan) + close(out) + }() + + var ( + inCh = in + outCh = out + loadCh = loaderChan + job treeJob + nextTreeID restic.ID + outstandingLoadTreeJobs = 0 + ) + + outCh = nil + loadCh = nil + + for { + if loadCh == nil && len(backlog) > 0 { + loadCh = loaderChan + nextTreeID, backlog = backlog[0], backlog[1:] + } + + if loadCh == nil && outCh == nil && outstandingLoadTreeJobs == 0 { + debug.Log("backlog is empty, all channels nil, exiting") + return + } + + select { + case <-ctx.Done(): + return + + case loadCh <- nextTreeID: + outstandingLoadTreeJobs++ + loadCh = nil + + case j, ok := <-inCh: + if !ok { + debug.Log("input channel closed") + inCh = nil + in = nil + continue + } + + outstandingLoadTreeJobs-- + + debug.Log("input job tree %v", j.ID) + + var err error + + if j.error != nil { + debug.Log("received job with error: %v (tree %v, ID %v)", j.error, j.Tree, j.ID) + } else if j.Tree == nil { + debug.Log("received job with nil tree pointer: %v (ID %v)", j.error, j.ID) + err = errors.New("tree is nil and error is nil") + } else { + debug.Log("subtrees for tree %v: %v", j.ID, j.Tree.Subtrees()) + for _, id := range j.Tree.Subtrees() { + if id.IsNull() { + // We do not need to raise this error here, it is + // checked when the tree is checked. Just make sure + // that we do not add any null IDs to the backlog. 
+ debug.Log("tree %v has nil subtree", j.ID) + continue + } + backlog = append(backlog, id) + } + } + + if err != nil { + // send a new job with the new error instead of the old one + j = treeJob{ID: j.ID, error: err} + } + + job = j + outCh = out + inCh = nil + + case outCh <- job: + debug.Log("tree sent to check: %v", job.ID) + outCh = nil + inCh = in + } + } +} + +// Structure checks that for all snapshots all referenced data blobs and +// subtrees are available in the index. errChan is closed after all trees have +// been traversed. +func (c *Checker) Structure(ctx context.Context, errChan chan<- error) { + defer close(errChan) + + trees, errs := loadSnapshotTreeIDs(ctx, c.repo) + debug.Log("need to check %d trees from snapshots, %d errs returned", len(trees), len(errs)) + + for _, err := range errs { + select { + case <-ctx.Done(): + return + case errChan <- err: + } + } + + treeIDChan := make(chan restic.ID) + treeJobChan1 := make(chan treeJob) + treeJobChan2 := make(chan treeJob) + + var wg sync.WaitGroup + for i := 0; i < defaultParallelism; i++ { + wg.Add(2) + go loadTreeWorker(ctx, c.repo, treeIDChan, treeJobChan1, &wg) + go c.checkTreeWorker(ctx, treeJobChan2, errChan, &wg) + } + + filterTrees(ctx, trees, treeIDChan, treeJobChan1, treeJobChan2) + + wg.Wait() +} + +func (c *Checker) checkTree(id restic.ID, tree *restic.Tree) (errs []error) { + debug.Log("checking tree %v", id) + + var blobs []restic.ID + + for _, node := range tree.Nodes { + switch node.Type { + case "file": + if node.Content == nil { + errs = append(errs, Error{TreeID: id, Err: errors.Errorf("file %q has nil blob list", node.Name)}) + } + + var size uint64 + for b, blobID := range node.Content { + if blobID.IsNull() { + errs = append(errs, Error{TreeID: id, Err: errors.Errorf("file %q blob %d has null ID", node.Name, b)}) + continue + } + blobs = append(blobs, blobID) + blobSize, found := c.repo.LookupBlobSize(blobID, restic.DataBlob) + if !found { + errs = append(errs, Error{TreeID: id, Err: errors.Errorf("file %q blob %d size could not be found", node.Name, b)}) + } + size += uint64(blobSize) + } + case "dir": + if node.Subtree == nil { + errs = append(errs, Error{TreeID: id, Err: errors.Errorf("dir node %q has no subtree", node.Name)}) + continue + } + + if node.Subtree.IsNull() { + errs = append(errs, Error{TreeID: id, Err: errors.Errorf("dir node %q subtree id is null", node.Name)}) + continue + } + + case "symlink", "socket", "chardev", "dev", "fifo": + // nothing to check + + default: + errs = append(errs, Error{TreeID: id, Err: errors.Errorf("node %q with invalid type %q", node.Name, node.Type)}) + } + + if node.Name == "" { + errs = append(errs, Error{TreeID: id, Err: errors.New("node with empty name")}) + } + } + + for _, blobID := range blobs { + c.blobRefs.Lock() + c.blobRefs.M[blobID]++ + debug.Log("blob %v refcount %d", blobID, c.blobRefs.M[blobID]) + c.blobRefs.Unlock() + + if !c.blobs.Has(blobID) { + debug.Log("tree %v references blob %v which isn't contained in index", id, blobID) + + errs = append(errs, Error{TreeID: id, BlobID: blobID, Err: errors.New("not found in index")}) + } + } + + return errs +} + +// UnusedBlobs returns all blobs that have never been referenced. 
+func (c *Checker) UnusedBlobs() (blobs restic.IDs) { + c.blobRefs.Lock() + defer c.blobRefs.Unlock() + + debug.Log("checking %d blobs", len(c.blobs)) + for id := range c.blobs { + if c.blobRefs.M[id] == 0 { + debug.Log("blob %v not referenced", id) + blobs = append(blobs, id) + } + } + + return blobs +} + +// CountPacks returns the number of packs in the repository. +func (c *Checker) CountPacks() uint64 { + return uint64(len(c.packs)) +} + +// GetPacks returns IDSet of packs in the repository +func (c *Checker) GetPacks() restic.IDSet { + return c.packs +} + +// checkPack reads a pack and checks the integrity of all blobs. +func checkPack(ctx context.Context, r restic.Repository, id restic.ID) error { + debug.Log("checking pack %v", id) + h := restic.Handle{Type: restic.DataFile, Name: id.String()} + + packfile, hash, size, err := repository.DownloadAndHash(ctx, r.Backend(), h) + if err != nil { + return errors.Wrap(err, "checkPack") + } + + defer func() { + _ = packfile.Close() + _ = os.Remove(packfile.Name()) + }() + + debug.Log("hash for pack %v is %v", id, hash) + + if !hash.Equal(id) { + debug.Log("Pack ID does not match, want %v, got %v", id, hash) + return errors.Errorf("Pack ID does not match, want %v, got %v", id.Str(), hash.Str()) + } + + blobs, err := pack.List(r.Key(), packfile, size) + if err != nil { + return err + } + + var errs []error + var buf []byte + for i, blob := range blobs { + debug.Log(" check blob %d: %v", i, blob) + + buf = buf[:cap(buf)] + if uint(len(buf)) < blob.Length { + buf = make([]byte, blob.Length) + } + buf = buf[:blob.Length] + + _, err := packfile.Seek(int64(blob.Offset), 0) + if err != nil { + return errors.Errorf("Seek(%v): %v", blob.Offset, err) + } + + _, err = io.ReadFull(packfile, buf) + if err != nil { + debug.Log(" error loading blob %v: %v", blob.ID, err) + errs = append(errs, errors.Errorf("blob %v: %v", i, err)) + continue + } + + nonce, ciphertext := buf[:r.Key().NonceSize()], buf[r.Key().NonceSize():] + plaintext, err := r.Key().Open(ciphertext[:0], nonce, ciphertext, nil) + if err != nil { + debug.Log(" error decrypting blob %v: %v", blob.ID, err) + errs = append(errs, errors.Errorf("blob %v: %v", i, err)) + continue + } + + hash := restic.Hash(plaintext) + if !hash.Equal(blob.ID) { + debug.Log(" Blob ID does not match, want %v, got %v", blob.ID, hash) + errs = append(errs, errors.Errorf("Blob ID does not match, want %v, got %v", blob.ID.Str(), hash.Str())) + continue + } + } + + if len(errs) > 0 { + return errors.Errorf("pack %v contains %v errors: %v", id.Str(), len(errs), errs) + } + + return nil +} + +// ReadData loads all data from the repository and checks the integrity. +func (c *Checker) ReadData(ctx context.Context, p *restic.Progress, errChan chan<- error) { + c.ReadPacks(ctx, c.packs, p, errChan) +} + +// ReadPacks loads data from specified packs and checks the integrity. 
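+// Errors are sent to errChan, which is closed once all packs have been
+// processed, so callers can simply range over the channel.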
+func (c *Checker) ReadPacks(ctx context.Context, packs restic.IDSet, p *restic.Progress, errChan chan<- error) { + defer close(errChan) + + p.Start() + defer p.Done() + + g, ctx := errgroup.WithContext(ctx) + ch := make(chan restic.ID) + + // run workers + for i := 0; i < defaultParallelism; i++ { + g.Go(func() error { + for { + var id restic.ID + var ok bool + + select { + case <-ctx.Done(): + return nil + case id, ok = <-ch: + if !ok { + return nil + } + } + + err := checkPack(ctx, c.repo, id) + p.Report(restic.Stat{Blobs: 1}) + if err == nil { + continue + } + + select { + case <-ctx.Done(): + return nil + case errChan <- err: + } + } + }) + } + + // push packs to ch + for pack := range packs { + select { + case ch <- pack: + case <-ctx.Done(): + } + } + close(ch) + + err := g.Wait() + if err != nil { + select { + case <-ctx.Done(): + return + case errChan <- err: + } + } +} diff --git a/internal/checker/checker_test.go b/internal/checker/checker_test.go new file mode 100644 index 000000000..fa7d9e751 --- /dev/null +++ b/internal/checker/checker_test.go @@ -0,0 +1,389 @@ +package checker_test + +import ( + "context" + "io" + "io/ioutil" + "math/rand" + "os" + "path/filepath" + "sort" + "testing" + + "github.com/restic/restic/internal/archiver" + "github.com/restic/restic/internal/checker" + "github.com/restic/restic/internal/repository" + "github.com/restic/restic/internal/restic" + "github.com/restic/restic/internal/test" +) + +var checkerTestData = filepath.Join("testdata", "checker-test-repo.tar.gz") + +func collectErrors(ctx context.Context, f func(context.Context, chan<- error)) (errs []error) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + errChan := make(chan error) + + go f(ctx, errChan) + + for err := range errChan { + errs = append(errs, err) + } + + return errs +} + +func checkPacks(chkr *checker.Checker) []error { + return collectErrors(context.TODO(), chkr.Packs) +} + +func checkStruct(chkr *checker.Checker) []error { + return collectErrors(context.TODO(), chkr.Structure) +} + +func checkData(chkr *checker.Checker) []error { + return collectErrors( + context.TODO(), + func(ctx context.Context, errCh chan<- error) { + chkr.ReadData(ctx, nil, errCh) + }, + ) +} + +func TestCheckRepo(t *testing.T) { + repodir, cleanup := test.Env(t, checkerTestData) + defer cleanup() + + repo := repository.TestOpenLocal(t, repodir) + + chkr := checker.New(repo) + hints, errs := chkr.LoadIndex(context.TODO()) + if len(errs) > 0 { + t.Fatalf("expected no errors, got %v: %v", len(errs), errs) + } + + if len(hints) > 0 { + t.Errorf("expected no hints, got %v: %v", len(hints), hints) + } + + test.OKs(t, checkPacks(chkr)) + test.OKs(t, checkStruct(chkr)) +} + +func TestMissingPack(t *testing.T) { + repodir, cleanup := test.Env(t, checkerTestData) + defer cleanup() + + repo := repository.TestOpenLocal(t, repodir) + + packHandle := restic.Handle{ + Type: restic.DataFile, + Name: "657f7fb64f6a854fff6fe9279998ee09034901eded4e6db9bcee0e59745bbce6", + } + test.OK(t, repo.Backend().Remove(context.TODO(), packHandle)) + + chkr := checker.New(repo) + hints, errs := chkr.LoadIndex(context.TODO()) + if len(errs) > 0 { + t.Fatalf("expected no errors, got %v: %v", len(errs), errs) + } + + if len(hints) > 0 { + t.Errorf("expected no hints, got %v: %v", len(hints), hints) + } + + errs = checkPacks(chkr) + + test.Assert(t, len(errs) == 1, + "expected exactly one error, got %v", len(errs)) + + if err, ok := errs[0].(checker.PackError); ok { + test.Equals(t, packHandle.Name, err.ID.String()) + } else 
{ + t.Errorf("expected error returned by checker.Packs() to be PackError, got %v", err) + } +} + +func TestUnreferencedPack(t *testing.T) { + repodir, cleanup := test.Env(t, checkerTestData) + defer cleanup() + + repo := repository.TestOpenLocal(t, repodir) + + // index 3f1a only references pack 60e0 + packID := "60e0438dcb978ec6860cc1f8c43da648170ee9129af8f650f876bad19f8f788e" + indexHandle := restic.Handle{ + Type: restic.IndexFile, + Name: "3f1abfcb79c6f7d0a3be517d2c83c8562fba64ef2c8e9a3544b4edaf8b5e3b44", + } + test.OK(t, repo.Backend().Remove(context.TODO(), indexHandle)) + + chkr := checker.New(repo) + hints, errs := chkr.LoadIndex(context.TODO()) + if len(errs) > 0 { + t.Fatalf("expected no errors, got %v: %v", len(errs), errs) + } + + if len(hints) > 0 { + t.Errorf("expected no hints, got %v: %v", len(hints), hints) + } + + errs = checkPacks(chkr) + + test.Assert(t, len(errs) == 1, + "expected exactly one error, got %v", len(errs)) + + if err, ok := errs[0].(checker.PackError); ok { + test.Equals(t, packID, err.ID.String()) + } else { + t.Errorf("expected error returned by checker.Packs() to be PackError, got %v", err) + } +} + +func TestUnreferencedBlobs(t *testing.T) { + repodir, cleanup := test.Env(t, checkerTestData) + defer cleanup() + + repo := repository.TestOpenLocal(t, repodir) + + snapshotHandle := restic.Handle{ + Type: restic.SnapshotFile, + Name: "51d249d28815200d59e4be7b3f21a157b864dc343353df9d8e498220c2499b02", + } + test.OK(t, repo.Backend().Remove(context.TODO(), snapshotHandle)) + + unusedBlobsBySnapshot := restic.IDs{ + restic.TestParseID("58c748bbe2929fdf30c73262bd8313fe828f8925b05d1d4a87fe109082acb849"), + restic.TestParseID("988a272ab9768182abfd1fe7d7a7b68967825f0b861d3b36156795832c772235"), + restic.TestParseID("c01952de4d91da1b1b80bc6e06eaa4ec21523f4853b69dc8231708b9b7ec62d8"), + restic.TestParseID("bec3a53d7dc737f9a9bee68b107ec9e8ad722019f649b34d474b9982c3a3fec7"), + restic.TestParseID("2a6f01e5e92d8343c4c6b78b51c5a4dc9c39d42c04e26088c7614b13d8d0559d"), + restic.TestParseID("18b51b327df9391732ba7aaf841a4885f350d8a557b2da8352c9acf8898e3f10"), + } + + sort.Sort(unusedBlobsBySnapshot) + + chkr := checker.New(repo) + hints, errs := chkr.LoadIndex(context.TODO()) + if len(errs) > 0 { + t.Fatalf("expected no errors, got %v: %v", len(errs), errs) + } + + if len(hints) > 0 { + t.Errorf("expected no hints, got %v: %v", len(hints), hints) + } + + test.OKs(t, checkPacks(chkr)) + test.OKs(t, checkStruct(chkr)) + + blobs := chkr.UnusedBlobs() + sort.Sort(blobs) + + test.Equals(t, unusedBlobsBySnapshot, blobs) +} + +func TestModifiedIndex(t *testing.T) { + repodir, cleanup := test.Env(t, checkerTestData) + defer cleanup() + + repo := repository.TestOpenLocal(t, repodir) + + done := make(chan struct{}) + defer close(done) + + h := restic.Handle{ + Type: restic.IndexFile, + Name: "90f838b4ac28735fda8644fe6a08dbc742e57aaf81b30977b4fefa357010eafd", + } + + tmpfile, err := ioutil.TempFile("", "restic-test-mod-index-") + if err != nil { + t.Fatal(err) + } + defer func() { + err := tmpfile.Close() + if err != nil { + t.Fatal(err) + } + + err = os.Remove(tmpfile.Name()) + if err != nil { + t.Fatal(err) + } + }() + + // read the file from the backend + err = repo.Backend().Load(context.TODO(), h, 0, 0, func(rd io.Reader) error { + _, err := io.Copy(tmpfile, rd) + return err + }) + test.OK(t, err) + + // save the index again with a modified name so that the hash doesn't match + // the content any more + h2 := restic.Handle{ + Type: restic.IndexFile, + Name: 
"80f838b4ac28735fda8644fe6a08dbc742e57aaf81b30977b4fefa357010eafd", + } + + rd, err := restic.NewFileReader(tmpfile) + if err != nil { + t.Fatal(err) + } + + err = repo.Backend().Save(context.TODO(), h2, rd) + if err != nil { + t.Fatal(err) + } + + chkr := checker.New(repo) + hints, errs := chkr.LoadIndex(context.TODO()) + if len(errs) == 0 { + t.Fatalf("expected errors not found") + } + + for _, err := range errs { + t.Logf("found expected error %v", err) + } + + if len(hints) > 0 { + t.Errorf("expected no hints, got %v: %v", len(hints), hints) + } +} + +var checkerDuplicateIndexTestData = filepath.Join("testdata", "duplicate-packs-in-index-test-repo.tar.gz") + +func TestDuplicatePacksInIndex(t *testing.T) { + repodir, cleanup := test.Env(t, checkerDuplicateIndexTestData) + defer cleanup() + + repo := repository.TestOpenLocal(t, repodir) + + chkr := checker.New(repo) + hints, errs := chkr.LoadIndex(context.TODO()) + if len(hints) == 0 { + t.Fatalf("did not get expected checker hints for duplicate packs in indexes") + } + + found := false + for _, hint := range hints { + if _, ok := hint.(checker.ErrDuplicatePacks); ok { + found = true + } else { + t.Errorf("got unexpected hint: %v", hint) + } + } + + if !found { + t.Fatalf("did not find hint ErrDuplicatePacks") + } + + if len(errs) > 0 { + t.Errorf("expected no errors, got %v: %v", len(errs), errs) + } +} + +// errorBackend randomly modifies data after reading. +type errorBackend struct { + restic.Backend + ProduceErrors bool +} + +func (b errorBackend) Load(ctx context.Context, h restic.Handle, length int, offset int64, consumer func(rd io.Reader) error) error { + return b.Backend.Load(ctx, h, length, offset, func(rd io.Reader) error { + if b.ProduceErrors { + return consumer(errorReadCloser{rd}) + } + return consumer(rd) + }) +} + +type errorReadCloser struct { + io.Reader +} + +func (erd errorReadCloser) Read(p []byte) (int, error) { + n, err := erd.Reader.Read(p) + if n > 0 { + induceError(p[:n]) + } + return n, err +} + +// induceError flips a bit in the slice. 
+func induceError(data []byte) { + if rand.Float32() < 0.2 { + return + } + + pos := rand.Intn(len(data)) + data[pos] ^= 1 +} + +func TestCheckerModifiedData(t *testing.T) { + repo, cleanup := repository.TestRepository(t) + defer cleanup() + + sn := archiver.TestSnapshot(t, repo, ".", nil) + t.Logf("archived as %v", sn.ID().Str()) + + beError := &errorBackend{Backend: repo.Backend()} + checkRepo := repository.New(beError) + test.OK(t, checkRepo.SearchKey(context.TODO(), test.TestPassword, 5, "")) + + chkr := checker.New(checkRepo) + + hints, errs := chkr.LoadIndex(context.TODO()) + if len(errs) > 0 { + t.Fatalf("expected no errors, got %v: %v", len(errs), errs) + } + + if len(hints) > 0 { + t.Errorf("expected no hints, got %v: %v", len(hints), hints) + } + + beError.ProduceErrors = true + errFound := false + for _, err := range checkPacks(chkr) { + t.Logf("pack error: %v", err) + } + + for _, err := range checkStruct(chkr) { + t.Logf("struct error: %v", err) + } + + for _, err := range checkData(chkr) { + t.Logf("data error: %v", err) + errFound = true + } + + if !errFound { + t.Fatal("no error found, checker is broken") + } +} + +func BenchmarkChecker(t *testing.B) { + repodir, cleanup := test.Env(t, checkerTestData) + defer cleanup() + + repo := repository.TestOpenLocal(t, repodir) + + chkr := checker.New(repo) + hints, errs := chkr.LoadIndex(context.TODO()) + if len(errs) > 0 { + t.Fatalf("expected no errors, got %v: %v", len(errs), errs) + } + + if len(hints) > 0 { + t.Errorf("expected no hints, got %v: %v", len(hints), hints) + } + + t.ResetTimer() + + for i := 0; i < t.N; i++ { + test.OKs(t, checkPacks(chkr)) + test.OKs(t, checkStruct(chkr)) + test.OKs(t, checkData(chkr)) + } +} diff --git a/internal/checker/testdata/checker-test-repo.tar.gz b/internal/checker/testdata/checker-test-repo.tar.gz new file mode 100644 index 000000000..793eb94de Binary files /dev/null and b/internal/checker/testdata/checker-test-repo.tar.gz differ diff --git a/internal/checker/testdata/duplicate-packs-in-index-test-repo.tar.gz b/internal/checker/testdata/duplicate-packs-in-index-test-repo.tar.gz new file mode 100644 index 000000000..f0e194d8d Binary files /dev/null and b/internal/checker/testdata/duplicate-packs-in-index-test-repo.tar.gz differ diff --git a/internal/checker/testing.go b/internal/checker/testing.go new file mode 100644 index 000000000..4a7bf48d2 --- /dev/null +++ b/internal/checker/testing.go @@ -0,0 +1,52 @@ +package checker + +import ( + "context" + "testing" + + "github.com/restic/restic/internal/restic" +) + +// TestCheckRepo runs the checker on repo. 
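+// It fails t if loading the index yields errors or hints, if the pack,
+// structure, or data checks report any error, or if unused blobs are found.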
+func TestCheckRepo(t testing.TB, repo restic.Repository) {
+	chkr := New(repo)
+
+	hints, errs := chkr.LoadIndex(context.TODO())
+	if len(errs) != 0 {
+		t.Fatalf("errors loading index: %v", errs)
+	}
+
+	if len(hints) != 0 {
+		t.Fatalf("unexpected hints from loading index: %v", hints)
+	}
+
+	// packs
+	errChan := make(chan error)
+	go chkr.Packs(context.TODO(), errChan)
+
+	for err := range errChan {
+		t.Error(err)
+	}
+
+	// structure
+	errChan = make(chan error)
+	go chkr.Structure(context.TODO(), errChan)
+
+	for err := range errChan {
+		t.Error(err)
+	}
+
+	// unused blobs
+	blobs := chkr.UnusedBlobs()
+	if len(blobs) > 0 {
+		t.Errorf("unused blobs found: %v", blobs)
+	}
+
+	// read data
+	errChan = make(chan error)
+	go chkr.ReadData(context.TODO(), nil, errChan)
+
+	for err := range errChan {
+		t.Error(err)
+	}
+}
diff --git a/internal/crypto/crypto.go b/internal/crypto/crypto.go
new file mode 100644
index 000000000..b56a9da2e
--- /dev/null
+++ b/internal/crypto/crypto.go
@@ -0,0 +1,371 @@
+package crypto
+
+import (
+	"crypto/aes"
+	"crypto/cipher"
+	"crypto/rand"
+	"encoding/json"
+	"fmt"
+
+	"github.com/restic/restic/internal/errors"
+
+	"golang.org/x/crypto/poly1305"
+)
+
+const (
+	aesKeySize  = 32                        // for AES-256
+	macKeySizeK = 16                        // for AES-128
+	macKeySizeR = 16                        // for Poly1305
+	macKeySize  = macKeySizeK + macKeySizeR // for Poly1305-AES128
+	ivSize      = aes.BlockSize
+
+	macSize = poly1305.TagSize
+
+	// Extension is the number of bytes a plaintext is enlarged by encrypting it.
+	Extension = ivSize + macSize
+)
+
+var (
+	// ErrUnauthenticated is returned when ciphertext verification has failed.
+	ErrUnauthenticated = errors.New("ciphertext verification failed")
+)
+
+// Key holds encryption and message authentication keys for a repository. It is stored
+// encrypted and authenticated as a JSON data structure in the Data field of the Key
+// structure.
+type Key struct {
+	MACKey        `json:"mac"`
+	EncryptionKey `json:"encrypt"`
+}
+
+// EncryptionKey is the key used for encryption.
+type EncryptionKey [32]byte
+
+// MACKey is used to sign (authenticate) data.
+type MACKey struct {
+	K [16]byte // for AES-128
+	R [16]byte // for Poly1305
+
+	masked bool // remember if the MAC key has already been masked
+}
+
+// mask for key, (cf. 
http://cr.yp.to/mac/poly1305-20050329.pdf) +var poly1305KeyMask = [16]byte{ + 0xff, + 0xff, + 0xff, + 0x0f, // 3: top four bits zero + 0xfc, // 4: bottom two bits zero + 0xff, + 0xff, + 0x0f, // 7: top four bits zero + 0xfc, // 8: bottom two bits zero + 0xff, + 0xff, + 0x0f, // 11: top four bits zero + 0xfc, // 12: bottom two bits zero + 0xff, + 0xff, + 0x0f, // 15: top four bits zero +} + +func poly1305MAC(msg []byte, nonce []byte, key *MACKey) []byte { + k := poly1305PrepareKey(nonce, key) + + var out [16]byte + poly1305.Sum(&out, msg, &k) + + return out[:] +} + +// mask poly1305 key +func maskKey(k *MACKey) { + if k == nil || k.masked { + return + } + + for i := 0; i < poly1305.TagSize; i++ { + k.R[i] = k.R[i] & poly1305KeyMask[i] + } + + k.masked = true +} + +// construct mac key from slice (k||r), with masking +func macKeyFromSlice(mk *MACKey, data []byte) { + copy(mk.K[:], data[:16]) + copy(mk.R[:], data[16:32]) + maskKey(mk) +} + +// prepare key for low-level poly1305.Sum(): r||n +func poly1305PrepareKey(nonce []byte, key *MACKey) [32]byte { + var k [32]byte + + maskKey(key) + + cipher, err := aes.NewCipher(key.K[:]) + if err != nil { + panic(err) + } + cipher.Encrypt(k[16:], nonce[:]) + + copy(k[:16], key.R[:]) + + return k +} + +func poly1305Verify(msg []byte, nonce []byte, key *MACKey, mac []byte) bool { + k := poly1305PrepareKey(nonce, key) + + var m [16]byte + copy(m[:], mac) + + return poly1305.Verify(&m, msg, &k) +} + +// NewRandomKey returns new encryption and message authentication keys. +func NewRandomKey() *Key { + k := &Key{} + + n, err := rand.Read(k.EncryptionKey[:]) + if n != aesKeySize || err != nil { + panic("unable to read enough random bytes for encryption key") + } + + n, err = rand.Read(k.MACKey.K[:]) + if n != macKeySizeK || err != nil { + panic("unable to read enough random bytes for MAC encryption key") + } + + n, err = rand.Read(k.MACKey.R[:]) + if n != macKeySizeR || err != nil { + panic("unable to read enough random bytes for MAC key") + } + + maskKey(&k.MACKey) + return k +} + +// NewRandomNonce returns a new random nonce. It panics on error so that the +// program is safely terminated. +func NewRandomNonce() []byte { + iv := make([]byte, ivSize) + n, err := rand.Read(iv) + if n != ivSize || err != nil { + panic("unable to read enough random bytes for iv") + } + return iv +} + +type jsonMACKey struct { + K []byte `json:"k"` + R []byte `json:"r"` +} + +// MarshalJSON converts the MACKey to JSON. +func (m *MACKey) MarshalJSON() ([]byte, error) { + return json.Marshal(jsonMACKey{K: m.K[:], R: m.R[:]}) +} + +// UnmarshalJSON fills the key m with data from the JSON representation. +func (m *MACKey) UnmarshalJSON(data []byte) error { + j := jsonMACKey{} + err := json.Unmarshal(data, &j) + if err != nil { + return errors.Wrap(err, "Unmarshal") + } + copy(m.K[:], j.K) + copy(m.R[:], j.R) + + return nil +} + +// Valid tests whether the key k is valid (i.e. not zero). +func (m *MACKey) Valid() bool { + nonzeroK := false + for i := 0; i < len(m.K); i++ { + if m.K[i] != 0 { + nonzeroK = true + } + } + + if !nonzeroK { + return false + } + + for i := 0; i < len(m.R); i++ { + if m.R[i] != 0 { + return true + } + } + + return false +} + +// MarshalJSON converts the EncryptionKey to JSON. +func (k *EncryptionKey) MarshalJSON() ([]byte, error) { + return json.Marshal(k[:]) +} + +// UnmarshalJSON fills the key k with data from the JSON representation. 
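+// Since encoding/json encodes a []byte as a base64 string, the key is decoded
+// from such a string and copied into the fixed-size array.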
+func (k *EncryptionKey) UnmarshalJSON(data []byte) error { + d := make([]byte, aesKeySize) + err := json.Unmarshal(data, &d) + if err != nil { + return errors.Wrap(err, "Unmarshal") + } + copy(k[:], d) + + return nil +} + +// Valid tests whether the key k is valid (i.e. not zero). +func (k *EncryptionKey) Valid() bool { + for i := 0; i < len(k); i++ { + if k[i] != 0 { + return true + } + } + + return false +} + +// ErrInvalidCiphertext is returned when trying to encrypt into the slice that +// holds the plaintext. +var ErrInvalidCiphertext = errors.New("invalid ciphertext, same slice used for plaintext") + +// validNonce checks that nonce is not all zero. +func validNonce(nonce []byte) bool { + var sum byte + for _, b := range nonce { + sum |= b + } + return sum > 0 +} + +// statically ensure that *Key implements crypto/cipher.AEAD +var _ cipher.AEAD = &Key{} + +// NonceSize returns the size of the nonce that must be passed to Seal +// and Open. +func (k *Key) NonceSize() int { + return ivSize +} + +// Overhead returns the maximum difference between the lengths of a +// plaintext and its ciphertext. +func (k *Key) Overhead() int { + return macSize +} + +// sliceForAppend takes a slice and a requested number of bytes. It returns a +// slice with the contents of the given slice followed by that many bytes and a +// second slice that aliases into it and contains only the extra bytes. If the +// original slice has sufficient capacity then no allocation is performed. +// +// taken from the stdlib, crypto/aes/aes_gcm.go +func sliceForAppend(in []byte, n int) (head, tail []byte) { + if total := len(in) + n; cap(in) >= total { + head = in[:total] + } else { + head = make([]byte, total) + copy(head, in) + } + tail = head[len(in):] + return +} + +// Seal encrypts and authenticates plaintext, authenticates the +// additional data and appends the result to dst, returning the updated +// slice. The nonce must be NonceSize() bytes long and unique for all +// time, for a given key. +// +// The plaintext and dst may alias exactly or not at all. To reuse +// plaintext's storage for the encrypted output, use plaintext[:0] as dst. +func (k *Key) Seal(dst, nonce, plaintext, additionalData []byte) []byte { + if !k.Valid() { + panic("key is invalid") + } + + if len(additionalData) > 0 { + panic("additional data is not supported") + } + + if len(nonce) != ivSize { + panic("incorrect nonce length") + } + + if !validNonce(nonce) { + panic("nonce is invalid") + } + + ret, out := sliceForAppend(dst, len(plaintext)+k.Overhead()) + + c, err := aes.NewCipher(k.EncryptionKey[:]) + if err != nil { + panic(fmt.Sprintf("unable to create cipher: %v", err)) + } + e := cipher.NewCTR(c, nonce) + e.XORKeyStream(out, plaintext) + + mac := poly1305MAC(out[:len(plaintext)], nonce, &k.MACKey) + copy(out[len(plaintext):], mac) + + return ret +} + +// Open decrypts and authenticates ciphertext, authenticates the +// additional data and, if successful, appends the resulting plaintext +// to dst, returning the updated slice. The nonce must be NonceSize() +// bytes long and both it and the additional data must match the +// value passed to Seal. +// +// The ciphertext and dst may alias exactly or not at all. To reuse +// ciphertext's storage for the decrypted output, use ciphertext[:0] as dst. +// +// Even if the function fails, the contents of dst, up to its capacity, +// may be overwritten. 
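+// If the MAC cannot be verified, ErrUnauthenticated is returned.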
+func (k *Key) Open(dst, nonce, ciphertext, additionalData []byte) ([]byte, error) { + if !k.Valid() { + return nil, errors.New("invalid key") + } + + // check parameters + if len(nonce) != ivSize { + panic("incorrect nonce length") + } + + if !validNonce(nonce) { + return nil, errors.New("nonce is invalid") + } + + // check for plausible length + if len(ciphertext) < k.Overhead() { + return nil, errors.Errorf("trying to decrypt invalid data: ciphertext too small") + } + + l := len(ciphertext) - macSize + ct, mac := ciphertext[:l], ciphertext[l:] + + // verify mac + if !poly1305Verify(ct, nonce, &k.MACKey, mac) { + return nil, ErrUnauthenticated + } + + ret, out := sliceForAppend(dst, len(ct)) + + c, err := aes.NewCipher(k.EncryptionKey[:]) + if err != nil { + panic(fmt.Sprintf("unable to create cipher: %v", err)) + } + e := cipher.NewCTR(c, nonce) + e.XORKeyStream(out, ct) + + return ret, nil +} + +// Valid tests if the key is valid. +func (k *Key) Valid() bool { + return k.EncryptionKey.Valid() && k.MACKey.Valid() +} diff --git a/internal/crypto/crypto_int_test.go b/internal/crypto/crypto_int_test.go new file mode 100644 index 000000000..769f34d1e --- /dev/null +++ b/internal/crypto/crypto_int_test.go @@ -0,0 +1,192 @@ +package crypto + +import ( + "bytes" + "encoding/hex" + "testing" +) + +// test vectors from http://cr.yp.to/mac/poly1305-20050329.pdf +var poly1305Tests = []struct { + msg []byte + r []byte + k []byte + nonce []byte + mac []byte +}{ + { + []byte("\xf3\xf6"), + []byte("\x85\x1f\xc4\x0c\x34\x67\xac\x0b\xe0\x5c\xc2\x04\x04\xf3\xf7\x00"), + []byte("\xec\x07\x4c\x83\x55\x80\x74\x17\x01\x42\x5b\x62\x32\x35\xad\xd6"), + []byte("\xfb\x44\x73\x50\xc4\xe8\x68\xc5\x2a\xc3\x27\x5c\xf9\xd4\x32\x7e"), + []byte("\xf4\xc6\x33\xc3\x04\x4f\xc1\x45\xf8\x4f\x33\x5c\xb8\x19\x53\xde"), + }, + { + []byte(""), + []byte("\xa0\xf3\x08\x00\x00\xf4\x64\x00\xd0\xc7\xe9\x07\x6c\x83\x44\x03"), + []byte("\x75\xde\xaa\x25\xc0\x9f\x20\x8e\x1d\xc4\xce\x6b\x5c\xad\x3f\xbf"), + []byte("\x61\xee\x09\x21\x8d\x29\xb0\xaa\xed\x7e\x15\x4a\x2c\x55\x09\xcc"), + []byte("\xdd\x3f\xab\x22\x51\xf1\x1a\xc7\x59\xf0\x88\x71\x29\xcc\x2e\xe7"), + }, + { + []byte("\x66\x3c\xea\x19\x0f\xfb\x83\xd8\x95\x93\xf3\xf4\x76\xb6\xbc\x24\xd7\xe6\x79\x10\x7e\xa2\x6a\xdb\x8c\xaf\x66\x52\xd0\x65\x61\x36"), + []byte("\x48\x44\x3d\x0b\xb0\xd2\x11\x09\xc8\x9a\x10\x0b\x5c\xe2\xc2\x08"), + []byte("\x6a\xcb\x5f\x61\xa7\x17\x6d\xd3\x20\xc5\xc1\xeb\x2e\xdc\xdc\x74"), + []byte("\xae\x21\x2a\x55\x39\x97\x29\x59\x5d\xea\x45\x8b\xc6\x21\xff\x0e"), + []byte("\x0e\xe1\xc1\x6b\xb7\x3f\x0f\x4f\xd1\x98\x81\x75\x3c\x01\xcd\xbe"), + }, { + []byte("\xab\x08\x12\x72\x4a\x7f\x1e\x34\x27\x42\xcb\xed\x37\x4d\x94\xd1\x36\xc6\xb8\x79\x5d\x45\xb3\x81\x98\x30\xf2\xc0\x44\x91\xfa\xf0\x99\x0c\x62\xe4\x8b\x80\x18\xb2\xc3\xe4\xa0\xfa\x31\x34\xcb\x67\xfa\x83\xe1\x58\xc9\x94\xd9\x61\xc4\xcb\x21\x09\x5c\x1b\xf9"), + []byte("\x12\x97\x6a\x08\xc4\x42\x6d\x0c\xe8\xa8\x24\x07\xc4\xf4\x82\x07"), + []byte("\xe1\xa5\x66\x8a\x4d\x5b\x66\xa5\xf6\x8c\xc5\x42\x4e\xd5\x98\x2d"), + []byte("\x9a\xe8\x31\xe7\x43\x97\x8d\x3a\x23\x52\x7c\x71\x28\x14\x9e\x3a"), + []byte("\x51\x54\xad\x0d\x2c\xb2\x6e\x01\x27\x4f\xc5\x11\x48\x49\x1f\x1b"), + }, +} + +func TestPoly1305(t *testing.T) { + for _, test := range poly1305Tests { + key := &MACKey{} + copy(key.K[:], test.k) + copy(key.R[:], test.r) + mac := poly1305MAC(test.msg, test.nonce, key) + + if !bytes.Equal(mac, test.mac) { + t.Fatalf("wrong mac calculated, want: %02x, got: %02x", test.mac, mac) + } + + if !poly1305Verify(test.msg, 
test.nonce, key, test.mac) {
+			t.Fatalf("mac does not verify: mac: %02x", test.mac)
+		}
+	}
+}
+
+var testValues = []struct {
+	ekey       EncryptionKey
+	skey       MACKey
+	ciphertext []byte
+	plaintext  []byte
+}{
+	{
+		ekey: decodeArray32("303e8687b1d7db18421bdc6bb8588ccadac4d59ee87b8ff70c44e635790cafef"),
+		skey: MACKey{
+			K: decodeArray16("ef4d8824cb80b2bcc5fbff8a9b12a42c"),
+			R: decodeArray16("cc8d4b948ee0ebfe1d415de921d10353"),
+		},
+		ciphertext: decodeHex("69fb41c62d12def4593bd71757138606338f621aeaeb39da0fe4f99233f8037a54ea63338a813bcf3f75d8c3cc75dddf8750"),
+		plaintext:  []byte("Dies ist ein Test!"),
+	},
+}
+
+func decodeArray16(s string) (dst [16]byte) {
+	data := decodeHex(s)
+	if len(data) != 16 {
+		panic("data has wrong length")
+	}
+	copy(dst[:], data)
+	return
+}
+
+func decodeArray32(s string) (dst [32]byte) {
+	data := decodeHex(s)
+	if len(data) != 32 {
+		panic("data has wrong length")
+	}
+	copy(dst[:], data)
+	return
+}
+
+// decodeHex decodes the string s and panics on error.
+func decodeHex(s string) []byte {
+	d, err := hex.DecodeString(s)
+	if err != nil {
+		panic(err)
+	}
+	return d
+}
+
+func TestCrypto(t *testing.T) {
+	msg := make([]byte, 0, 8*1024*1024) // use 8MiB for now
+	for _, tv := range testValues {
+		// test encryption
+		k := &Key{
+			EncryptionKey: tv.ekey,
+			MACKey:        tv.skey,
+		}
+
+		nonce := NewRandomNonce()
+		ciphertext := k.Seal(msg[0:], nonce, tv.plaintext, nil)
+
+		// decrypt message
+		buf := make([]byte, 0, len(tv.plaintext))
+		buf, err := k.Open(buf, nonce, ciphertext, nil)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		if !bytes.Equal(buf, tv.plaintext) {
+			t.Fatalf("wrong plaintext returned")
+		}
+
+		// change mac, this must fail
+		ciphertext[len(ciphertext)-8] ^= 0x23
+
+		if _, err = k.Open(buf[:0], nonce, ciphertext, nil); err != ErrUnauthenticated {
+			t.Fatal("wrong MAC value not detected")
+		}
+		// reset mac
+		ciphertext[len(ciphertext)-8] ^= 0x23
+
+		// tamper with nonce, this must fail
+		nonce[2] ^= 0x88
+		if _, err = k.Open(buf[:0], nonce, ciphertext, nil); err != ErrUnauthenticated {
+			t.Fatal("tampered nonce not detected")
+		}
+		// reset nonce
+		nonce[2] ^= 0x88
+
+		// tamper with message, this must fail
+		ciphertext[16+5] ^= 0x85
+		if _, err = k.Open(buf[:0], nonce, ciphertext, nil); err != ErrUnauthenticated {
+			t.Fatal("tampered message not detected")
+		}
+
+		// test decryption
+		p := make([]byte, len(tv.ciphertext))
+		nonce, ciphertext = tv.ciphertext[:16], tv.ciphertext[16:]
+		p, err = k.Open(p[:0], nonce, ciphertext, nil)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		if !bytes.Equal(p, tv.plaintext) {
+			t.Fatalf("wrong plaintext: expected %q but got %q\n", tv.plaintext, p)
+		}
+	}
+}
+
+func TestNonceValid(t *testing.T) {
+	nonce := make([]byte, ivSize)
+
+	if validNonce(nonce) {
+		t.Error("null nonce detected as valid")
+	}
+
+	for i := 0; i < 100; i++ {
+		nonce = NewRandomNonce()
+		if !validNonce(nonce) {
+			t.Errorf("random nonce not detected as valid: %02x", nonce)
+		}
+	}
+}
+
+func BenchmarkNonceValid(b *testing.B) {
+	nonce := NewRandomNonce()
+
+	b.ResetTimer()
+
+	for i := 0; i < b.N; i++ {
+		if !validNonce(nonce) {
+			b.Fatal("nonce is invalid")
+		}
+	}
+}
diff --git a/internal/crypto/crypto_test.go b/internal/crypto/crypto_test.go
new file mode 100644
index 000000000..47debe896
--- /dev/null
+++ b/internal/crypto/crypto_test.go
@@ -0,0 +1,302 @@
+package crypto_test
+
+import (
+	"bytes"
+	"crypto/rand"
+	"io"
+	"testing"
+
+	"github.com/restic/restic/internal/crypto"
+	rtest "github.com/restic/restic/internal/test"
+
+	
"github.com/restic/chunker" +) + +const testLargeCrypto = false + +func TestEncryptDecrypt(t *testing.T) { + k := crypto.NewRandomKey() + + tests := []int{5, 23, 2<<18 + 23, 1 << 20} + if testLargeCrypto { + tests = append(tests, 7<<20+123) + } + + for _, size := range tests { + data := rtest.Random(42, size) + buf := make([]byte, 0, size+crypto.Extension) + + nonce := crypto.NewRandomNonce() + ciphertext := k.Seal(buf[:0], nonce, data, nil) + rtest.Assert(t, len(ciphertext) == len(data)+k.Overhead(), + "ciphertext length does not match: want %d, got %d", + len(data)+crypto.Extension, len(ciphertext)) + + plaintext := make([]byte, 0, len(ciphertext)) + plaintext, err := k.Open(plaintext[:0], nonce, ciphertext, nil) + rtest.OK(t, err) + rtest.Assert(t, len(plaintext) == len(data), + "plaintext length does not match: want %d, got %d", + len(data), len(plaintext)) + + rtest.Equals(t, plaintext, data) + } +} + +func TestSmallBuffer(t *testing.T) { + k := crypto.NewRandomKey() + + size := 600 + data := make([]byte, size) + _, err := io.ReadFull(rand.Reader, data) + rtest.OK(t, err) + + ciphertext := make([]byte, 0, size/2) + nonce := crypto.NewRandomNonce() + ciphertext = k.Seal(ciphertext[:0], nonce, data, nil) + // this must extend the slice + rtest.Assert(t, cap(ciphertext) > size/2, + "expected extended slice, but capacity is only %d bytes", + cap(ciphertext)) + + // check for the correct plaintext + plaintext := make([]byte, len(ciphertext)) + plaintext, err = k.Open(plaintext[:0], nonce, ciphertext, nil) + rtest.OK(t, err) + rtest.Assert(t, bytes.Equal(plaintext, data), + "wrong plaintext returned") +} + +func TestSameBuffer(t *testing.T) { + k := crypto.NewRandomKey() + + size := 600 + data := make([]byte, size) + _, err := io.ReadFull(rand.Reader, data) + rtest.OK(t, err) + + ciphertext := make([]byte, 0, size+crypto.Extension) + + nonce := crypto.NewRandomNonce() + ciphertext = k.Seal(ciphertext, nonce, data, nil) + + // use the same buffer for decryption + ciphertext, err = k.Open(ciphertext[:0], nonce, ciphertext, nil) + rtest.OK(t, err) + rtest.Assert(t, bytes.Equal(ciphertext, data), + "wrong plaintext returned") +} + +func encrypt(t testing.TB, k *crypto.Key, data, ciphertext, nonce []byte) []byte { + prefixlen := len(ciphertext) + ciphertext = k.Seal(ciphertext, nonce, data, nil) + if len(ciphertext) != len(data)+k.Overhead()+prefixlen { + t.Fatalf("destination slice has wrong length, want %d, got %d", + len(data)+k.Overhead(), len(ciphertext)) + } + + return ciphertext +} + +func decryptNewSliceAndCompare(t testing.TB, k *crypto.Key, data, ciphertext, nonce []byte) { + plaintext := make([]byte, 0, len(ciphertext)) + decryptAndCompare(t, k, data, ciphertext, nonce, plaintext) +} + +func decryptAndCompare(t testing.TB, k *crypto.Key, data, ciphertext, nonce, dst []byte) { + prefix := make([]byte, len(dst)) + copy(prefix, dst) + + plaintext, err := k.Open(dst, nonce, ciphertext, nil) + if err != nil { + t.Fatalf("unable to decrypt ciphertext: %v", err) + } + + if len(data)+len(prefix) != len(plaintext) { + t.Fatalf("wrong plaintext returned, want %d bytes, got %d", len(data)+len(prefix), len(plaintext)) + } + + if !bytes.Equal(plaintext[:len(prefix)], prefix) { + t.Fatal("prefix is wrong") + } + + if !bytes.Equal(plaintext[len(prefix):], data) { + t.Fatal("wrong plaintext returned") + } +} + +func TestAppendOpen(t *testing.T) { + k := crypto.NewRandomKey() + nonce := crypto.NewRandomNonce() + + data := make([]byte, 600) + _, err := io.ReadFull(rand.Reader, data) + rtest.OK(t, err) 
+ ciphertext := encrypt(t, k, data, nil, nonce) + + // we need to test several different cases: + // * destination slice is nil + // * destination slice is empty and has enough capacity + // * destination slice is empty and does not have enough capacity + // * destination slice contains data and has enough capacity + // * destination slice contains data and does not have enough capacity + + // destination slice is nil + t.Run("nil", func(t *testing.T) { + var plaintext []byte + decryptAndCompare(t, k, data, ciphertext, nonce, plaintext) + }) + + // destination slice is empty and has enough capacity + t.Run("empty-large", func(t *testing.T) { + plaintext := make([]byte, 0, len(data)+100) + decryptAndCompare(t, k, data, ciphertext, nonce, plaintext) + }) + + // destination slice is empty and does not have enough capacity + t.Run("empty-small", func(t *testing.T) { + plaintext := make([]byte, 0, len(data)/2) + decryptAndCompare(t, k, data, ciphertext, nonce, plaintext) + }) + + // destination slice contains data and has enough capacity + t.Run("prefix-large", func(t *testing.T) { + plaintext := make([]byte, 0, len(data)+100) + plaintext = append(plaintext, []byte("foobar")...) + decryptAndCompare(t, k, data, ciphertext, nonce, plaintext) + }) + + // destination slice contains data and does not have enough capacity + t.Run("prefix-small", func(t *testing.T) { + plaintext := make([]byte, 0, len(data)/2) + plaintext = append(plaintext, []byte("foobar")...) + decryptAndCompare(t, k, data, ciphertext, nonce, plaintext) + }) +} + +func TestAppendSeal(t *testing.T) { + k := crypto.NewRandomKey() + + data := make([]byte, 600) + _, err := io.ReadFull(rand.Reader, data) + rtest.OK(t, err) + + // we need to test several different cases: + // * destination slice is nil + // * destination slice is empty and has enough capacity + // * destination slice is empty and does not have enough capacity + // * destination slice contains data and has enough capacity + // * destination slice contains data and does not have enough capacity + + // destination slice is nil + t.Run("nil", func(t *testing.T) { + nonce := crypto.NewRandomNonce() + var ciphertext []byte + + ciphertext = encrypt(t, k, data, ciphertext, nonce) + decryptNewSliceAndCompare(t, k, data, ciphertext, nonce) + }) + + // destination slice is empty and has enough capacity + t.Run("empty-large", func(t *testing.T) { + nonce := crypto.NewRandomNonce() + ciphertext := make([]byte, 0, len(data)+100) + + ciphertext = encrypt(t, k, data, ciphertext, nonce) + decryptNewSliceAndCompare(t, k, data, ciphertext, nonce) + }) + + // destination slice is empty and does not have enough capacity + t.Run("empty-small", func(t *testing.T) { + nonce := crypto.NewRandomNonce() + ciphertext := make([]byte, 0, len(data)/2) + + ciphertext = encrypt(t, k, data, ciphertext, nonce) + decryptNewSliceAndCompare(t, k, data, ciphertext, nonce) + }) + + // destination slice contains data and has enough capacity + t.Run("prefix-large", func(t *testing.T) { + nonce := crypto.NewRandomNonce() + ciphertext := make([]byte, 0, len(data)+100) + ciphertext = append(ciphertext, []byte("foobar")...) 
+ + ciphertext = encrypt(t, k, data, ciphertext, nonce) + if string(ciphertext[:6]) != "foobar" { + t.Errorf("prefix is missing") + } + decryptNewSliceAndCompare(t, k, data, ciphertext[6:], nonce) + }) + + // destination slice contains data and does not have enough capacity + t.Run("prefix-small", func(t *testing.T) { + nonce := crypto.NewRandomNonce() + ciphertext := make([]byte, 0, len(data)/2) + ciphertext = append(ciphertext, []byte("foobar")...) + + ciphertext = encrypt(t, k, data, ciphertext, nonce) + if string(ciphertext[:6]) != "foobar" { + t.Errorf("prefix is missing") + } + decryptNewSliceAndCompare(t, k, data, ciphertext[6:], nonce) + }) +} + +func TestLargeEncrypt(t *testing.T) { + if !testLargeCrypto { + t.SkipNow() + } + + k := crypto.NewRandomKey() + + for _, size := range []int{chunker.MaxSize, chunker.MaxSize + 1, chunker.MaxSize + 1<<20} { + data := make([]byte, size) + _, err := io.ReadFull(rand.Reader, data) + rtest.OK(t, err) + + nonce := crypto.NewRandomNonce() + ciphertext := k.Seal(make([]byte, size+k.Overhead()), nonce, data, nil) + plaintext, err := k.Open([]byte{}, nonce, ciphertext, nil) + rtest.OK(t, err) + + rtest.Equals(t, plaintext, data) + } +} + +func BenchmarkEncrypt(b *testing.B) { + size := 8 << 20 // 8MiB + data := make([]byte, size) + + k := crypto.NewRandomKey() + buf := make([]byte, len(data)+crypto.Extension) + nonce := crypto.NewRandomNonce() + + b.ResetTimer() + b.SetBytes(int64(size)) + + for i := 0; i < b.N; i++ { + _ = k.Seal(buf, nonce, data, nil) + } +} + +func BenchmarkDecrypt(b *testing.B) { + size := 8 << 20 // 8MiB + data := make([]byte, size) + + k := crypto.NewRandomKey() + + plaintext := make([]byte, 0, size) + ciphertext := make([]byte, 0, size+crypto.Extension) + nonce := crypto.NewRandomNonce() + ciphertext = k.Seal(ciphertext, nonce, data, nil) + + var err error + + b.ResetTimer() + b.SetBytes(int64(size)) + + for i := 0; i < b.N; i++ { + _, err = k.Open(plaintext, nonce, ciphertext, nil) + rtest.OK(b, err) + } +} diff --git a/internal/crypto/doc.go b/internal/crypto/doc.go new file mode 100644 index 000000000..abbce210a --- /dev/null +++ b/internal/crypto/doc.go @@ -0,0 +1,2 @@ +// Package crypto provides all cryptographic operations needed in restic. +package crypto diff --git a/internal/crypto/kdf.go b/internal/crypto/kdf.go new file mode 100644 index 000000000..a63f3064a --- /dev/null +++ b/internal/crypto/kdf.go @@ -0,0 +1,102 @@ +package crypto + +import ( + "crypto/rand" + "time" + + "github.com/restic/restic/internal/errors" + + sscrypt "github.com/elithrar/simple-scrypt" + "golang.org/x/crypto/scrypt" +) + +const saltLength = 64 + +// Params are the default parameters used for the key derivation function KDF(). +type Params struct { + N int + R int + P int +} + +// DefaultKDFParams are the default parameters used for Calibrate and KDF(). +var DefaultKDFParams = Params{ + N: sscrypt.DefaultParams.N, + R: sscrypt.DefaultParams.R, + P: sscrypt.DefaultParams.P, +} + +// Calibrate determines new KDF parameters for the current hardware. 
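+// It asks simple-scrypt to search for parameters that take roughly the given
+// timeout to compute without exceeding the given memory budget; on failure it
+// returns DefaultKDFParams together with the wrapped error.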
+func Calibrate(timeout time.Duration, memory int) (Params, error) { + defaultParams := sscrypt.Params{ + N: DefaultKDFParams.N, + R: DefaultKDFParams.R, + P: DefaultKDFParams.P, + DKLen: sscrypt.DefaultParams.DKLen, + SaltLen: sscrypt.DefaultParams.SaltLen, + } + + params, err := sscrypt.Calibrate(timeout, memory, defaultParams) + if err != nil { + return DefaultKDFParams, errors.Wrap(err, "scrypt.Calibrate") + } + + return Params{ + N: params.N, + R: params.R, + P: params.P, + }, nil +} + +// KDF derives encryption and message authentication keys from the password +// using the supplied parameters N, R and P and the Salt. +func KDF(p Params, salt []byte, password string) (*Key, error) { + if len(salt) != saltLength { + return nil, errors.Errorf("scrypt() called with invalid salt bytes (len %d)", len(salt)) + } + + // make sure we have valid parameters + params := sscrypt.Params{ + N: p.N, + R: p.R, + P: p.P, + DKLen: sscrypt.DefaultParams.DKLen, + SaltLen: len(salt), + } + + if err := params.Check(); err != nil { + return nil, errors.Wrap(err, "Check") + } + + derKeys := &Key{} + + keybytes := macKeySize + aesKeySize + scryptKeys, err := scrypt.Key([]byte(password), salt, p.N, p.R, p.P, keybytes) + if err != nil { + return nil, errors.Wrap(err, "scrypt.Key") + } + + if len(scryptKeys) != keybytes { + return nil, errors.Errorf("invalid numbers of bytes expanded from scrypt(): %d", len(scryptKeys)) + } + + // first 32 byte of scrypt output is the encryption key + copy(derKeys.EncryptionKey[:], scryptKeys[:aesKeySize]) + + // next 32 byte of scrypt output is the mac key, in the form k||r + macKeyFromSlice(&derKeys.MACKey, scryptKeys[aesKeySize:]) + + return derKeys, nil +} + +// NewSalt returns new random salt bytes to use with KDF(). If NewSalt returns +// an error, this is a grave situation and the program must abort and terminate. 
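+// The current implementation panics if not enough random bytes can be read,
+// instead of returning an error.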
+func NewSalt() ([]byte, error) {
+	buf := make([]byte, saltLength)
+	n, err := rand.Read(buf)
+	if n != saltLength || err != nil {
+		panic("unable to read enough random bytes for new salt")
+	}
+
+	return buf, nil
+}
diff --git a/internal/crypto/kdf_test.go b/internal/crypto/kdf_test.go
new file mode 100644
index 000000000..5823eb889
--- /dev/null
+++ b/internal/crypto/kdf_test.go
@@ -0,0 +1,14 @@
+package crypto
+
+import (
+	"testing"
+	"time"
+)
+
+func TestCalibrate(t *testing.T) {
+	params, err := Calibrate(100*time.Millisecond, 50)
+	if err != nil {
+		t.Fatal(err)
+	}
+	t.Logf("testing calibrate, params after: %v", params)
+}
diff --git a/internal/debug/debug.go b/internal/debug/debug.go
new file mode 100644
index 000000000..543755e25
--- /dev/null
+++ b/internal/debug/debug.go
@@ -0,0 +1,214 @@
+// +build debug
+
+package debug
+
+import (
+	"fmt"
+	"log"
+	"os"
+	"path"
+	"path/filepath"
+	"runtime"
+	"strings"
+
+	"github.com/restic/restic/internal/fs"
+
+	"github.com/restic/restic/internal/errors"
+)
+
+var opts struct {
+	logger *log.Logger
+	funcs  map[string]bool
+	files  map[string]bool
+}
+
+// make sure that all the initialization happens before the init() functions
+// are called, cf https://golang.org/ref/spec#Package_initialization
+var _ = initDebug()
+
+func initDebug() bool {
+	initDebugLogger()
+	initDebugTags()
+
+	fmt.Fprintf(os.Stderr, "debug enabled\n")
+
+	return true
+}
+
+func initDebugLogger() {
+	debugfile := os.Getenv("DEBUG_LOG")
+	if debugfile == "" {
+		return
+	}
+
+	fmt.Fprintf(os.Stderr, "debug log file %v\n", debugfile)
+
+	f, err := fs.OpenFile(debugfile, os.O_WRONLY|os.O_APPEND, 0600)
+
+	if err == nil {
+		// seek to the end of the file (offset 0 relative to whence 2, the end)
+		_, err = f.Seek(0, 2)
+		if err != nil {
+			fmt.Fprintf(os.Stderr, "unable to seek to the end of %v: %v\n", debugfile, err)
+			os.Exit(3)
+		}
+	}
+
+	if err != nil && os.IsNotExist(errors.Cause(err)) {
+		f, err = fs.OpenFile(debugfile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
+	}
+
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "unable to open debug log file: %v\n", err)
+		os.Exit(2)
+	}
+
+	opts.logger = log.New(f, "", log.LstdFlags)
+}
+
+func parseFilter(envname string, pad func(string) string) map[string]bool {
+	filter := make(map[string]bool)
+
+	env := os.Getenv(envname)
+	if env == "" {
+		return filter
+	}
+
+	for _, fn := range strings.Split(env, ",") {
+		t := pad(strings.TrimSpace(fn))
+		val := true
+		if t[0] == '-' {
+			val = false
+			t = t[1:]
+		} else if t[0] == '+' {
+			val = true
+			t = t[1:]
+		}
+
+		// test pattern
+		_, err := path.Match(t, "")
+		if err != nil {
+			fmt.Fprintf(os.Stderr, "error: invalid pattern %q: %v\n", t, err)
+			os.Exit(5)
+		}
+
+		filter[t] = val
+	}
+
+	return filter
+}
+
+func padFunc(s string) string {
+	if s == "all" {
+		return s
+	}
+
+	return s
+}
+
+func padFile(s string) string {
+	if s == "all" {
+		return s
+	}
+
+	if !strings.Contains(s, "/") {
+		s = "*/" + s
+	}
+
+	if !strings.Contains(s, ":") {
+		s = s + ":*"
+	}
+
+	return s
+}
+
+func initDebugTags() {
+	opts.funcs = parseFilter("DEBUG_FUNCS", padFunc)
+	opts.files = parseFilter("DEBUG_FILES", padFile)
+}
+
+// taken from https://github.com/VividCortex/trace
+func goroutineNum() int {
+	b := make([]byte, 20)
+	runtime.Stack(b, false)
+	var num int
+
+	fmt.Sscanf(string(b), "goroutine %d ", &num)
+	return num
+}
+
+// taken from https://github.com/VividCortex/trace
+func getPosition() (fn, dir, file string, line int) {
+	pc, file, line, ok := runtime.Caller(2)
+	if !ok {
+		return "", "", "", 0
+	}
+
+	dirname, filename := filepath.Base(filepath.Dir(file)),
filepath.Base(file) + + Func := runtime.FuncForPC(pc) + + return path.Base(Func.Name()), dirname, filename, line +} + +func checkFilter(filter map[string]bool, key string) bool { + // check if key is enabled directly + if v, ok := filter[key]; ok { + return v + } + + // check for globbing + for k, v := range filter { + if m, _ := path.Match(k, key); m { + return v + } + } + + // check if tag "all" is enabled + if v, ok := filter["all"]; ok && v { + return true + } + + return false +} + +// Log prints a message to the debug log (if debug is enabled). +func Log(f string, args ...interface{}) { + fn, dir, file, line := getPosition() + goroutine := goroutineNum() + + if len(f) == 0 || f[len(f)-1] != '\n' { + f += "\n" + } + + type Shortener interface { + Str() string + } + + for i, item := range args { + if shortener, ok := item.(Shortener); ok { + args[i] = shortener.Str() + } + } + + pos := fmt.Sprintf("%s/%s:%d", dir, file, line) + + formatString := fmt.Sprintf("%s\t%s\t%d\t%s", pos, fn, goroutine, f) + + dbgprint := func() { + fmt.Fprintf(os.Stderr, formatString, args...) + } + + if opts.logger != nil { + opts.logger.Printf(formatString, args...) + } + + filename := fmt.Sprintf("%s/%s:%d", dir, file, line) + if checkFilter(opts.files, filename) { + dbgprint() + return + } + + if checkFilter(opts.funcs, fn) { + dbgprint() + } +} diff --git a/internal/debug/debug_release.go b/internal/debug/debug_release.go new file mode 100644 index 000000000..9b4259cea --- /dev/null +++ b/internal/debug/debug_release.go @@ -0,0 +1,6 @@ +// +build !debug + +package debug + +// Log prints a message to the debug log (if debug is enabled). +func Log(fmt string, args ...interface{}) {} diff --git a/internal/debug/doc.go b/internal/debug/doc.go new file mode 100644 index 000000000..a5a62e29b --- /dev/null +++ b/internal/debug/doc.go @@ -0,0 +1,2 @@ +// Package debug provides an infrastructure for logging debug information and breakpoints. 
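+//
+// The logging implementation is only compiled in when the "debug" build tag
+// is set; without it, no-op stubs are used.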
+package debug diff --git a/internal/debug/hooks.go b/internal/debug/hooks.go new file mode 100644 index 000000000..e47084fee --- /dev/null +++ b/internal/debug/hooks.go @@ -0,0 +1,28 @@ +// +build debug + +package debug + +var ( + hooks map[string]func(interface{}) +) + +func init() { + hooks = make(map[string]func(interface{})) +} + +func Hook(name string, f func(interface{})) { + hooks[name] = f +} + +func RunHook(name string, context interface{}) { + f, ok := hooks[name] + if !ok { + return + } + + f(context) +} + +func RemoveHook(name string) { + delete(hooks, name) +} diff --git a/internal/debug/hooks_release.go b/internal/debug/hooks_release.go new file mode 100644 index 000000000..86efa9f64 --- /dev/null +++ b/internal/debug/hooks_release.go @@ -0,0 +1,9 @@ +// +build !debug + +package debug + +func Hook(name string, f func(interface{})) {} + +func RunHook(name string, context interface{}) {} + +func RemoveHook(name string) {} diff --git a/internal/debug/log_test.go b/internal/debug/log_test.go new file mode 100644 index 000000000..8751645ea --- /dev/null +++ b/internal/debug/log_test.go @@ -0,0 +1,34 @@ +package debug_test + +import ( + "github.com/restic/restic/internal/debug" + "github.com/restic/restic/internal/restic" + + "testing" +) + +func BenchmarkLogStatic(b *testing.B) { + for i := 0; i < b.N; i++ { + debug.Log("Static string") + } +} + +func BenchmarkLogIDStr(b *testing.B) { + id := restic.NewRandomID() + + b.ResetTimer() + + for i := 0; i < b.N; i++ { + debug.Log("id: %v", id) + } +} + +func BenchmarkLogIDString(b *testing.B) { + id := restic.NewRandomID() + + b.ResetTimer() + + for i := 0; i < b.N; i++ { + debug.Log("id: %s", id) + } +} diff --git a/internal/debug/round_tripper_debug.go b/internal/debug/round_tripper_debug.go new file mode 100644 index 000000000..7b29267c9 --- /dev/null +++ b/internal/debug/round_tripper_debug.go @@ -0,0 +1,95 @@ +// +build debug + +package debug + +import ( + "fmt" + "io" + "io/ioutil" + "net/http" + "net/http/httputil" + "os" + + "github.com/restic/restic/internal/errors" +) + +type eofDetectRoundTripper struct { + http.RoundTripper +} + +type eofDetectReader struct { + eofSeen bool + rd io.ReadCloser +} + +func (rd *eofDetectReader) Read(p []byte) (n int, err error) { + n, err = rd.rd.Read(p) + if err == io.EOF { + rd.eofSeen = true + } + return n, err +} + +func (rd *eofDetectReader) Close() error { + if !rd.eofSeen { + buf, err := ioutil.ReadAll(rd) + msg := fmt.Sprintf("body not drained, %d bytes not read", len(buf)) + if err != nil { + msg += fmt.Sprintf(", error: %v", err) + } + + if len(buf) > 0 { + if len(buf) > 20 { + buf = append(buf[:20], []byte("...")...) + } + msg += fmt.Sprintf(", body: %q", buf) + } + + fmt.Fprintln(os.Stderr, msg) + Log("%s: %+v", msg, errors.New("Close()")) + } + return rd.rd.Close() +} + +func (tr eofDetectRoundTripper) RoundTrip(req *http.Request) (res *http.Response, err error) { + res, err = tr.RoundTripper.RoundTrip(req) + if res != nil && res.Body != nil { + res.Body = &eofDetectReader{rd: res.Body} + } + return res, err +} + +type loggingRoundTripper struct { + http.RoundTripper +} + +// RoundTripper returns a new http.RoundTripper which logs all requests (if +// debug is enabled). When debug is not enabled, upstream is returned. 
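+// The returned RoundTripper additionally wraps each response body so that a
+// warning is printed when a body is closed before it has been read to EOF.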
+func RoundTripper(upstream http.RoundTripper) http.RoundTripper { + return loggingRoundTripper{eofDetectRoundTripper{upstream}} +} + +func (tr loggingRoundTripper) RoundTrip(req *http.Request) (res *http.Response, err error) { + trace, err := httputil.DumpRequestOut(req, false) + if err != nil { + Log("DumpRequestOut() error: %v\n", err) + } else { + Log("------------ HTTP REQUEST -----------\n%s", trace) + } + + res, err = tr.RoundTripper.RoundTrip(req) + if err != nil { + Log("RoundTrip() returned error: %v", err) + } + + if res != nil { + trace, err := httputil.DumpResponse(res, false) + if err != nil { + Log("DumpResponse() error: %v\n", err) + } else { + Log("------------ HTTP RESPONSE ----------\n%s", trace) + } + } + + return res, err +} diff --git a/internal/debug/round_tripper_release.go b/internal/debug/round_tripper_release.go new file mode 100644 index 000000000..6efff2c28 --- /dev/null +++ b/internal/debug/round_tripper_release.go @@ -0,0 +1,11 @@ +// +build !debug + +package debug + +import "net/http" + +// RoundTripper returns a new http.RoundTripper which logs all requests (if +// debug is enabled). When debug is not enabled, upstream is returned. +func RoundTripper(upstream http.RoundTripper) http.RoundTripper { + return upstream +} diff --git a/internal/errors/doc.go b/internal/errors/doc.go new file mode 100644 index 000000000..9f63cf958 --- /dev/null +++ b/internal/errors/doc.go @@ -0,0 +1,2 @@ +// Package errors provides custom error types used within restic. +package errors diff --git a/internal/errors/errors.go b/internal/errors/errors.go new file mode 100644 index 000000000..ffd3d615e --- /dev/null +++ b/internal/errors/errors.go @@ -0,0 +1,53 @@ +package errors + +import ( + "net/url" + + "github.com/pkg/errors" +) + +// New creates a new error based on message. Wrapped so that this package does +// not appear in the stack trace. +var New = errors.New + +// Errorf creates an error based on a format string and values. Wrapped so that +// this package does not appear in the stack trace. +var Errorf = errors.Errorf + +// Wrap wraps an error retrieved from outside of restic. Wrapped so that this +// package does not appear in the stack trace. +var Wrap = errors.Wrap + +// Wrapf returns an error annotating err with the format specifier. If err is +// nil, Wrapf returns nil. +var Wrapf = errors.Wrapf + +// WithMessage annotates err with a new message. If err is nil, WithMessage +// returns nil. +var WithMessage = errors.WithMessage + +// Cause returns the cause of an error. It will also unwrap certain errors, +// e.g. *url.Error returned by the net/http client. +func Cause(err error) error { + type Causer interface { + Cause() error + } + + for { + // unwrap *url.Error + if urlErr, ok := err.(*url.Error); ok { + err = urlErr.Err + continue + } + + // if err is a Causer, return the cause for this error. + if c, ok := err.(Causer); ok { + err = c.Cause() + continue + } + + break + } + + return err +} diff --git a/internal/errors/fatal.go b/internal/errors/fatal.go new file mode 100644 index 000000000..02ffdaab4 --- /dev/null +++ b/internal/errors/fatal.go @@ -0,0 +1,38 @@ +package errors + +import "fmt" + +// fatalError is an error that should be printed to the user, then the program +// should exit with an error code. +type fatalError string + +func (e fatalError) Error() string { + return string(e) +} + +func (e fatalError) Fatal() bool { + return true +} + +// Fataler is an error which should be printed to the user directly. 
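// ---------------------------------------------------------------------------
// Editor's aside, not part of the upstream patch: Cause above repeatedly
// strips two kinds of wrapper until neither applies, *url.Error (as returned
// by the net/http client) and anything implementing Cause() error (as
// produced by github.com/pkg/errors.Wrap). A standalone sketch of that
// unwrap loop, using only the standard library plus a hand-rolled wrapper in
// place of pkg/errors.
package main

import (
	"errors"
	"fmt"
	"net/url"
)

type wrapped struct {
	msg   string
	cause error
}

func (w wrapped) Error() string { return w.msg + ": " + w.cause.Error() }
func (w wrapped) Cause() error  { return w.cause }

func cause(err error) error {
	for {
		if urlErr, ok := err.(*url.Error); ok {
			err = urlErr.Err
			continue
		}
		if c, ok := err.(interface{ Cause() error }); ok {
			err = c.Cause()
			continue
		}
		return err
	}
}

func main() {
	root := errors.New("connection reset")
	err := &url.Error{Op: "Get", URL: "https://example.com", Err: wrapped{"request failed", root}}
	fmt.Println(cause(err) == root) // true: both layers are peeled off
}
// --- end of editor's aside ---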
+// Afterwards, the program should exit with an error. +type Fataler interface { + Fatal() bool +} + +// IsFatal returns true if err is a fatal message that should be printed to the +// user. Then, the program should exit. +func IsFatal(err error) bool { + e, ok := err.(Fataler) + return ok && e.Fatal() +} + +// Fatal returns a wrapped error which implements the Fataler interface. +func Fatal(s string) error { + return Wrap(fatalError(s), "Fatal") +} + +// Fatalf returns an error which implements the Fataler interface. +func Fatalf(s string, data ...interface{}) error { + return Wrap(fatalError(fmt.Sprintf(s, data...)), "Fatal") +} diff --git a/internal/filter/doc.go b/internal/filter/doc.go new file mode 100644 index 000000000..4e0ba0f4d --- /dev/null +++ b/internal/filter/doc.go @@ -0,0 +1,5 @@ +// Package filter implements filters for files similar to filepath.Glob, but +// in contrast to filepath.Glob a pattern may specify directories. +// +// For a list of valid patterns please see the documentation on filepath.Glob. +package filter diff --git a/internal/filter/filter.go b/internal/filter/filter.go new file mode 100644 index 000000000..74deddb03 --- /dev/null +++ b/internal/filter/filter.go @@ -0,0 +1,189 @@ +package filter + +import ( + "path/filepath" + "strings" + + "github.com/restic/restic/internal/errors" +) + +// ErrBadString is returned when Match is called with the empty string as the +// second argument. +var ErrBadString = errors.New("filter.Match: string is empty") + +// Match returns true if str matches the pattern. When the pattern is +// malformed, filepath.ErrBadPattern is returned. The empty pattern matches +// everything, when str is the empty string ErrBadString is returned. +// +// Pattern can be a combination of patterns suitable for filepath.Match, joined +// by filepath.Separator. +// +// In addition patterns suitable for filepath.Match, pattern accepts a +// recursive wildcard '**', which greedily matches an arbitrary number of +// intermediate directories. +func Match(pattern, str string) (matched bool, err error) { + if pattern == "" { + return true, nil + } + + pattern = filepath.Clean(pattern) + + if str == "" { + return false, ErrBadString + } + + // convert file path separator to '/' + if filepath.Separator != '/' { + pattern = strings.Replace(pattern, string(filepath.Separator), "/", -1) + str = strings.Replace(str, string(filepath.Separator), "/", -1) + } + + patterns := strings.Split(pattern, "/") + strs := strings.Split(str, "/") + + return match(patterns, strs) +} + +// ChildMatch returns true if children of str can match the pattern. When the pattern is +// malformed, filepath.ErrBadPattern is returned. The empty pattern matches +// everything, when str is the empty string ErrBadString is returned. +// +// Pattern can be a combination of patterns suitable for filepath.Match, joined +// by filepath.Separator. +// +// In addition patterns suitable for filepath.Match, pattern accepts a +// recursive wildcard '**', which greedily matches an arbitrary number of +// intermediate directories. 
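// ---------------------------------------------------------------------------
// Editor's aside, not part of the upstream patch: Fatal/Fatalf above mark an
// error as "print this message and exit", and a top-level handler can
// distinguish such errors from internal ones via the Fataler interface. A
// standalone sketch of that dispatch; it skips the errors.Wrap stack-trace
// wrapping the package adds, and the run function and message are made up
// for illustration.
package main

import (
	"fmt"
	"os"
)

type fatalError string

func (e fatalError) Error() string { return string(e) }
func (e fatalError) Fatal() bool   { return true }

// isFatal reports whether err asks for a user-facing message and an exit.
func isFatal(err error) bool {
	e, ok := err.(interface{ Fatal() bool })
	return ok && e.Fatal()
}

func run() error {
	return fatalError("Fatal: repository does not exist")
}

func main() {
	if err := run(); err != nil && isFatal(err) {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
// --- end of editor's aside ---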
+func ChildMatch(pattern, str string) (matched bool, err error) { + if pattern == "" { + return true, nil + } + + pattern = filepath.Clean(pattern) + + if str == "" { + return false, ErrBadString + } + + // convert file path separator to '/' + if filepath.Separator != '/' { + pattern = strings.Replace(pattern, string(filepath.Separator), "/", -1) + str = strings.Replace(str, string(filepath.Separator), "/", -1) + } + + patterns := strings.Split(pattern, "/") + strs := strings.Split(str, "/") + + return childMatch(patterns, strs) +} + +func childMatch(patterns, strs []string) (matched bool, err error) { + if patterns[0] != "" { + // relative pattern can always be nested down + return true, nil + } + + ok, pos := hasDoubleWildcard(patterns) + if ok && len(strs) >= pos { + // cut off at the double wildcard + strs = strs[:pos] + } + + // match path against absolute pattern prefix + l := 0 + if len(strs) > len(patterns) { + l = len(patterns) + } else { + l = len(strs) + } + return match(patterns[0:l], strs) +} + +func hasDoubleWildcard(list []string) (ok bool, pos int) { + for i, item := range list { + if item == "**" { + return true, i + } + } + + return false, 0 +} + +func match(patterns, strs []string) (matched bool, err error) { + if ok, pos := hasDoubleWildcard(patterns); ok { + // gradually expand '**' into separate wildcards + for i := 0; i <= len(strs)-len(patterns)+1; i++ { + newPat := make([]string, pos) + copy(newPat, patterns[:pos]) + for k := 0; k < i; k++ { + newPat = append(newPat, "*") + } + newPat = append(newPat, patterns[pos+1:]...) + + matched, err := match(newPat, strs) + if err != nil { + return false, err + } + + if matched { + return true, nil + } + } + + return false, nil + } + + if len(patterns) == 0 && len(strs) == 0 { + return true, nil + } + + if len(patterns) <= len(strs) { + outer: + for offset := len(strs) - len(patterns); offset >= 0; offset-- { + + for i := len(patterns) - 1; i >= 0; i-- { + ok, err := filepath.Match(patterns[i], strs[offset+i]) + if err != nil { + return false, errors.Wrap(err, "Match") + } + + if !ok { + continue outer + } + } + + return true, nil + } + } + + return false, nil +} + +// List returns true if str matches one of the patterns. Empty patterns are +// ignored. 
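// ---------------------------------------------------------------------------
// Editor's aside, not part of the upstream patch: match above handles the
// recursive wildcard by rewriting "**" into zero or more single-segment "*"
// patterns and retrying, so "foo/**/bar" is tried as "foo/bar", "foo/*/bar",
// "foo/*/*/bar", and so on up to the length of the path. A standalone sketch
// of just that expansion step (the segment-by-segment filepath.Match part is
// omitted).
package main

import (
	"fmt"
	"strings"
)

// expansions returns the candidate patterns that "**" at index pos is
// rewritten into for a path with n segments, mirroring the loop in match.
func expansions(pattern []string, pos, n int) [][]string {
	var out [][]string
	for i := 0; i <= n-len(pattern)+1; i++ {
		cand := append([]string{}, pattern[:pos]...)
		for k := 0; k < i; k++ {
			cand = append(cand, "*")
		}
		cand = append(cand, pattern[pos+1:]...)
		out = append(out, cand)
	}
	return out
}

func main() {
	pattern := []string{"foo", "**", "bar"}
	path := []string{"foo", "x", "y", "bar"} // 4 segments
	for _, c := range expansions(pattern, 1, len(path)) {
		fmt.Println(strings.Join(c, "/"))
	}
	// Output:
	// foo/bar
	// foo/*/bar
	// foo/*/*/bar
}
// --- end of editor's aside ---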
+func List(patterns []string, str string) (matched bool, childMayMatch bool, err error) { + for _, pat := range patterns { + if pat == "" { + continue + } + + m, err := Match(pat, str) + if err != nil { + return false, false, err + } + + c, err := ChildMatch(pat, str) + if err != nil { + return false, false, err + } + + matched = matched || m + childMayMatch = childMayMatch || c + + if matched && childMayMatch { + return true, true, nil + } + } + + return matched, childMayMatch, nil +} diff --git a/internal/filter/filter_test.go b/internal/filter/filter_test.go new file mode 100644 index 000000000..97df452fb --- /dev/null +++ b/internal/filter/filter_test.go @@ -0,0 +1,389 @@ +package filter_test + +import ( + "bufio" + "compress/bzip2" + "fmt" + "os" + "path/filepath" + "strings" + "testing" + + "github.com/restic/restic/internal/filter" +) + +var matchTests = []struct { + pattern string + path string + match bool +}{ + {"", "", true}, + {"", "foo", true}, + {"", "/x/y/z/foo", true}, + {"*.go", "/foo/bar/test.go", true}, + {"*.c", "/foo/bar/test.go", false}, + {"*", "/foo/bar/test.go", true}, + {"foo*", "/foo/bar/test.go", true}, + {"bar*", "/foo/bar/test.go", true}, + {"/bar*", "/foo/bar/test.go", false}, + {"bar/*", "/foo/bar/test.go", true}, + {"baz/*", "/foo/bar/test.go", false}, + {"bar/test.go", "/foo/bar/test.go", true}, + {"bar/*.go", "/foo/bar/test.go", true}, + {"ba*/*.go", "/foo/bar/test.go", true}, + {"bb*/*.go", "/foo/bar/test.go", false}, + {"test.*", "/foo/bar/test.go", true}, + {"tesT.*", "/foo/bar/test.go", false}, + {"bar/*", "/foo/bar/baz", true}, + {"bar", "/foo/bar", true}, + {"/foo/bar", "/foo/bar", true}, + {"/foo/bar/", "/foo/bar", true}, + {"/foo/bar", "/foo/baz", false}, + {"/foo/bar", "/foo/baz/", false}, + {"/foo///bar", "/foo/bar", true}, + {"/foo/../bar", "/foo/bar", false}, + {"/foo/../bar", "/bar", true}, + {"/foo", "/foo/baz", true}, + {"/foo/", "/foo/baz", true}, + {"/foo/*", "/foo", false}, + {"/foo/*", "/foo/baz", true}, + {"bar", "/foo/bar/baz", true}, + {"bar", "/foo/bar/test.go", true}, + {"/foo/*test.*", "/foo/bar/test.go", false}, + {"/foo/*/test.*", "/foo/bar/test.go", true}, + {"/foo/*/bar/test.*", "/foo/bar/test.go", false}, + {"/*/*/bar/test.*", "/foo/bar/test.go", false}, + {"/*/*/bar/test.*", "/foo/bar/baz/test.go", false}, + {"/*/*/baz/test.*", "/foo/bar/baz/test.go", true}, + {"/*/foo/bar/test.*", "/foo/bar/baz/test.go", false}, + {"/*/foo/bar/test.*", "/foo/bar/baz/test.go", false}, + {"/foo/bar/test.*", "bar/baz/test.go", false}, + {"/x/y/bar/baz/test.*", "bar/baz/test.go", false}, + {"/x/y/bar/baz/test.c", "bar/baz/test.go", false}, + {"baz/test.*", "bar/baz/test.go", true}, + {"baz/tesT.*", "bar/baz/test.go", false}, + {"test.go", "bar/baz/test.go", true}, + {"*.go", "bar/baz/test.go", true}, + {"*.c", "bar/baz/test.go", false}, + {"sdk", "/foo/bar/sdk", true}, + {"sdk", "/foo/bar/sdk/test/sdk_foo.go", true}, + { + "sdk/*/cpp/*/*vars*.html", + "/usr/share/doc/libreoffice/sdk/docs/cpp/ref/a00517.html", + false, + }, + {"foo/**/bar/*.go", "/home/user/foo/work/special/project/bar/test.go", true}, + {"foo/**/bar/*.go", "/home/user/foo/bar/test.go", true}, + {"foo/**/bar/*.go", "x/foo/bar/test.go", true}, + {"foo/**/bar/*.go", "foo/bar/test.go", true}, + {"foo/**/bar/*.go", "foo/bar/baz/bar/test.go", true}, + {"foo/**/bar/*.go", "/home/user/foo/test.c", false}, + {"foo/**/bar/*.go", "bar/foo/main.go", false}, + {"foo/**/bar/*.go", "/foo/bar/main.go", true}, + {"foo/**/bar/*.go", "bar/main.go", false}, + {"foo/**/bar", 
"/home/user/foo/x/y/bar", true}, + {"foo/**/bar", "/home/user/foo/x/y/bar/main.go", true}, + {"foo/**/bar/**/x", "/home/user/foo/bar/x", true}, + {"foo/**/bar/**/x", "/home/user/foo/blaaa/blaz/bar/shared/work/x", true}, + {"user/**/important*", "/home/user/work/x/y/hidden/x", false}, + {"user/**/hidden*/**/c", "/home/user/work/x/y/hidden/z/a/b/c", true}, + {"c:/foo/*test.*", "c:/foo/bar/test.go", false}, + {"c:/foo", "c:/foo/bar", true}, + {"c:/foo/", "c:/foo/bar", true}, + {"c:/foo/*/test.*", "c:/foo/bar/test.go", true}, + {"c:/foo/*/bar/test.*", "c:/foo/bar/test.go", false}, +} + +func testpattern(t *testing.T, pattern, path string, shouldMatch bool) { + match, err := filter.Match(pattern, path) + if err != nil { + t.Errorf("test pattern %q failed: expected no error for path %q, but error returned: %v", + pattern, path, err) + } + + if match != shouldMatch { + t.Errorf("test: filter.Match(%q, %q): expected %v, got %v", + pattern, path, shouldMatch, match) + } +} + +func TestMatch(t *testing.T) { + for _, test := range matchTests { + t.Run("", func(t *testing.T) { + testpattern(t, test.pattern, test.path, test.match) + }) + + // Test with native path separator + if filepath.Separator != '/' { + pattern := strings.Replace(test.pattern, "/", string(filepath.Separator), -1) + // Test with pattern as native + t.Run("pattern-native", func(t *testing.T) { + testpattern(t, pattern, test.path, test.match) + }) + + path := strings.Replace(test.path, "/", string(filepath.Separator), -1) + t.Run("path-native", func(t *testing.T) { + // Test with path as native + testpattern(t, test.pattern, path, test.match) + }) + + t.Run("both-native", func(t *testing.T) { + // Test with both pattern and path as native + testpattern(t, pattern, path, test.match) + }) + } + } +} + +var childMatchTests = []struct { + pattern string + path string + match bool +}{ + {"", "", true}, + {"", "/foo", true}, + {"", "/x/y/z/foo", true}, + {"foo/bar", "/foo", true}, + {"baz/bar", "/foo", true}, + {"foo", "/foo/bar", true}, + {"bar", "/foo", true}, + {"baz", "/foo/bar", true}, + {"*", "/foo", true}, + {"*", "/foo/bar", true}, + {"/foo/bar", "/foo", true}, + {"/foo/bar/baz", "/foo", true}, + {"/foo/bar/baz", "/foo/bar", true}, + {"/foo/bar/baz", "/foo/baz", false}, + {"/foo/**/baz", "/foo/bar/baz", true}, + {"/foo/**/baz", "/foo/bar/baz/blah", true}, + {"/foo/**/qux", "/foo/bar/baz/qux", true}, + {"/foo/**/qux", "/foo/bar/baz", true}, + {"/foo/**/qux", "/foo/bar/baz/boo", true}, + {"/foo/**", "/foo/bar/baz", true}, + {"/foo/**", "/foo/bar", true}, + {"foo/**/bar/**/x", "/home/user/foo", true}, + {"foo/**/bar/**/x", "/home/user/foo/bar", true}, + {"foo/**/bar/**/x", "/home/user/foo/blaaa/blaz/bar/shared/work/x", true}, + {"/foo/*/qux", "/foo/bar", true}, + {"/foo/*/qux", "/foo/bar/boo", false}, + {"/foo/*/qux", "/foo/bar/boo/xx", false}, + {"/baz/bar", "/foo", false}, + {"/foo", "/foo/bar", true}, + {"/*", "/foo", true}, + {"/*", "/foo/bar", true}, + {"/foo", "/foo/bar", true}, + {"/**", "/foo", true}, + {"/*/**", "/foo", true}, + {"/*/**", "/foo/bar", true}, + {"/*/bar", "/foo", true}, + {"/bar/*", "/foo", false}, + {"/foo/*/baz", "/foo/bar", true}, + {"/foo/*/baz", "/foo/baz", true}, + {"/foo/*/baz", "/bar/baz", false}, + {"/**/*", "/foo", true}, + {"/**/bar", "/foo/bar", true}, +} + +func testchildpattern(t *testing.T, pattern, path string, shouldMatch bool) { + match, err := filter.ChildMatch(pattern, path) + if err != nil { + t.Errorf("test child pattern %q failed: expected no error for path %q, but error returned: %v", + 
pattern, path, err) + } + + if match != shouldMatch { + t.Errorf("test: filter.ChildMatch(%q, %q): expected %v, got %v", + pattern, path, shouldMatch, match) + } +} + +func TestChildMatch(t *testing.T) { + for _, test := range childMatchTests { + t.Run("", func(t *testing.T) { + testchildpattern(t, test.pattern, test.path, test.match) + }) + + // Test with native path separator + if filepath.Separator != '/' { + pattern := strings.Replace(test.pattern, "/", string(filepath.Separator), -1) + // Test with pattern as native + t.Run("pattern-native", func(t *testing.T) { + testchildpattern(t, pattern, test.path, test.match) + }) + + path := strings.Replace(test.path, "/", string(filepath.Separator), -1) + t.Run("path-native", func(t *testing.T) { + // Test with path as native + testchildpattern(t, test.pattern, path, test.match) + }) + + t.Run("both-native", func(t *testing.T) { + // Test with both pattern and path as native + testchildpattern(t, pattern, path, test.match) + }) + } + } +} + +func ExampleMatch() { + match, _ := filter.Match("*.go", "/home/user/file.go") + fmt.Printf("match: %v\n", match) + // Output: + // match: true +} + +func ExampleMatch_wildcards() { + match, _ := filter.Match("/home/[uU]ser/?.go", "/home/user/F.go") + fmt.Printf("match: %v\n", match) + // Output: + // match: true +} + +var filterListTests = []struct { + patterns []string + path string + match bool +}{ + {[]string{"*.go"}, "/foo/bar/test.go", true}, + {[]string{"*.c"}, "/foo/bar/test.go", false}, + {[]string{"*.go", "*.c"}, "/foo/bar/test.go", true}, + {[]string{"*"}, "/foo/bar/test.go", true}, + {[]string{"x"}, "/foo/bar/test.go", false}, + {[]string{"?"}, "/foo/bar/test.go", false}, + {[]string{"?", "x"}, "/foo/bar/x", true}, + {[]string{"/*/*/bar/test.*"}, "/foo/bar/test.go", false}, + {[]string{"/*/*/bar/test.*", "*.go"}, "/foo/bar/test.go", true}, + {[]string{"", "*.c"}, "/foo/bar/test.go", false}, +} + +func TestList(t *testing.T) { + for i, test := range filterListTests { + match, _, err := filter.List(test.patterns, test.path) + if err != nil { + t.Errorf("test %d failed: expected no error for patterns %q, but error returned: %v", + i, test.patterns, err) + continue + } + + if match != test.match { + t.Errorf("test %d: filter.MatchList(%q, %q): expected %v, got %v", + i, test.patterns, test.path, test.match, match) + } + } +} + +func ExampleList() { + match, _, _ := filter.List([]string{"*.c", "*.go"}, "/home/user/file.go") + fmt.Printf("match: %v\n", match) + // Output: + // match: true +} + +func extractTestLines(t testing.TB) (lines []string) { + f, err := os.Open("testdata/libreoffice.txt.bz2") + if err != nil { + t.Fatal(err) + } + + defer func() { + if err := f.Close(); err != nil { + t.Fatal(err) + } + }() + + sc := bufio.NewScanner(bzip2.NewReader(f)) + for sc.Scan() { + lines = append(lines, sc.Text()) + } + + return lines +} + +func TestFilterPatternsFile(t *testing.T) { + lines := extractTestLines(t) + + var testPatterns = []struct { + pattern string + hits uint + }{ + {"*.html", 18249}, + {"sdk", 22186}, + {"sdk/*/cpp/*/*vars.html", 3}, + } + + for _, test := range testPatterns { + var c uint + for _, line := range lines { + match, err := filter.Match(test.pattern, line) + if err != nil { + t.Error(err) + continue + } + + if match { + c++ + // fmt.Printf("pattern %q, line %q\n", test.pattern, line) + } + } + + if c != test.hits { + t.Errorf("wrong number of hits for pattern %q: want %d, got %d", + test.pattern, test.hits, c) + } + } +} + +func BenchmarkFilterLines(b *testing.B) { + 
pattern := "sdk/*/cpp/*/*vars.html" + lines := extractTestLines(b) + var c uint + + b.ResetTimer() + + for i := 0; i < b.N; i++ { + c = 0 + for _, line := range lines { + match, err := filter.Match(pattern, line) + if err != nil { + b.Fatal(err) + } + + if match { + c++ + } + } + + if c != 3 { + b.Fatalf("wrong number of matches: expected 3, got %d", c) + } + } +} + +func BenchmarkFilterPatterns(b *testing.B) { + patterns := []string{ + "sdk/*", + "*.html", + } + lines := extractTestLines(b) + var c uint + + b.ResetTimer() + + for i := 0; i < b.N; i++ { + c = 0 + for _, line := range lines { + match, _, err := filter.List(patterns, line) + if err != nil { + b.Fatal(err) + } + + if match { + c++ + } + } + + if c != 22185 { + b.Fatalf("wrong number of matches: expected 22185, got %d", c) + } + } +} diff --git a/internal/filter/testdata/libreoffice.txt.bz2 b/internal/filter/testdata/libreoffice.txt.bz2 new file mode 100644 index 000000000..adc90f2e2 Binary files /dev/null and b/internal/filter/testdata/libreoffice.txt.bz2 differ diff --git a/internal/fs/const.go b/internal/fs/const.go new file mode 100644 index 000000000..f5f65de8e --- /dev/null +++ b/internal/fs/const.go @@ -0,0 +1,17 @@ +package fs + +import "syscall" + +// Flags to OpenFile wrapping those of the underlying system. Not all flags may +// be implemented on a given system. +const ( + O_RDONLY int = syscall.O_RDONLY // open the file read-only. + O_WRONLY int = syscall.O_WRONLY // open the file write-only. + O_RDWR int = syscall.O_RDWR // open the file read-write. + O_APPEND int = syscall.O_APPEND // append data to the file when writing. + O_CREATE int = syscall.O_CREAT // create a new file if none exists. + O_EXCL int = syscall.O_EXCL // used with O_CREATE, file must not exist + O_SYNC int = syscall.O_SYNC // open for synchronous I/O. + O_TRUNC int = syscall.O_TRUNC // if possible, truncate file when opened. + O_NONBLOCK int = syscall.O_NONBLOCK // don't block open on fifos etc. +) diff --git a/internal/fs/const_unix.go b/internal/fs/const_unix.go new file mode 100644 index 000000000..a90d171b1 --- /dev/null +++ b/internal/fs/const_unix.go @@ -0,0 +1,8 @@ +// +build !windows + +package fs + +import "syscall" + +// O_NOFOLLOW instructs the kernel to not follow symlinks when opening a file. +const O_NOFOLLOW int = syscall.O_NOFOLLOW diff --git a/internal/fs/const_windows.go b/internal/fs/const_windows.go new file mode 100644 index 000000000..18c89c27e --- /dev/null +++ b/internal/fs/const_windows.go @@ -0,0 +1,6 @@ +// +build windows + +package fs + +// O_NOFOLLOW is a noop on Windows. +const O_NOFOLLOW int = 0 diff --git a/internal/fs/deviceid_unix.go b/internal/fs/deviceid_unix.go new file mode 100644 index 000000000..31efd29ff --- /dev/null +++ b/internal/fs/deviceid_unix.go @@ -0,0 +1,30 @@ +// +build !windows + +package fs + +import ( + "os" + "syscall" + + "github.com/restic/restic/internal/errors" +) + +// DeviceID extracts the device ID from an os.FileInfo object by casting it +// to syscall.Stat_t +func DeviceID(fi os.FileInfo) (deviceID uint64, err error) { + if fi == nil { + return 0, errors.New("unable to determine device: fi is nil") + } + + if fi.Sys() == nil { + return 0, errors.New("unable to determine device: fi.Sys() is nil") + } + + if st, ok := fi.Sys().(*syscall.Stat_t); ok { + // st.Dev is uint32 on Darwin and uint64 on Linux. Just cast + // everything to uint64. 
+ return uint64(st.Dev), nil + } + + return 0, errors.New("Could not cast to syscall.Stat_t") +} diff --git a/internal/fs/deviceid_windows.go b/internal/fs/deviceid_windows.go new file mode 100644 index 000000000..4e2f2f9de --- /dev/null +++ b/internal/fs/deviceid_windows.go @@ -0,0 +1,15 @@ +// +build windows + +package fs + +import ( + "os" + + "github.com/restic/restic/internal/errors" +) + +// DeviceID extracts the device ID from an os.FileInfo object by casting it +// to syscall.Stat_t +func DeviceID(fi os.FileInfo) (deviceID uint64, err error) { + return 0, errors.New("Device IDs are not supported on Windows") +} diff --git a/internal/fs/doc.go b/internal/fs/doc.go new file mode 100644 index 000000000..06072381e --- /dev/null +++ b/internal/fs/doc.go @@ -0,0 +1,3 @@ +// Package fs implements an OS independent abstraction of a file system +// suitable for backup purposes. +package fs diff --git a/internal/fs/file.go b/internal/fs/file.go new file mode 100644 index 000000000..86c519aff --- /dev/null +++ b/internal/fs/file.go @@ -0,0 +1,118 @@ +package fs + +import ( + "os" + "path/filepath" + "time" +) + +// Mkdir creates a new directory with the specified name and permission bits. +// If there is an error, it will be of type *PathError. +func Mkdir(name string, perm os.FileMode) error { + return os.Mkdir(fixpath(name), perm) +} + +// Readlink returns the destination of the named symbolic link. +// If there is an error, it will be of type *PathError. +func Readlink(name string) (string, error) { + return os.Readlink(fixpath(name)) +} + +// Remove removes the named file or directory. +// If there is an error, it will be of type *PathError. +func Remove(name string) error { + return os.Remove(fixpath(name)) +} + +// RemoveAll removes path and any children it contains. +// It removes everything it can but returns the first error +// it encounters. If the path does not exist, RemoveAll +// returns nil (no error). +func RemoveAll(path string) error { + return os.RemoveAll(fixpath(path)) +} + +// Rename renames (moves) oldpath to newpath. +// If newpath already exists, Rename replaces it. +// OS-specific restrictions may apply when oldpath and newpath are in different directories. +// If there is an error, it will be of type *LinkError. +func Rename(oldpath, newpath string) error { + return os.Rename(fixpath(oldpath), fixpath(newpath)) +} + +// Symlink creates newname as a symbolic link to oldname. +// If there is an error, it will be of type *LinkError. +func Symlink(oldname, newname string) error { + return os.Symlink(fixpath(oldname), fixpath(newname)) +} + +// Link creates newname as a hard link to oldname. +// If there is an error, it will be of type *LinkError. +func Link(oldname, newname string) error { + return os.Link(fixpath(oldname), fixpath(newname)) +} + +// Stat returns a FileInfo structure describing the named file. +// If there is an error, it will be of type *PathError. +func Stat(name string) (os.FileInfo, error) { + return os.Stat(fixpath(name)) +} + +// Lstat returns the FileInfo structure describing the named file. +// If the file is a symbolic link, the returned FileInfo +// describes the symbolic link. Lstat makes no attempt to follow the link. +// If there is an error, it will be of type *PathError. +func Lstat(name string) (os.FileInfo, error) { + return os.Lstat(fixpath(name)) +} + +// Create creates the named file with mode 0666 (before umask), truncating +// it if it already exists. 
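// ---------------------------------------------------------------------------
// Editor's aside, not part of the upstream patch: DeviceID is the building
// block for a one-file-system style check: compare the device of a child
// with the device of its parent and skip the subtree on a mismatch, which
// indicates a mount point. A hedged sketch of that comparison, assuming the
// internal/fs import path from this patch; on Windows DeviceID always
// errors, so such a check must be unix-only or tolerate the error.
package main

import (
	"fmt"
	"os"

	"github.com/restic/restic/internal/fs"
)

func sameDevice(parent, child string) (bool, error) {
	fiParent, err := os.Lstat(parent)
	if err != nil {
		return false, err
	}
	fiChild, err := os.Lstat(child)
	if err != nil {
		return false, err
	}
	idParent, err := fs.DeviceID(fiParent)
	if err != nil {
		return false, err
	}
	idChild, err := fs.DeviceID(fiChild)
	if err != nil {
		return false, err
	}
	return idParent == idChild, nil
}

func main() {
	// /proc is usually a separate mount on Linux, so this prints false.
	same, err := sameDevice("/", "/proc")
	fmt.Println(same, err)
}
// --- end of editor's aside ---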
If successful, methods on the returned +// File can be used for I/O; the associated file descriptor has mode +// O_RDWR. +// If there is an error, it will be of type *PathError. +func Create(name string) (*os.File, error) { + return os.Create(fixpath(name)) +} + +// Open opens a file for reading. +func Open(name string) (File, error) { + return os.Open(fixpath(name)) +} + +// OpenFile is the generalized open call; most users will use Open +// or Create instead. It opens the named file with specified flag +// (O_RDONLY etc.) and perm, (0666 etc.) if applicable. If successful, +// methods on the returned File can be used for I/O. +// If there is an error, it will be of type *PathError. +func OpenFile(name string, flag int, perm os.FileMode) (*os.File, error) { + return os.OpenFile(fixpath(name), flag, perm) +} + +// Walk walks the file tree rooted at root, calling walkFn for each file or +// directory in the tree, including root. All errors that arise visiting files +// and directories are filtered by walkFn. The files are walked in lexical +// order, which makes the output deterministic but means that for very +// large directories Walk can be inefficient. +// Walk does not follow symbolic links. +func Walk(root string, walkFn filepath.WalkFunc) error { + return filepath.Walk(fixpath(root), walkFn) +} + +// RemoveIfExists removes a file, returning no error if it does not exist. +func RemoveIfExists(filename string) error { + err := os.Remove(filename) + if err != nil && os.IsNotExist(err) { + err = nil + } + return err +} + +// Chtimes changes the access and modification times of the named file, +// similar to the Unix utime() or utimes() functions. +// +// The underlying filesystem may truncate or round the values to a less +// precise time unit. If there is an error, it will be of type *PathError. +func Chtimes(name string, atime time.Time, mtime time.Time) error { + return os.Chtimes(fixpath(name), atime, mtime) +} diff --git a/internal/fs/file_unix.go b/internal/fs/file_unix.go new file mode 100644 index 000000000..612465670 --- /dev/null +++ b/internal/fs/file_unix.go @@ -0,0 +1,58 @@ +// +build !windows + +package fs + +import ( + "io/ioutil" + "os" + "syscall" +) + +// fixpath returns an absolute path on windows, so restic can open long file +// names. +func fixpath(name string) string { + return name +} + +// MkdirAll creates a directory named path, along with any necessary parents, +// and returns nil, or else returns an error. The permission bits perm are used +// for all directories that MkdirAll creates. If path is already a directory, +// MkdirAll does nothing and returns nil. +func MkdirAll(path string, perm os.FileMode) error { + return os.MkdirAll(fixpath(path), perm) +} + +// TempFile creates a temporary file which has already been deleted (on +// supported platforms) +func TempFile(dir, prefix string) (f *os.File, err error) { + f, err = ioutil.TempFile(dir, prefix) + if err != nil { + return nil, err + } + + if err = os.Remove(f.Name()); err != nil { + return nil, err + } + + return f, nil +} + +// isNotSuported returns true if the error is caused by an unsupported file system feature. +func isNotSupported(err error) bool { + if perr, ok := err.(*os.PathError); ok && perr.Err == syscall.ENOTSUP { + return true + } + return false +} + +// Chmod changes the mode of the named file to mode. +func Chmod(name string, mode os.FileMode) error { + err := os.Chmod(fixpath(name), mode) + + // ignore the error if the FS does not support setting this mode (e.g. 
CIFS with gvfs on Linux) + if err != nil && isNotSupported(err) { + return nil + } + + return err +} diff --git a/internal/fs/file_windows.go b/internal/fs/file_windows.go new file mode 100644 index 000000000..dd53cde5e --- /dev/null +++ b/internal/fs/file_windows.go @@ -0,0 +1,98 @@ +package fs + +import ( + "io/ioutil" + "os" + "path/filepath" + "strings" + "syscall" +) + +// fixpath returns an absolute path on windows, so restic can open long file +// names. +func fixpath(name string) string { + abspath, err := filepath.Abs(name) + if err == nil { + // Check if \\?\UNC\ already exist + if strings.HasPrefix(abspath, `\\?\UNC\`) { + return abspath + } + // Check if \\?\ already exist + if strings.HasPrefix(abspath, `\\?\`) { + return abspath + } + // Check if path starts with \\ + if strings.HasPrefix(abspath, `\\`) { + return strings.Replace(abspath, `\\`, `\\?\UNC\`, 1) + } + // Normal path + return `\\?\` + abspath + } + return name +} + +// MkdirAll creates a directory named path, along with any necessary parents, +// and returns nil, or else returns an error. The permission bits perm are used +// for all directories that MkdirAll creates. If path is already a directory, +// MkdirAll does nothing and returns nil. +// +// Adapted from the stdlib MkdirAll, added test for volume name. +func MkdirAll(path string, perm os.FileMode) error { + // Fast path: if we can tell whether path is a directory or file, stop with success or error. + dir, err := os.Stat(path) + if err == nil { + if dir.IsDir() { + return nil + } + return &os.PathError{ + Op: "mkdir", + Path: path, + Err: syscall.ENOTDIR, + } + } + + // Slow path: make sure parent exists and then call Mkdir for path. + i := len(path) + for i > 0 && os.IsPathSeparator(path[i-1]) { // Skip trailing path separator. + i-- + } + + j := i + for j > 0 && !os.IsPathSeparator(path[j-1]) { // Scan backward over element. + j-- + } + + if j > 1 { + // Create parent + parent := path[0 : j-1] + if parent != filepath.VolumeName(parent) { + err = MkdirAll(parent, perm) + if err != nil { + return err + } + } + } + + // Parent now exists; invoke Mkdir and use its result. + err = os.Mkdir(path, perm) + if err != nil { + // Handle arguments like "foo/." by + // double-checking that directory doesn't exist. + dir, err1 := os.Lstat(path) + if err1 == nil && dir.IsDir() { + return nil + } + return err + } + return nil +} + +// TempFile creates a temporary file. +func TempFile(dir, prefix string) (f *os.File, err error) { + return ioutil.TempFile(dir, prefix) +} + +// Chmod changes the mode of the named file to mode. +func Chmod(name string, mode os.FileMode) error { + return os.Chmod(fixpath(name), mode) +} diff --git a/internal/fs/fs_helpers.go b/internal/fs/fs_helpers.go new file mode 100644 index 000000000..6b269f763 --- /dev/null +++ b/internal/fs/fs_helpers.go @@ -0,0 +1,45 @@ +package fs + +import "os" + +// ReadDir reads the directory named by dirname within fs and returns a list of +// directory entries. +func ReadDir(fs FS, dirname string) ([]os.FileInfo, error) { + f, err := fs.Open(dirname) + if err != nil { + return nil, err + } + + entries, err := f.Readdir(-1) + if err != nil { + return nil, err + } + + err = f.Close() + if err != nil { + return nil, err + } + + return entries, nil +} + +// ReadDirNames reads the directory named by dirname within fs and returns a +// list of entry names. 
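// ---------------------------------------------------------------------------
// Editor's aside, not part of the upstream patch: fixpath above lifts the
// legacy 260-character MAX_PATH limit on Windows by converting paths to the
// extended-length form: `C:\x` becomes `\\?\C:\x` and a UNC path `\\host\s`
// becomes `\\?\UNC\host\s`, while paths already carrying either prefix pass
// through untouched. A standalone sketch of just the prefixing rules (the
// filepath.Abs step is skipped so the outputs are deterministic).
package main

import (
	"fmt"
	"strings"
)

func prefix(abspath string) string {
	switch {
	case strings.HasPrefix(abspath, `\\?\UNC\`), strings.HasPrefix(abspath, `\\?\`):
		return abspath // already extended-length
	case strings.HasPrefix(abspath, `\\`):
		return strings.Replace(abspath, `\\`, `\\?\UNC\`, 1) // UNC share
	default:
		return `\\?\` + abspath // plain drive path
	}
}

func main() {
	fmt.Println(prefix(`C:\backup\file`))    // \\?\C:\backup\file
	fmt.Println(prefix(`\\host\share\f`))    // \\?\UNC\host\share\f
	fmt.Println(prefix(`\\?\C:\already\ok`)) // \\?\C:\already\ok
}
// --- end of editor's aside ---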
+func ReadDirNames(fs FS, dirname string) ([]string, error) { + f, err := fs.Open(dirname) + if err != nil { + return nil, err + } + + entries, err := f.Readdirnames(-1) + if err != nil { + return nil, err + } + + err = f.Close() + if err != nil { + return nil, err + } + + return entries, nil +} diff --git a/internal/fs/fs_local.go b/internal/fs/fs_local.go new file mode 100644 index 000000000..dd1faafa0 --- /dev/null +++ b/internal/fs/fs_local.go @@ -0,0 +1,96 @@ +package fs + +import ( + "os" + "path/filepath" +) + +// Local is the local file system. Most methods are just passed on to the stdlib. +type Local struct{} + +// statically ensure that Local implements FS. +var _ FS = &Local{} + +// VolumeName returns leading volume name. Given "C:\foo\bar" it returns "C:" +// on Windows. Given "\\host\share\foo" it returns "\\host\share". On other +// platforms it returns "". +func (fs Local) VolumeName(path string) string { + return filepath.VolumeName(path) +} + +// Open opens a file for reading. +func (fs Local) Open(name string) (File, error) { + f, err := os.Open(fixpath(name)) + if err != nil { + return nil, err + } + return f, nil +} + +// OpenFile is the generalized open call; most users will use Open +// or Create instead. It opens the named file with specified flag +// (O_RDONLY etc.) and perm, (0666 etc.) if applicable. If successful, +// methods on the returned File can be used for I/O. +// If there is an error, it will be of type *PathError. +func (fs Local) OpenFile(name string, flag int, perm os.FileMode) (File, error) { + f, err := os.OpenFile(fixpath(name), flag, perm) + if err != nil { + return nil, err + } + return f, nil +} + +// Stat returns a FileInfo describing the named file. If there is an error, it +// will be of type *PathError. +func (fs Local) Stat(name string) (os.FileInfo, error) { + return os.Stat(fixpath(name)) +} + +// Lstat returns the FileInfo structure describing the named file. +// If the file is a symbolic link, the returned FileInfo +// describes the symbolic link. Lstat makes no attempt to follow the link. +// If there is an error, it will be of type *PathError. +func (fs Local) Lstat(name string) (os.FileInfo, error) { + return os.Lstat(fixpath(name)) +} + +// Join joins any number of path elements into a single path, adding a +// Separator if necessary. Join calls Clean on the result; in particular, all +// empty strings are ignored. On Windows, the result is a UNC path if and only +// if the first path element is a UNC path. +func (fs Local) Join(elem ...string) string { + return filepath.Join(elem...) +} + +// Separator returns the OS and FS dependent separator for dirs/subdirs/files. +func (fs Local) Separator() string { + return string(filepath.Separator) +} + +// IsAbs reports whether the path is absolute. +func (fs Local) IsAbs(path string) bool { + return filepath.IsAbs(path) +} + +// Abs returns an absolute representation of path. If the path is not absolute +// it will be joined with the current working directory to turn it into an +// absolute path. The absolute path name for a given file is not guaranteed to +// be unique. Abs calls Clean on the result. +func (fs Local) Abs(path string) (string, error) { + return filepath.Abs(path) +} + +// Clean returns the cleaned path. For details, see filepath.Clean. +func (fs Local) Clean(p string) string { + return filepath.Clean(p) +} + +// Base returns the last element of path. 
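// ---------------------------------------------------------------------------
// Editor's aside, not part of the upstream patch: because Local implements
// the FS interface, the ReadDir/ReadDirNames helpers above work unchanged
// against the real file system or any other FS implementation (such as the
// single-file Reader below). A hedged standalone sketch, assuming the
// internal/fs import path from this patch.
package main

import (
	"fmt"

	"github.com/restic/restic/internal/fs"
)

func main() {
	var filesystem fs.FS = fs.Local{}

	names, err := fs.ReadDirNames(filesystem, ".")
	if err != nil {
		panic(err)
	}
	for _, name := range names {
		fmt.Println(name)
	}
}
// --- end of editor's aside ---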
+func (fs Local) Base(path string) string { + return filepath.Base(path) +} + +// Dir returns path without the last element. +func (fs Local) Dir(path string) string { + return filepath.Dir(path) +} diff --git a/internal/fs/fs_reader.go b/internal/fs/fs_reader.go new file mode 100644 index 000000000..385c8f92b --- /dev/null +++ b/internal/fs/fs_reader.go @@ -0,0 +1,289 @@ +package fs + +import ( + "io" + "os" + "path" + "sync" + "syscall" + "time" + + "github.com/restic/restic/internal/errors" +) + +// Reader is a file system which provides a directory with a single file. When +// this file is opened for reading, the reader is passed through. The file can +// be opened once, all subsequent open calls return syscall.EIO. For Lstat(), +// the provided FileInfo is returned. +type Reader struct { + Name string + io.ReadCloser + + Mode os.FileMode + ModTime time.Time + Size int64 + + open sync.Once +} + +// statically ensure that Local implements FS. +var _ FS = &Reader{} + +// VolumeName returns leading volume name, for the Reader file system it's +// always the empty string. +func (fs *Reader) VolumeName(path string) string { + return "" +} + +// Open opens a file for reading. +func (fs *Reader) Open(name string) (f File, err error) { + switch name { + case fs.Name: + fs.open.Do(func() { + f = newReaderFile(fs.ReadCloser, fs.fi()) + }) + + if f == nil { + return nil, syscall.EIO + } + + return f, nil + case "/", ".": + f = fakeDir{ + entries: []os.FileInfo{fs.fi()}, + } + return f, nil + } + + return nil, syscall.ENOENT +} + +func (fs *Reader) fi() os.FileInfo { + return fakeFileInfo{ + name: fs.Name, + size: fs.Size, + mode: fs.Mode, + modtime: fs.ModTime, + } +} + +// OpenFile is the generalized open call; most users will use Open +// or Create instead. It opens the named file with specified flag +// (O_RDONLY etc.) and perm, (0666 etc.) if applicable. If successful, +// methods on the returned File can be used for I/O. +// If there is an error, it will be of type *PathError. +func (fs *Reader) OpenFile(name string, flag int, perm os.FileMode) (f File, err error) { + if flag & ^(O_RDONLY|O_NOFOLLOW) != 0 { + return nil, errors.Errorf("invalid combination of flags 0x%x", flag) + } + + fs.open.Do(func() { + f = newReaderFile(fs.ReadCloser, fs.fi()) + }) + + if f == nil { + return nil, syscall.EIO + } + + return f, nil +} + +// Stat returns a FileInfo describing the named file. If there is an error, it +// will be of type *PathError. +func (fs *Reader) Stat(name string) (os.FileInfo, error) { + return fs.Lstat(name) +} + +// Lstat returns the FileInfo structure describing the named file. +// If the file is a symbolic link, the returned FileInfo +// describes the symbolic link. Lstat makes no attempt to follow the link. +// If there is an error, it will be of type *PathError. +func (fs *Reader) Lstat(name string) (os.FileInfo, error) { + switch name { + case fs.Name: + return fs.fi(), nil + case "/", ".": + fi := fakeFileInfo{ + name: name, + size: 0, + mode: 0755, + modtime: time.Now(), + } + return fi, nil + } + + return nil, os.ErrNotExist +} + +// Join joins any number of path elements into a single path, adding a +// Separator if necessary. Join calls Clean on the result; in particular, all +// empty strings are ignored. On Windows, the result is a UNC path if and only +// if the first path element is a UNC path. +func (fs *Reader) Join(elem ...string) string { + return path.Join(elem...) +} + +// Separator returns the OS and FS dependent separator for dirs/subdirs/files. 
+func (fs *Reader) Separator() string { + return "/" +} + +// IsAbs reports whether the path is absolute. For the Reader, this is always the case. +func (fs *Reader) IsAbs(p string) bool { + return true +} + +// Abs returns an absolute representation of path. If the path is not absolute +// it will be joined with the current working directory to turn it into an +// absolute path. The absolute path name for a given file is not guaranteed to +// be unique. Abs calls Clean on the result. +// +// For the Reader, all paths are absolute. +func (fs *Reader) Abs(p string) (string, error) { + return path.Clean(p), nil +} + +// Clean returns the cleaned path. For details, see filepath.Clean. +func (fs *Reader) Clean(p string) string { + return path.Clean(p) +} + +// Base returns the last element of p. +func (fs *Reader) Base(p string) string { + return path.Base(p) +} + +// Dir returns p without the last element. +func (fs *Reader) Dir(p string) string { + return path.Dir(p) +} + +func newReaderFile(rd io.ReadCloser, fi os.FileInfo) readerFile { + return readerFile{ + ReadCloser: rd, + fakeFile: fakeFile{ + FileInfo: fi, + name: fi.Name(), + }, + } +} + +type readerFile struct { + io.ReadCloser + fakeFile +} + +func (r readerFile) Read(p []byte) (int, error) { + return r.ReadCloser.Read(p) +} + +func (r readerFile) Close() error { + return r.ReadCloser.Close() +} + +// ensure that readerFile implements File +var _ File = readerFile{} + +// fakeFile implements all File methods, but only returns errors for anything +// except Stat() and Name(). +type fakeFile struct { + name string + os.FileInfo +} + +// ensure that fakeFile implements File +var _ File = fakeFile{} + +func (f fakeFile) Fd() uintptr { + return 0 +} + +func (f fakeFile) Readdirnames(n int) ([]string, error) { + return nil, os.ErrInvalid +} + +func (f fakeFile) Readdir(n int) ([]os.FileInfo, error) { + return nil, os.ErrInvalid +} + +func (f fakeFile) Seek(int64, int) (int64, error) { + return 0, os.ErrInvalid +} + +func (f fakeFile) Write(p []byte) (int, error) { + return 0, os.ErrInvalid +} + +func (f fakeFile) Read(p []byte) (int, error) { + return 0, os.ErrInvalid +} + +func (f fakeFile) Close() error { + return nil +} + +func (f fakeFile) Stat() (os.FileInfo, error) { + return f.FileInfo, nil +} + +func (f fakeFile) Name() string { + return f.name +} + +// fakeDir implements Readdirnames and Readdir, everything else is delegated to fakeFile. +type fakeDir struct { + entries []os.FileInfo + fakeFile +} + +func (d fakeDir) Readdirnames(n int) ([]string, error) { + if n >= 0 { + return nil, errors.New("not implemented") + } + names := make([]string, 0, len(d.entries)) + for _, entry := range d.entries { + names = append(names, entry.Name()) + } + + return names, nil +} + +func (d fakeDir) Readdir(n int) ([]os.FileInfo, error) { + if n >= 0 { + return nil, errors.New("not implemented") + } + return d.entries, nil +} + +// fakeFileInfo implements the bare minimum of os.FileInfo. 
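// ---------------------------------------------------------------------------
// Editor's aside, not part of the upstream patch: Reader presents a single
// io.ReadCloser as a file system containing exactly one file, which is what
// lets a backup-from-stdin path reuse the ordinary archiver code. The
// sync.Once guard means the file can be handed out exactly once; later opens
// fail with syscall.EIO. A standalone sketch, assuming the internal/fs
// import path from this patch.
package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"time"

	"github.com/restic/restic/internal/fs"
)

func main() {
	data := []byte("piped in via stdin")
	rd := &fs.Reader{
		Name:       "stdin-file",
		ReadCloser: ioutil.NopCloser(bytes.NewReader(data)),
		Mode:       0644,
		Size:       int64(len(data)),
		ModTime:    time.Now(),
	}

	f, err := rd.Open("stdin-file")
	if err != nil {
		panic(err)
	}
	buf, _ := ioutil.ReadAll(f)
	fmt.Printf("%s\n", buf)

	_, err = rd.Open("stdin-file") // second open: syscall.EIO
	fmt.Println(err)
}
// --- end of editor's aside ---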
+type fakeFileInfo struct { + name string + size int64 + mode os.FileMode + modtime time.Time + sys interface{} +} + +func (fi fakeFileInfo) Name() string { + return fi.name +} + +func (fi fakeFileInfo) Size() int64 { + return fi.size +} + +func (fi fakeFileInfo) Mode() os.FileMode { + return fi.mode +} + +func (fi fakeFileInfo) ModTime() time.Time { + return fi.modtime +} + +func (fi fakeFileInfo) IsDir() bool { + return fi.mode&os.ModeDir > 0 +} + +func (fi fakeFileInfo) Sys() interface{} { + return fi.sys +} diff --git a/internal/fs/fs_reader_test.go b/internal/fs/fs_reader_test.go new file mode 100644 index 000000000..f4cb2bb34 --- /dev/null +++ b/internal/fs/fs_reader_test.go @@ -0,0 +1,319 @@ +package fs + +import ( + "bytes" + "io/ioutil" + "os" + "sort" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/restic/restic/internal/test" +) + +func verifyFileContentOpen(t testing.TB, fs FS, filename string, want []byte) { + f, err := fs.Open(filename) + if err != nil { + t.Fatal(err) + } + + buf, err := ioutil.ReadAll(f) + if err != nil { + t.Fatal(err) + } + + err = f.Close() + if err != nil { + t.Fatal(err) + } + + if !cmp.Equal(want, buf) { + t.Error(cmp.Diff(want, buf)) + } +} + +func verifyFileContentOpenFile(t testing.TB, fs FS, filename string, want []byte) { + f, err := fs.OpenFile(filename, O_RDONLY, 0) + if err != nil { + t.Fatal(err) + } + + buf, err := ioutil.ReadAll(f) + if err != nil { + t.Fatal(err) + } + + err = f.Close() + if err != nil { + t.Fatal(err) + } + + if !cmp.Equal(want, buf) { + t.Error(cmp.Diff(want, buf)) + } +} + +func verifyDirectoryContents(t testing.TB, fs FS, dir string, want []string) { + f, err := fs.Open(dir) + if err != nil { + t.Fatal(err) + } + + entries, err := f.Readdirnames(-1) + if err != nil { + t.Fatal(err) + } + + err = f.Close() + if err != nil { + t.Fatal(err) + } + + sort.Sort(sort.StringSlice(want)) + sort.Sort(sort.StringSlice(entries)) + + if !cmp.Equal(want, entries) { + t.Error(cmp.Diff(want, entries)) + } +} + +type fiSlice []os.FileInfo + +func (s fiSlice) Len() int { + return len(s) +} + +func (s fiSlice) Less(i, j int) bool { + return s[i].Name() < s[j].Name() +} + +func (s fiSlice) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func verifyDirectoryContentsFI(t testing.TB, fs FS, dir string, want []os.FileInfo) { + f, err := fs.Open(dir) + if err != nil { + t.Fatal(err) + } + + entries, err := f.Readdir(-1) + if err != nil { + t.Fatal(err) + } + + err = f.Close() + if err != nil { + t.Fatal(err) + } + + sort.Sort(fiSlice(want)) + sort.Sort(fiSlice(entries)) + + if len(want) != len(entries) { + t.Errorf("wrong number of entries returned, want %d, got %d", len(want), len(entries)) + } + max := len(want) + if len(entries) < max { + max = len(entries) + } + + for i := 0; i < max; i++ { + fi1 := want[i] + fi2 := entries[i] + + if fi1.Name() != fi2.Name() { + t.Errorf("entry %d: wrong value for Name: want %q, got %q", i, fi1.Name(), fi2.Name()) + } + + if fi1.IsDir() != fi2.IsDir() { + t.Errorf("entry %d: wrong value for IsDir: want %v, got %v", i, fi1.IsDir(), fi2.IsDir()) + } + + if fi1.Mode() != fi2.Mode() { + t.Errorf("entry %d: wrong value for Mode: want %v, got %v", i, fi1.Mode(), fi2.Mode()) + } + + if fi1.ModTime() != fi2.ModTime() { + t.Errorf("entry %d: wrong value for ModTime: want %v, got %v", i, fi1.ModTime(), fi2.ModTime()) + } + + if fi1.Size() != fi2.Size() { + t.Errorf("entry %d: wrong value for Size: want %v, got %v", i, fi1.Size(), fi2.Size()) + } + + if fi1.Sys() != fi2.Sys() { + 
t.Errorf("entry %d: wrong value for Sys: want %v, got %v", i, fi1.Sys(), fi2.Sys()) + } + } +} + +func checkFileInfo(t testing.TB, fi os.FileInfo, filename string, modtime time.Time, mode os.FileMode, isdir bool) { + if fi.IsDir() { + t.Errorf("IsDir returned true, want false") + } + + if fi.Mode() != mode { + t.Errorf("Mode() returned wrong value, want 0%o, got 0%o", mode, fi.Mode()) + } + + if !modtime.Equal(time.Time{}) && !fi.ModTime().Equal(modtime) { + t.Errorf("ModTime() returned wrong value, want %v, got %v", modtime, fi.ModTime()) + } + + if fi.Name() != filename { + t.Errorf("Name() returned wrong value, want %q, got %q", filename, fi.Name()) + } +} + +func TestFSReader(t *testing.T) { + data := test.Random(55, 1<<18+588) + now := time.Now() + filename := "foobar" + + var tests = []struct { + name string + f func(t *testing.T, fs FS) + }{ + { + name: "Readdirnames-slash", + f: func(t *testing.T, fs FS) { + verifyDirectoryContents(t, fs, "/", []string{filename}) + }, + }, + { + name: "Readdirnames-current", + f: func(t *testing.T, fs FS) { + verifyDirectoryContents(t, fs, ".", []string{filename}) + }, + }, + { + name: "Readdir-slash", + f: func(t *testing.T, fs FS) { + fi := fakeFileInfo{ + mode: 0644, + modtime: now, + name: filename, + size: int64(len(data)), + } + verifyDirectoryContentsFI(t, fs, "/", []os.FileInfo{fi}) + }, + }, + { + name: "Readdir-current", + f: func(t *testing.T, fs FS) { + fi := fakeFileInfo{ + mode: 0644, + modtime: now, + name: filename, + size: int64(len(data)), + } + verifyDirectoryContentsFI(t, fs, ".", []os.FileInfo{fi}) + }, + }, + { + name: "file/Open", + f: func(t *testing.T, fs FS) { + verifyFileContentOpen(t, fs, filename, data) + }, + }, + { + name: "file/OpenFile", + f: func(t *testing.T, fs FS) { + verifyFileContentOpenFile(t, fs, filename, data) + }, + }, + { + name: "file/Lstat", + f: func(t *testing.T, fs FS) { + fi, err := fs.Lstat(filename) + if err != nil { + t.Fatal(err) + } + + checkFileInfo(t, fi, filename, now, 0644, false) + }, + }, + { + name: "file/Stat", + f: func(t *testing.T, fs FS) { + f, err := fs.Open(filename) + if err != nil { + t.Fatal(err) + } + + fi, err := f.Stat() + if err != nil { + t.Fatal(err) + } + + err = f.Close() + if err != nil { + t.Fatal(err) + } + + checkFileInfo(t, fi, filename, now, 0644, false) + }, + }, + { + name: "dir/Lstat-slash", + f: func(t *testing.T, fs FS) { + fi, err := fs.Lstat("/") + if err != nil { + t.Fatal(err) + } + + checkFileInfo(t, fi, "/", time.Time{}, 0755, false) + }, + }, + { + name: "dir/Lstat-current", + f: func(t *testing.T, fs FS) { + fi, err := fs.Lstat(".") + if err != nil { + t.Fatal(err) + } + + checkFileInfo(t, fi, ".", time.Time{}, 0755, false) + }, + }, + { + name: "dir/Open-slash", + f: func(t *testing.T, fs FS) { + fi, err := fs.Lstat("/") + if err != nil { + t.Fatal(err) + } + + checkFileInfo(t, fi, "/", time.Time{}, 0755, false) + }, + }, + { + name: "dir/Open-current", + f: func(t *testing.T, fs FS) { + fi, err := fs.Lstat(".") + if err != nil { + t.Fatal(err) + } + + checkFileInfo(t, fi, ".", time.Time{}, 0755, false) + }, + }, + } + + for _, test := range tests { + fs := &Reader{ + Name: filename, + ReadCloser: ioutil.NopCloser(bytes.NewReader(data)), + + Mode: 0644, + Size: int64(len(data)), + ModTime: now, + } + + t.Run(test.name, func(t *testing.T) { + test.f(t, fs) + }) + } +} diff --git a/internal/fs/fs_track.go b/internal/fs/fs_track.go new file mode 100644 index 000000000..319fbfaff --- /dev/null +++ b/internal/fs/fs_track.go @@ -0,0 +1,54 @@ +package fs + 
+import ( + "fmt" + "os" + "runtime" + "runtime/debug" +) + +// Track is a wrapper around another file system which installs finalizers +// for open files which call panic() when they are not closed when the garbage +// collector releases them. This can be used to find resource leaks via open +// files. +type Track struct { + FS +} + +// Open wraps the Open method of the underlying file system. +func (fs Track) Open(name string) (File, error) { + f, err := fs.FS.Open(fixpath(name)) + if err != nil { + return nil, err + } + + return newTrackFile(debug.Stack(), name, f), nil +} + +// OpenFile wraps the OpenFile method of the underlying file system. +func (fs Track) OpenFile(name string, flag int, perm os.FileMode) (File, error) { + f, err := fs.FS.OpenFile(fixpath(name), flag, perm) + if err != nil { + return nil, err + } + + return newTrackFile(debug.Stack(), name, f), nil +} + +type trackFile struct { + File +} + +func newTrackFile(stack []byte, filename string, file File) *trackFile { + f := &trackFile{file} + runtime.SetFinalizer(f, func(f *trackFile) { + fmt.Fprintf(os.Stderr, "file %s not closed\n\nStacktrack:\n%s\n", filename, stack) + panic("file " + filename + " not closed") + }) + return f +} + +func (f *trackFile) Close() error { + runtime.SetFinalizer(f, nil) + return f.File.Close() +} diff --git a/internal/fs/helpers.go b/internal/fs/helpers.go new file mode 100644 index 000000000..768cc975b --- /dev/null +++ b/internal/fs/helpers.go @@ -0,0 +1,63 @@ +package fs + +import ( + "io/ioutil" + "os" + "testing" + + "github.com/restic/restic/internal/test" +) + +// IsRegularFile returns true if fi belongs to a normal file. If fi is nil, +// false is returned. +func IsRegularFile(fi os.FileInfo) bool { + if fi == nil { + return false + } + + return fi.Mode()&(os.ModeType|os.ModeCharDevice) == 0 +} + +// TestChdir changes the current directory to dest, the function back returns to the previous directory. +func TestChdir(t testing.TB, dest string) (back func()) { + test.Helper(t).Helper() + + prev, err := os.Getwd() + if err != nil { + t.Fatal(err) + } + + t.Logf("chdir to %v", dest) + err = os.Chdir(dest) + if err != nil { + t.Fatal(err) + } + + return func() { + test.Helper(t).Helper() + t.Logf("chdir back to %v", prev) + err = os.Chdir(prev) + if err != nil { + t.Fatal(err) + } + } +} + +// TestTempFile returns a new temporary file, which is removed when cleanup() +// is called. +func TestTempFile(t testing.TB, prefix string) (File, func()) { + f, err := ioutil.TempFile("", prefix) + if err != nil { + t.Fatal(err) + } + + cleanup := func() { + _ = f.Close() + err = Remove(f.Name()) + if err != nil { + t.Fatal(err) + } + } + + return f, cleanup +} diff --git a/internal/fs/interface.go b/internal/fs/interface.go new file mode 100644 index 000000000..1c2260215 --- /dev/null +++ b/internal/fs/interface.go @@ -0,0 +1,38 @@ +package fs + +import ( + "io" + "os" +) + +// FS bundles all methods needed for a file system. +type FS interface { + Open(name string) (File, error) + OpenFile(name string, flag int, perm os.FileMode) (File, error) + Stat(name string) (os.FileInfo, error) + Lstat(name string) (os.FileInfo, error) + + Join(elem ...string) string + Separator() string + Abs(path string) (string, error) + Clean(path string) string + VolumeName(path string) string + IsAbs(path string) bool + + Dir(path string) string + Base(path string) string +} + +// File is an open file on a file system. 
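// ---------------------------------------------------------------------------
// Editor's aside, not part of the upstream patch: Track above wraps another
// FS and attaches a finalizer to every opened file; if the garbage collector
// frees a file that was never Close()d, the finalizer prints the stack trace
// captured at Open time and panics, turning a silent descriptor leak into a
// loud failure. A hedged sketch of enabling it; the file name is made up,
// and a single GC cycle is not guaranteed to run the finalizer.
package main

import (
	"runtime"

	"github.com/restic/restic/internal/fs"
)

func main() {
	var filesystem fs.FS = fs.Track{FS: fs.Local{}}

	f, err := filesystem.Open("go.mod") // any existing file
	if err != nil {
		panic(err)
	}
	_ = f // deliberately never closed

	f = nil
	runtime.GC() // with luck, the finalizer fires and panics here
}
// --- end of editor's aside ---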
+type File interface { + io.Reader + io.Writer + io.Closer + + Fd() uintptr + Readdirnames(n int) ([]string, error) + Readdir(int) ([]os.FileInfo, error) + Seek(int64, int) (int64, error) + Stat() (os.FileInfo, error) + Name() string +} diff --git a/internal/fs/path_prefix.go b/internal/fs/path_prefix.go new file mode 100644 index 000000000..e5ab5c0db --- /dev/null +++ b/internal/fs/path_prefix.go @@ -0,0 +1,42 @@ +package fs + +import ( + "path/filepath" +) + +// HasPathPrefix returns true if p is a subdir of (or a file within) base. It +// assumes a file system which is case sensitive. If the paths are not of the +// same type (one is relative, the other is absolute), false is returned. +func HasPathPrefix(base, p string) bool { + if filepath.VolumeName(base) != filepath.VolumeName(p) { + return false + } + + // handle case when base and p are not of the same type + if filepath.IsAbs(base) != filepath.IsAbs(p) { + return false + } + + base = filepath.Clean(base) + p = filepath.Clean(p) + + if base == p { + return true + } + + for { + dir := filepath.Dir(p) + + if base == dir { + return true + } + + if p == dir { + break + } + + p = dir + } + + return false +} diff --git a/internal/fs/path_prefix_test.go b/internal/fs/path_prefix_test.go new file mode 100644 index 000000000..aff04bd83 --- /dev/null +++ b/internal/fs/path_prefix_test.go @@ -0,0 +1,59 @@ +package fs + +import ( + "path/filepath" + "runtime" + "testing" +) + +func fromSlashAbs(p string) string { + if runtime.GOOS == "windows" { + if len(p) > 0 && p[0] == '/' { + p = "c:" + p + } + } + + return filepath.FromSlash(p) +} + +func TestHasPathPrefix(t *testing.T) { + var tests = []struct { + base, p string + result bool + }{ + {"", "", true}, + {".", ".", true}, + {".", "foo", true}, + {"foo", ".", false}, + {"/", "", false}, + {"/", "x", false}, + {"x", "/", false}, + {"/", "/x", true}, + {"/x", "/y", false}, + {"/home/user/foo", "/home", false}, + {"/home/user/foo/", "/home", false}, + {"/home/user/foo", "/home/", false}, + {"/home/user/foo/", "/home/", false}, + {"/home/user/foo", "/home/user/foo/bar", true}, + {"/home/user/foo", "/home/user/foo/bar/baz/x/y/z", true}, + {"/home/user/foo", "/home/user/foobar", false}, + {"/home/user/Foo", "/home/user/foo/bar/baz", false}, + {"/home/user/foo", "/home/user/Foo/bar/baz", false}, + {"user/foo", "user/foo/bar/baz", true}, + {"user/foo", "./user/foo", true}, + {"user/foo", "./user/foo/", true}, + {"/home/user/foo", "./user/foo/", false}, + } + + for _, test := range tests { + t.Run("", func(t *testing.T) { + base := fromSlashAbs(test.base) + p := fromSlashAbs(test.p) + result := HasPathPrefix(base, p) + if result != test.result { + t.Fatalf("wrong result for HasPathPrefix(%q, %q): want %v, got %v", + base, p, test.result, result) + } + }) + } +} diff --git a/internal/fs/stat.go b/internal/fs/stat.go new file mode 100644 index 000000000..d37d12942 --- /dev/null +++ b/internal/fs/stat.go @@ -0,0 +1,34 @@ +package fs + +import ( + "os" + "time" +) + +// ExtendedFileInfo is an extended stat_t, filled with attributes that are +// supported by most operating systems. The original FileInfo is embedded. 
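// ---------------------------------------------------------------------------
// Editor's aside, not part of the upstream patch: HasPathPrefix above walks
// p upwards with filepath.Dir until it either reaches base or stops making
// progress, so "/home/user/foobar" is correctly rejected as a child of
// "/home/user/foo" (a naive strings.HasPrefix would accept it). A standalone
// sketch of the same idea, omitting the volume-name check.
package main

import (
	"fmt"
	"path/filepath"
)

func hasPathPrefix(base, p string) bool {
	if filepath.IsAbs(base) != filepath.IsAbs(p) {
		return false // mixed relative/absolute never matches
	}
	base, p = filepath.Clean(base), filepath.Clean(p)
	for {
		if p == base {
			return true
		}
		dir := filepath.Dir(p)
		if dir == p { // reached the root or "."
			return false
		}
		p = dir
	}
}

func main() {
	fmt.Println(hasPathPrefix("/home/user/foo", "/home/user/foo/bar")) // true
	fmt.Println(hasPathPrefix("/home/user/foo", "/home/user/foobar"))  // false
}
// --- end of editor's aside ---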
+type ExtendedFileInfo struct { + os.FileInfo + + DeviceID uint64 // ID of device containing the file + Inode uint64 // Inode number + Links uint64 // Number of hard links + UID uint32 // owner user ID + GID uint32 // owner group ID + Device uint64 // Device ID (if this is a device file) + BlockSize int64 // block size for filesystem IO + Blocks int64 // number of allocated filesystem blocks + Size int64 // file size in byte + + AccessTime time.Time // last access time stamp + ModTime time.Time // last (content) modification time stamp +} + +// ExtendedStat returns an ExtendedFileInfo constructed from the os.FileInfo. +func ExtendedStat(fi os.FileInfo) ExtendedFileInfo { + if fi == nil { + panic("os.FileInfo is nil") + } + + return extendedStat(fi) +} diff --git a/internal/fs/stat_bsd.go b/internal/fs/stat_bsd.go new file mode 100644 index 000000000..62a258e64 --- /dev/null +++ b/internal/fs/stat_bsd.go @@ -0,0 +1,36 @@ +// +build freebsd darwin netbsd + +package fs + +import ( + "fmt" + "os" + "syscall" + "time" +) + +// extendedStat extracts info into an ExtendedFileInfo for unix based operating systems. +func extendedStat(fi os.FileInfo) ExtendedFileInfo { + s, ok := fi.Sys().(*syscall.Stat_t) + if !ok { + panic(fmt.Sprintf("conversion to syscall.Stat_t failed, type is %T", fi.Sys())) + } + + extFI := ExtendedFileInfo{ + FileInfo: fi, + DeviceID: uint64(s.Dev), + Inode: uint64(s.Ino), + Links: uint64(s.Nlink), + UID: s.Uid, + GID: s.Gid, + Device: uint64(s.Rdev), + BlockSize: int64(s.Blksize), + Blocks: s.Blocks, + Size: s.Size, + + AccessTime: time.Unix(s.Atimespec.Unix()), + ModTime: time.Unix(s.Mtimespec.Unix()), + } + + return extFI +} diff --git a/internal/fs/stat_test.go b/internal/fs/stat_test.go new file mode 100644 index 000000000..43e514047 --- /dev/null +++ b/internal/fs/stat_test.go @@ -0,0 +1,31 @@ +package fs + +import ( + "io/ioutil" + "path/filepath" + "testing" + + restictest "github.com/restic/restic/internal/test" +) + +func TestExtendedStat(t *testing.T) { + tempdir, cleanup := restictest.TempDir(t) + defer cleanup() + + filename := filepath.Join(tempdir, "file") + err := ioutil.WriteFile(filename, []byte("foobar"), 0640) + if err != nil { + t.Fatal(err) + } + + fi, err := Lstat(filename) + if err != nil { + t.Fatal(err) + } + + extFI := ExtendedStat(fi) + + if !extFI.ModTime.Equal(fi.ModTime()) { + t.Errorf("extFI.ModTime does not match, want %v, got %v", fi.ModTime(), extFI.ModTime) + } +} diff --git a/internal/fs/stat_unix.go b/internal/fs/stat_unix.go new file mode 100644 index 000000000..56c22f8bc --- /dev/null +++ b/internal/fs/stat_unix.go @@ -0,0 +1,36 @@ +// +build !windows,!darwin,!freebsd,!netbsd + +package fs + +import ( + "fmt" + "os" + "syscall" + "time" +) + +// extendedStat extracts info into an ExtendedFileInfo for unix based operating systems. 
+func extendedStat(fi os.FileInfo) ExtendedFileInfo { + s, ok := fi.Sys().(*syscall.Stat_t) + if !ok { + panic(fmt.Sprintf("conversion to syscall.Stat_t failed, type is %T", fi.Sys())) + } + + extFI := ExtendedFileInfo{ + FileInfo: fi, + DeviceID: uint64(s.Dev), + Inode: s.Ino, + Links: uint64(s.Nlink), + UID: s.Uid, + GID: s.Gid, + Device: uint64(s.Rdev), + BlockSize: int64(s.Blksize), + Blocks: s.Blocks, + Size: s.Size, + + AccessTime: time.Unix(s.Atim.Unix()), + ModTime: time.Unix(s.Mtim.Unix()), + } + + return extFI +} diff --git a/internal/fs/stat_windows.go b/internal/fs/stat_windows.go new file mode 100644 index 000000000..16f9fe0eb --- /dev/null +++ b/internal/fs/stat_windows.go @@ -0,0 +1,31 @@ +// +build windows + +package fs + +import ( + "fmt" + "os" + "syscall" + "time" +) + +// extendedStat extracts info into an ExtendedFileInfo for Windows. +func extendedStat(fi os.FileInfo) ExtendedFileInfo { + s, ok := fi.Sys().(*syscall.Win32FileAttributeData) + if !ok { + panic(fmt.Sprintf("conversion to syscall.Win32FileAttributeData failed, type is %T", fi.Sys())) + } + + extFI := ExtendedFileInfo{ + FileInfo: fi, + Size: int64(s.FileSizeLow) + int64(s.FileSizeHigh)<<32, + } + + atime := syscall.NsecToTimespec(s.LastAccessTime.Nanoseconds()) + extFI.AccessTime = time.Unix(atime.Unix()) + + mtime := syscall.NsecToTimespec(s.LastWriteTime.Nanoseconds()) + extFI.ModTime = time.Unix(mtime.Unix()) + + return extFI +} diff --git a/internal/fuse/blob_size_cache.go b/internal/fuse/blob_size_cache.go new file mode 100644 index 000000000..47599a820 --- /dev/null +++ b/internal/fuse/blob_size_cache.go @@ -0,0 +1,37 @@ +// +build !netbsd +// +build !openbsd +// +build !solaris +// +build !windows + +package fuse + +import ( + "github.com/restic/restic/internal/restic" + "golang.org/x/net/context" +) + +// BlobSizeCache caches the size of blobs in the repo. +type BlobSizeCache struct { + m map[restic.ID]uint +} + +// NewBlobSizeCache returns a new blob size cache containing all entries from midx. +func NewBlobSizeCache(ctx context.Context, idx restic.Index) *BlobSizeCache { + m := make(map[restic.ID]uint, 1000) + for pb := range idx.Each(ctx) { + m[pb.ID] = uint(restic.PlaintextLength(int(pb.Length))) + } + return &BlobSizeCache{ + m: m, + } +} + +// Lookup returns the size of the blob id. 
+func (c *BlobSizeCache) Lookup(id restic.ID) (size uint, found bool) { + if c == nil { + return 0, false + } + + size, found = c.m[id] + return size, found +} diff --git a/internal/fuse/dir.go b/internal/fuse/dir.go new file mode 100644 index 000000000..f899a4313 --- /dev/null +++ b/internal/fuse/dir.go @@ -0,0 +1,220 @@ +// +build !netbsd +// +build !openbsd +// +build !solaris +// +build !windows + +package fuse + +import ( + "os" + "path/filepath" + + "bazil.org/fuse" + "bazil.org/fuse/fs" + "golang.org/x/net/context" + + "github.com/restic/restic/internal/debug" + "github.com/restic/restic/internal/restic" +) + +// Statically ensure that *dir implement those interface +var _ = fs.HandleReadDirAller(&dir{}) +var _ = fs.NodeStringLookuper(&dir{}) + +type dir struct { + root *Root + items map[string]*restic.Node + inode uint64 + parentInode uint64 + node *restic.Node + + blobsize *BlobSizeCache +} + +func cleanupNodeName(name string) string { + return filepath.Base(name) +} + +func newDir(ctx context.Context, root *Root, inode, parentInode uint64, node *restic.Node) (*dir, error) { + debug.Log("new dir for %v (%v)", node.Name, node.Subtree) + tree, err := root.repo.LoadTree(ctx, *node.Subtree) + if err != nil { + debug.Log(" error loading tree %v: %v", node.Subtree, err) + return nil, err + } + items := make(map[string]*restic.Node) + for _, node := range tree.Nodes { + items[cleanupNodeName(node.Name)] = node + } + + return &dir{ + root: root, + node: node, + items: items, + inode: inode, + parentInode: parentInode, + }, nil +} + +// replaceSpecialNodes replaces nodes with name "." and "/" by their contents. +// Otherwise, the node is returned. +func replaceSpecialNodes(ctx context.Context, repo restic.Repository, node *restic.Node) ([]*restic.Node, error) { + if node.Type != "dir" || node.Subtree == nil { + return []*restic.Node{node}, nil + } + + if node.Name != "." 
&& node.Name != "/" { + return []*restic.Node{node}, nil + } + + tree, err := repo.LoadTree(ctx, *node.Subtree) + if err != nil { + return nil, err + } + + return tree.Nodes, nil +} + +func newDirFromSnapshot(ctx context.Context, root *Root, inode uint64, snapshot *restic.Snapshot) (*dir, error) { + debug.Log("new dir for snapshot %v (%v)", snapshot.ID(), snapshot.Tree) + tree, err := root.repo.LoadTree(ctx, *snapshot.Tree) + if err != nil { + debug.Log(" loadTree(%v) failed: %v", snapshot.ID(), err) + return nil, err + } + items := make(map[string]*restic.Node) + for _, n := range tree.Nodes { + nodes, err := replaceSpecialNodes(ctx, root.repo, n) + if err != nil { + debug.Log(" replaceSpecialNodes(%v) failed: %v", n, err) + return nil, err + } + + for _, node := range nodes { + items[cleanupNodeName(node.Name)] = node + } + } + + return &dir{ + root: root, + node: &restic.Node{ + UID: uint32(os.Getuid()), + GID: uint32(os.Getgid()), + AccessTime: snapshot.Time, + ModTime: snapshot.Time, + ChangeTime: snapshot.Time, + Mode: os.ModeDir | 0555, + }, + items: items, + inode: inode, + }, nil +} + +func (d *dir) Attr(ctx context.Context, a *fuse.Attr) error { + debug.Log("called") + a.Inode = d.inode + a.Mode = os.ModeDir | d.node.Mode + + if !d.root.cfg.OwnerIsRoot { + a.Uid = d.node.UID + a.Gid = d.node.GID + } + a.Atime = d.node.AccessTime + a.Ctime = d.node.ChangeTime + a.Mtime = d.node.ModTime + + a.Nlink = d.calcNumberOfLinks() + + return nil +} + +func (d *dir) calcNumberOfLinks() uint32 { + // a directory d has 2 hardlinks + the number + // of directories contained by d + var count uint32 + count = 2 + for _, node := range d.items { + if node.Type == "dir" { + count++ + } + } + return count +} + +func (d *dir) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) { + debug.Log("called") + ret := make([]fuse.Dirent, 0, len(d.items)+2) + + ret = append(ret, fuse.Dirent{ + Inode: d.inode, + Name: ".", + Type: fuse.DT_Dir, + }) + + ret = append(ret, fuse.Dirent{ + Inode: d.parentInode, + Name: "..", + Type: fuse.DT_Dir, + }) + + for _, node := range d.items { + name := cleanupNodeName(node.Name) + var typ fuse.DirentType + switch node.Type { + case "dir": + typ = fuse.DT_Dir + case "file": + typ = fuse.DT_File + case "symlink": + typ = fuse.DT_Link + } + + ret = append(ret, fuse.Dirent{ + Inode: fs.GenerateDynamicInode(d.inode, name), + Type: typ, + Name: name, + }) + } + + return ret, nil +} + +func (d *dir) Lookup(ctx context.Context, name string) (fs.Node, error) { + debug.Log("Lookup(%v)", name) + node, ok := d.items[name] + if !ok { + debug.Log(" Lookup(%v) -> not found", name) + return nil, fuse.ENOENT + } + switch node.Type { + case "dir": + return newDir(ctx, d.root, fs.GenerateDynamicInode(d.inode, name), d.inode, node) + case "file": + return newFile(ctx, d.root, fs.GenerateDynamicInode(d.inode, name), node) + case "symlink": + return newLink(ctx, d.root, fs.GenerateDynamicInode(d.inode, name), node) + case "dev", "chardev", "fifo", "socket": + return newOther(ctx, d.root, fs.GenerateDynamicInode(d.inode, name), node) + default: + debug.Log(" node %v has unknown type %v", name, node.Type) + return nil, fuse.ENOENT + } +} + +func (d *dir) Listxattr(ctx context.Context, req *fuse.ListxattrRequest, resp *fuse.ListxattrResponse) error { + debug.Log("Listxattr(%v, %v)", d.node.Name, req.Size) + for _, attr := range d.node.ExtendedAttributes { + resp.Append(attr.Name) + } + return nil +} + +func (d *dir) Getxattr(ctx context.Context, req *fuse.GetxattrRequest, resp 
*fuse.GetxattrResponse) error { + debug.Log("Getxattr(%v, %v, %v)", d.node.Name, req.Name, req.Size) + attrval := d.node.GetExtendedAttribute(req.Name) + if attrval != nil { + resp.Xattr = attrval + return nil + } + return fuse.ErrNoXattr +} diff --git a/internal/fuse/file.go b/internal/fuse/file.go new file mode 100644 index 000000000..5fefd5e3e --- /dev/null +++ b/internal/fuse/file.go @@ -0,0 +1,184 @@ +// +build !netbsd +// +build !openbsd +// +build !solaris +// +build !windows + +package fuse + +import ( + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/restic" + + "github.com/restic/restic/internal/debug" + + "bazil.org/fuse" + "bazil.org/fuse/fs" + "golang.org/x/net/context" +) + +// The default block size to report in stat +const blockSize = 512 + +// Statically ensure that *file implements the given interface +var _ = fs.HandleReader(&file{}) +var _ = fs.HandleReleaser(&file{}) + +type file struct { + root *Root + node *restic.Node + inode uint64 + + sizes []int + blobs [][]byte +} + +func newFile(ctx context.Context, root *Root, inode uint64, node *restic.Node) (fusefile *file, err error) { + debug.Log("create new file for %v with %d blobs", node.Name, len(node.Content)) + var bytes uint64 + sizes := make([]int, len(node.Content)) + for i, id := range node.Content { + size, ok := root.blobSizeCache.Lookup(id) + if !ok { + var found bool + size, found = root.repo.LookupBlobSize(id, restic.DataBlob) + if !found { + return nil, errors.Errorf("id %v not found in repository", id) + } + } + + sizes[i] = int(size) + bytes += uint64(size) + } + + if bytes != node.Size { + debug.Log("sizes do not match: node.Size %v != size %v, using real size", node.Size, bytes) + node.Size = bytes + } + + return &file{ + inode: inode, + root: root, + node: node, + sizes: sizes, + blobs: make([][]byte, len(node.Content)), + }, nil +} + +func (f *file) Attr(ctx context.Context, a *fuse.Attr) error { + debug.Log("Attr(%v)", f.node.Name) + a.Inode = f.inode + a.Mode = f.node.Mode + a.Size = f.node.Size + a.Blocks = (f.node.Size / blockSize) + 1 + a.BlockSize = blockSize + a.Nlink = uint32(f.node.Links) + + if !f.root.cfg.OwnerIsRoot { + a.Uid = f.node.UID + a.Gid = f.node.GID + } + a.Atime = f.node.AccessTime + a.Ctime = f.node.ChangeTime + a.Mtime = f.node.ModTime + + return nil + +} + +func (f *file) getBlobAt(ctx context.Context, i int) (blob []byte, err error) { + debug.Log("getBlobAt(%v, %v)", f.node.Name, i) + if f.blobs[i] != nil { + return f.blobs[i], nil + } + + // release earlier blobs + for j := 0; j < i; j++ { + f.blobs[j] = nil + } + + buf := restic.NewBlobBuffer(f.sizes[i]) + n, err := f.root.repo.LoadBlob(ctx, restic.DataBlob, f.node.Content[i], buf) + if err != nil { + debug.Log("LoadBlob(%v, %v) failed: %v", f.node.Name, f.node.Content[i], err) + return nil, err + } + f.blobs[i] = buf[:n] + + return buf[:n], nil +} + +func (f *file) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.ReadResponse) error { + debug.Log("Read(%v, %v, %v), file size %v", f.node.Name, req.Size, req.Offset, f.node.Size) + offset := req.Offset + + if uint64(offset) > f.node.Size { + debug.Log("Read(%v): offset is greater than file size: %v > %v", + f.node.Name, req.Offset, f.node.Size) + + // return no data + resp.Data = resp.Data[:0] + return nil + } + + // handle special case: file is empty + if f.node.Size == 0 { + resp.Data = resp.Data[:0] + return nil + } + + // Skip blobs before the offset + startContent := 0 + for offset > int64(f.sizes[startContent]) { + offset -= 
int64(f.sizes[startContent]) + startContent++ + } + + dst := resp.Data[0:req.Size] + readBytes := 0 + remainingBytes := req.Size + for i := startContent; remainingBytes > 0 && i < len(f.sizes); i++ { + blob, err := f.getBlobAt(ctx, i) + if err != nil { + return err + } + + if offset > 0 { + blob = blob[offset:] + offset = 0 + } + + copied := copy(dst, blob) + remainingBytes -= copied + readBytes += copied + + dst = dst[copied:] + } + resp.Data = resp.Data[:readBytes] + + return nil +} + +func (f *file) Release(ctx context.Context, req *fuse.ReleaseRequest) error { + for i := range f.blobs { + f.blobs[i] = nil + } + return nil +} + +func (f *file) Listxattr(ctx context.Context, req *fuse.ListxattrRequest, resp *fuse.ListxattrResponse) error { + debug.Log("Listxattr(%v, %v)", f.node.Name, req.Size) + for _, attr := range f.node.ExtendedAttributes { + resp.Append(attr.Name) + } + return nil +} + +func (f *file) Getxattr(ctx context.Context, req *fuse.GetxattrRequest, resp *fuse.GetxattrResponse) error { + debug.Log("Getxattr(%v, %v, %v)", f.node.Name, req.Name, req.Size) + attrval := f.node.GetExtendedAttribute(req.Name) + if attrval != nil { + resp.Xattr = attrval + return nil + } + return fuse.ErrNoXattr +} diff --git a/internal/fuse/file_test.go b/internal/fuse/file_test.go new file mode 100644 index 000000000..2909d6636 --- /dev/null +++ b/internal/fuse/file_test.go @@ -0,0 +1,154 @@ +// +build !netbsd +// +build !openbsd +// +build !solaris +// +build !windows + +package fuse + +import ( + "bytes" + "math/rand" + "testing" + "time" + + "golang.org/x/net/context" + + "github.com/restic/restic/internal/repository" + "github.com/restic/restic/internal/restic" + + "bazil.org/fuse" + "bazil.org/fuse/fs" + + rtest "github.com/restic/restic/internal/test" +) + +func testRead(t testing.TB, f *file, offset, length int, data []byte) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + req := &fuse.ReadRequest{ + Offset: int64(offset), + Size: length, + } + resp := &fuse.ReadResponse{ + Data: data, + } + rtest.OK(t, f.Read(ctx, req, resp)) +} + +func firstSnapshotID(t testing.TB, repo restic.Repository) (first restic.ID) { + err := repo.List(context.TODO(), restic.SnapshotFile, func(id restic.ID, size int64) error { + if first.IsNull() { + first = id + } + return nil + }) + + if err != nil { + t.Fatal(err) + } + + return first +} + +func loadFirstSnapshot(t testing.TB, repo restic.Repository) *restic.Snapshot { + id := firstSnapshotID(t, repo) + sn, err := restic.LoadSnapshot(context.TODO(), repo, id) + rtest.OK(t, err) + return sn +} + +func loadTree(t testing.TB, repo restic.Repository, id restic.ID) *restic.Tree { + tree, err := repo.LoadTree(context.TODO(), id) + rtest.OK(t, err) + return tree +} + +func TestFuseFile(t *testing.T) { + repo, cleanup := repository.TestRepository(t) + defer cleanup() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + timestamp, err := time.Parse(time.RFC3339, "2017-01-24T10:42:56+01:00") + rtest.OK(t, err) + restic.TestCreateSnapshot(t, repo, timestamp, 2, 0.1) + + sn := loadFirstSnapshot(t, repo) + tree := loadTree(t, repo, *sn.Tree) + + var content restic.IDs + for _, node := range tree.Nodes { + content = append(content, node.Content...) 
+ } + t.Logf("tree loaded, content: %v", content) + + var ( + filesize uint64 + memfile []byte + ) + for _, id := range content { + size, found := repo.LookupBlobSize(id, restic.DataBlob) + rtest.Assert(t, found, "Expected to find blob id %v", id) + filesize += uint64(size) + + buf := restic.NewBlobBuffer(int(size)) + n, err := repo.LoadBlob(context.TODO(), restic.DataBlob, id, buf) + rtest.OK(t, err) + + if uint(n) != size { + t.Fatalf("not enough bytes read for id %v: want %v, got %v", id.Str(), size, n) + } + + if uint(len(buf)) != size { + t.Fatalf("buffer has wrong length for id %v: want %v, got %v", id.Str(), size, len(buf)) + } + + memfile = append(memfile, buf...) + } + + t.Logf("filesize is %v, memfile has size %v", filesize, len(memfile)) + + node := &restic.Node{ + Name: "foo", + Inode: 23, + Mode: 0742, + Size: filesize, + Content: content, + } + root := &Root{ + blobSizeCache: NewBlobSizeCache(context.TODO(), repo.Index()), + repo: repo, + } + + t.Logf("blob cache has %d entries", len(root.blobSizeCache.m)) + + inode := fs.GenerateDynamicInode(1, "foo") + f, err := newFile(context.TODO(), root, inode, node) + rtest.OK(t, err) + + attr := fuse.Attr{} + rtest.OK(t, f.Attr(ctx, &attr)) + + rtest.Equals(t, inode, attr.Inode) + rtest.Equals(t, node.Mode, attr.Mode) + rtest.Equals(t, node.Size, attr.Size) + rtest.Equals(t, (node.Size/uint64(attr.BlockSize))+1, attr.Blocks) + + for i := 0; i < 200; i++ { + offset := rand.Intn(int(filesize)) + length := rand.Intn(int(filesize)-offset) + 100 + + b := memfile[offset : offset+length] + + buf := make([]byte, length) + + testRead(t, f, offset, length, buf) + if !bytes.Equal(b, buf) { + t.Errorf("test %d failed, wrong data returned (offset %v, length %v)", i, offset, length) + } + } + + rtest.OK(t, f.Release(ctx, nil)) +} diff --git a/internal/fuse/link.go b/internal/fuse/link.go new file mode 100644 index 000000000..6772ac5f0 --- /dev/null +++ b/internal/fuse/link.go @@ -0,0 +1,47 @@ +// +build !netbsd +// +build !openbsd +// +build !solaris +// +build !windows + +package fuse + +import ( + "bazil.org/fuse" + "bazil.org/fuse/fs" + "github.com/restic/restic/internal/restic" + "golang.org/x/net/context" +) + +// Statically ensure that *link implements the given interface +var _ = fs.NodeReadlinker(&link{}) + +type link struct { + root *Root + node *restic.Node + inode uint64 +} + +func newLink(ctx context.Context, root *Root, inode uint64, node *restic.Node) (*link, error) { + return &link{root: root, inode: inode, node: node}, nil +} + +func (l *link) Readlink(ctx context.Context, req *fuse.ReadlinkRequest) (string, error) { + return l.node.LinkTarget, nil +} + +func (l *link) Attr(ctx context.Context, a *fuse.Attr) error { + a.Inode = l.inode + a.Mode = l.node.Mode + + if !l.root.cfg.OwnerIsRoot { + a.Uid = l.node.UID + a.Gid = l.node.GID + } + a.Atime = l.node.AccessTime + a.Ctime = l.node.ChangeTime + a.Mtime = l.node.ModTime + + a.Nlink = uint32(l.node.Links) + + return nil +} diff --git a/internal/fuse/meta_dir.go b/internal/fuse/meta_dir.go new file mode 100644 index 000000000..3271b3c29 --- /dev/null +++ b/internal/fuse/meta_dir.go @@ -0,0 +1,90 @@ +// +build !netbsd +// +build !openbsd +// +build !solaris +// +build !windows + +package fuse + +import ( + "os" + + "github.com/restic/restic/internal/debug" + + "golang.org/x/net/context" + + "bazil.org/fuse" + "bazil.org/fuse/fs" +) + +// ensure that *DirSnapshots implements these interfaces +var _ = fs.HandleReadDirAller(&MetaDir{}) +var _ = fs.NodeStringLookuper(&MetaDir{}) + +// 
MetaDir is a fuse directory which contains other directories. +type MetaDir struct { + inode uint64 + root *Root + entries map[string]fs.Node +} + +// NewMetaDir returns a new meta dir. +func NewMetaDir(root *Root, inode uint64, entries map[string]fs.Node) *MetaDir { + debug.Log("new meta dir with %d entries, inode %d", len(entries), inode) + + return &MetaDir{ + root: root, + inode: inode, + entries: entries, + } +} + +// Attr returns the attributes for the root node. +func (d *MetaDir) Attr(ctx context.Context, attr *fuse.Attr) error { + attr.Inode = d.inode + attr.Mode = os.ModeDir | 0555 + + if !d.root.cfg.OwnerIsRoot { + attr.Uid = uint32(os.Getuid()) + attr.Gid = uint32(os.Getgid()) + } + debug.Log("attr: %v", attr) + return nil +} + +// ReadDirAll returns all entries of the root node. +func (d *MetaDir) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) { + debug.Log("ReadDirAll()") + items := []fuse.Dirent{ + { + Inode: d.inode, + Name: ".", + Type: fuse.DT_Dir, + }, + { + Inode: d.root.inode, + Name: "..", + Type: fuse.DT_Dir, + }, + } + + for name := range d.entries { + items = append(items, fuse.Dirent{ + Inode: fs.GenerateDynamicInode(d.inode, name), + Name: name, + Type: fuse.DT_Dir, + }) + } + + return items, nil +} + +// Lookup returns a specific entry from the root node. +func (d *MetaDir) Lookup(ctx context.Context, name string) (fs.Node, error) { + debug.Log("Lookup(%s)", name) + + if dir, ok := d.entries[name]; ok { + return dir, nil + } + + return nil, fuse.ENOENT +} diff --git a/internal/fuse/other.go b/internal/fuse/other.go new file mode 100644 index 000000000..b497087e6 --- /dev/null +++ b/internal/fuse/other.go @@ -0,0 +1,43 @@ +// +build !netbsd +// +build !openbsd +// +build !solaris +// +build !windows + +package fuse + +import ( + "bazil.org/fuse" + "github.com/restic/restic/internal/restic" + "golang.org/x/net/context" +) + +type other struct { + root *Root + node *restic.Node + inode uint64 +} + +func newOther(ctx context.Context, root *Root, inode uint64, node *restic.Node) (*other, error) { + return &other{root: root, inode: inode, node: node}, nil +} + +func (l *other) Readlink(ctx context.Context, req *fuse.ReadlinkRequest) (string, error) { + return l.node.LinkTarget, nil +} + +func (l *other) Attr(ctx context.Context, a *fuse.Attr) error { + a.Inode = l.inode + a.Mode = l.node.Mode + + if !l.root.cfg.OwnerIsRoot { + a.Uid = l.node.UID + a.Gid = l.node.GID + } + a.Atime = l.node.AccessTime + a.Ctime = l.node.ChangeTime + a.Mtime = l.node.ModTime + + a.Nlink = uint32(l.node.Links) + + return nil +} diff --git a/internal/fuse/root.go b/internal/fuse/root.go new file mode 100644 index 000000000..3ffa8a7ac --- /dev/null +++ b/internal/fuse/root.go @@ -0,0 +1,75 @@ +// +build !netbsd +// +build !openbsd +// +build !solaris +// +build !windows + +package fuse + +import ( + "time" + + "github.com/restic/restic/internal/debug" + "github.com/restic/restic/internal/restic" + + "golang.org/x/net/context" + + "bazil.org/fuse/fs" +) + +// Config holds settings for the fuse mount. +type Config struct { + OwnerIsRoot bool + Host string + Tags []restic.TagList + Paths []string + SnapshotTemplate string +} + +// Root is the root node of the fuse mount of a repository. 
+type Root struct { + repo restic.Repository + cfg Config + inode uint64 + snapshots restic.Snapshots + blobSizeCache *BlobSizeCache + + snCount int + lastCheck time.Time + + *MetaDir +} + +// ensure that *Root implements these interfaces +var _ = fs.HandleReadDirAller(&Root{}) +var _ = fs.NodeStringLookuper(&Root{}) + +const rootInode = 1 + +// NewRoot initializes a new root node from a repository. +func NewRoot(ctx context.Context, repo restic.Repository, cfg Config) (*Root, error) { + debug.Log("NewRoot(), config %v", cfg) + + root := &Root{ + repo: repo, + inode: rootInode, + cfg: cfg, + blobSizeCache: NewBlobSizeCache(ctx, repo.Index()), + } + + entries := map[string]fs.Node{ + "snapshots": NewSnapshotsDir(root, fs.GenerateDynamicInode(root.inode, "snapshots"), "", ""), + "tags": NewTagsDir(root, fs.GenerateDynamicInode(root.inode, "tags")), + "hosts": NewHostsDir(root, fs.GenerateDynamicInode(root.inode, "hosts")), + "ids": NewSnapshotsIDSDir(root, fs.GenerateDynamicInode(root.inode, "ids")), + } + + root.MetaDir = NewMetaDir(root, rootInode, entries) + + return root, nil +} + +// Root is just there to satisfy fs.Root, it returns itself. +func (r *Root) Root() (fs.Node, error) { + debug.Log("Root()") + return r, nil +} diff --git a/internal/fuse/snapshots_dir.go b/internal/fuse/snapshots_dir.go new file mode 100644 index 000000000..3b7d428ae --- /dev/null +++ b/internal/fuse/snapshots_dir.go @@ -0,0 +1,554 @@ +// +build !netbsd +// +build !openbsd +// +build !solaris +// +build !windows + +package fuse + +import ( + "fmt" + "os" + "time" + + "github.com/restic/restic/internal/debug" + "github.com/restic/restic/internal/restic" + + "golang.org/x/net/context" + + "bazil.org/fuse" + "bazil.org/fuse/fs" +) + +// SnapshotsDir is a fuse directory which contains snapshots named by timestamp. +type SnapshotsDir struct { + inode uint64 + root *Root + names map[string]*restic.Snapshot + latest string + tag string + host string + snCount int + + template string +} + +// SnapshotsIDSDir is a fuse directory which contains snapshots named by ids. +type SnapshotsIDSDir struct { + inode uint64 + root *Root + names map[string]*restic.Snapshot + snCount int +} + +// HostsDir is a fuse directory which contains hosts. +type HostsDir struct { + inode uint64 + root *Root + hosts map[string]bool + snCount int +} + +// TagsDir is a fuse directory which contains tags. +type TagsDir struct { + inode uint64 + root *Root + tags map[string]bool + snCount int +} + +// SnapshotLink +type snapshotLink struct { + root *Root + inode uint64 + target string + snapshot *restic.Snapshot +} + +// ensure that *SnapshotsDir implements these interfaces +var _ = fs.HandleReadDirAller(&SnapshotsDir{}) +var _ = fs.NodeStringLookuper(&SnapshotsDir{}) +var _ = fs.HandleReadDirAller(&SnapshotsIDSDir{}) +var _ = fs.NodeStringLookuper(&SnapshotsIDSDir{}) +var _ = fs.HandleReadDirAller(&TagsDir{}) +var _ = fs.NodeStringLookuper(&TagsDir{}) +var _ = fs.HandleReadDirAller(&HostsDir{}) +var _ = fs.NodeStringLookuper(&HostsDir{}) +var _ = fs.NodeReadlinker(&snapshotLink{}) + +// read tag names from the current repository-state. +func updateTagNames(d *TagsDir) { + if d.snCount != d.root.snCount { + d.snCount = d.root.snCount + d.tags = make(map[string]bool, len(d.root.snapshots)) + for _, snapshot := range d.root.snapshots { + for _, tag := range snapshot.Tags { + if tag != "" { + d.tags[tag] = true + } + } + } + } +} + +// read host names from the current repository-state. 
+func updateHostsNames(d *HostsDir) { + if d.snCount != d.root.snCount { + d.snCount = d.root.snCount + d.hosts = make(map[string]bool, len(d.root.snapshots)) + for _, snapshot := range d.root.snapshots { + d.hosts[snapshot.Hostname] = true + } + } +} + +// read snapshot id names from the current repository-state. +func updateSnapshotIDSNames(d *SnapshotsIDSDir) { + if d.snCount != d.root.snCount { + d.snCount = d.root.snCount + for _, sn := range d.root.snapshots { + name := sn.ID().Str() + d.names[name] = sn + } + } +} + +// NewSnapshotsDir returns a new directory containing snapshots. +func NewSnapshotsDir(root *Root, inode uint64, tag string, host string) *SnapshotsDir { + debug.Log("create snapshots dir, inode %d", inode) + d := &SnapshotsDir{ + root: root, + inode: inode, + names: make(map[string]*restic.Snapshot), + latest: "", + tag: tag, + host: host, + template: root.cfg.SnapshotTemplate, + } + + return d +} + +// NewSnapshotsIDSDir returns a new directory containing snapshots named by ids. +func NewSnapshotsIDSDir(root *Root, inode uint64) *SnapshotsIDSDir { + debug.Log("create snapshots ids dir, inode %d", inode) + d := &SnapshotsIDSDir{ + root: root, + inode: inode, + names: make(map[string]*restic.Snapshot), + } + + return d +} + +// NewHostsDir returns a new directory containing host names +func NewHostsDir(root *Root, inode uint64) *HostsDir { + debug.Log("create hosts dir, inode %d", inode) + d := &HostsDir{ + root: root, + inode: inode, + hosts: make(map[string]bool), + } + + return d +} + +// NewTagsDir returns a new directory containing tag names +func NewTagsDir(root *Root, inode uint64) *TagsDir { + debug.Log("create tags dir, inode %d", inode) + d := &TagsDir{ + root: root, + inode: inode, + tags: make(map[string]bool), + } + + return d +} + +// Attr returns the attributes for the root node. +func (d *SnapshotsDir) Attr(ctx context.Context, attr *fuse.Attr) error { + attr.Inode = d.inode + attr.Mode = os.ModeDir | 0555 + + if !d.root.cfg.OwnerIsRoot { + attr.Uid = uint32(os.Getuid()) + attr.Gid = uint32(os.Getgid()) + } + debug.Log("attr: %v", attr) + return nil +} + +// Attr returns the attributes for the SnapshotsDir. +func (d *SnapshotsIDSDir) Attr(ctx context.Context, attr *fuse.Attr) error { + attr.Inode = d.inode + attr.Mode = os.ModeDir | 0555 + + if !d.root.cfg.OwnerIsRoot { + attr.Uid = uint32(os.Getuid()) + attr.Gid = uint32(os.Getgid()) + } + debug.Log("attr: %v", attr) + return nil +} + +// Attr returns the attributes for the HostsDir. +func (d *HostsDir) Attr(ctx context.Context, attr *fuse.Attr) error { + attr.Inode = d.inode + attr.Mode = os.ModeDir | 0555 + + if !d.root.cfg.OwnerIsRoot { + attr.Uid = uint32(os.Getuid()) + attr.Gid = uint32(os.Getgid()) + } + debug.Log("attr: %v", attr) + return nil +} + +// Attr returns the attributes for the TagsDir. +func (d *TagsDir) Attr(ctx context.Context, attr *fuse.Attr) error { + attr.Inode = d.inode + attr.Mode = os.ModeDir | 0555 + + if !d.root.cfg.OwnerIsRoot { + attr.Uid = uint32(os.Getuid()) + attr.Gid = uint32(os.Getgid()) + } + debug.Log("attr: %v", attr) + return nil +} + +// search element in string list. 
+func isElem(e string, list []string) bool { + for _, x := range list { + if e == x { + return true + } + } + return false +} + +const minSnapshotsReloadTime = 60 * time.Second + +// update snapshots if repository has changed +func updateSnapshots(ctx context.Context, root *Root) error { + if time.Since(root.lastCheck) < minSnapshotsReloadTime { + return nil + } + + snapshots, err := restic.FindFilteredSnapshots(ctx, root.repo, root.cfg.Host, root.cfg.Tags, root.cfg.Paths) + if err != nil { + return err + } + + if root.snCount != len(snapshots) { + root.snCount = len(snapshots) + root.repo.LoadIndex(ctx) + root.snapshots = snapshots + } + root.lastCheck = time.Now() + + return nil +} + +// read snapshot timestamps from the current repository-state. +func updateSnapshotNames(d *SnapshotsDir, template string) { + if d.snCount != d.root.snCount { + d.snCount = d.root.snCount + var latestTime time.Time + d.latest = "" + d.names = make(map[string]*restic.Snapshot, len(d.root.snapshots)) + for _, sn := range d.root.snapshots { + if d.tag == "" || isElem(d.tag, sn.Tags) { + if d.host == "" || d.host == sn.Hostname { + name := sn.Time.Format(template) + if d.latest == "" || !sn.Time.Before(latestTime) { + latestTime = sn.Time + d.latest = name + } + for i := 1; ; i++ { + if _, ok := d.names[name]; !ok { + break + } + + name = fmt.Sprintf("%s-%d", sn.Time.Format(template), i) + } + + d.names[name] = sn + } + } + } + } +} + +// ReadDirAll returns all entries of the SnapshotsDir. +func (d *SnapshotsDir) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) { + debug.Log("ReadDirAll()") + + // update snapshots + updateSnapshots(ctx, d.root) + + // update snapshot names + updateSnapshotNames(d, d.root.cfg.SnapshotTemplate) + + items := []fuse.Dirent{ + { + Inode: d.inode, + Name: ".", + Type: fuse.DT_Dir, + }, + { + Inode: d.root.inode, + Name: "..", + Type: fuse.DT_Dir, + }, + } + + for name := range d.names { + items = append(items, fuse.Dirent{ + Inode: fs.GenerateDynamicInode(d.inode, name), + Name: name, + Type: fuse.DT_Dir, + }) + } + + // Latest + if d.latest != "" { + items = append(items, fuse.Dirent{ + Inode: fs.GenerateDynamicInode(d.inode, "latest"), + Name: "latest", + Type: fuse.DT_Link, + }) + } + return items, nil +} + +// ReadDirAll returns all entries of the SnapshotsIDSDir. +func (d *SnapshotsIDSDir) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) { + debug.Log("ReadDirAll()") + + // update snapshots + updateSnapshots(ctx, d.root) + + // update snapshot ids + updateSnapshotIDSNames(d) + + items := []fuse.Dirent{ + { + Inode: d.inode, + Name: ".", + Type: fuse.DT_Dir, + }, + { + Inode: d.root.inode, + Name: "..", + Type: fuse.DT_Dir, + }, + } + + for name := range d.names { + items = append(items, fuse.Dirent{ + Inode: fs.GenerateDynamicInode(d.inode, name), + Name: name, + Type: fuse.DT_Dir, + }) + } + + return items, nil +} + +// ReadDirAll returns all entries of the HostsDir. +func (d *HostsDir) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) { + debug.Log("ReadDirAll()") + + // update snapshots + updateSnapshots(ctx, d.root) + + // update host names + updateHostsNames(d) + + items := []fuse.Dirent{ + { + Inode: d.inode, + Name: ".", + Type: fuse.DT_Dir, + }, + { + Inode: d.root.inode, + Name: "..", + Type: fuse.DT_Dir, + }, + } + + for host := range d.hosts { + items = append(items, fuse.Dirent{ + Inode: fs.GenerateDynamicInode(d.inode, host), + Name: host, + Type: fuse.DT_Dir, + }) + } + + return items, nil +} + +// ReadDirAll returns all entries of the TagsDir. 
+func (d *TagsDir) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) { + debug.Log("ReadDirAll()") + + // update snapshots + updateSnapshots(ctx, d.root) + + // update tag names + updateTagNames(d) + + items := []fuse.Dirent{ + { + Inode: d.inode, + Name: ".", + Type: fuse.DT_Dir, + }, + { + Inode: d.root.inode, + Name: "..", + Type: fuse.DT_Dir, + }, + } + + for tag := range d.tags { + items = append(items, fuse.Dirent{ + Inode: fs.GenerateDynamicInode(d.inode, tag), + Name: tag, + Type: fuse.DT_Dir, + }) + } + + return items, nil +} + +// newSnapshotLink +func newSnapshotLink(ctx context.Context, root *Root, inode uint64, target string, snapshot *restic.Snapshot) (*snapshotLink, error) { + return &snapshotLink{root: root, inode: inode, target: target, snapshot: snapshot}, nil +} + +// Readlink +func (l *snapshotLink) Readlink(ctx context.Context, req *fuse.ReadlinkRequest) (string, error) { + return l.target, nil +} + +// Attr +func (l *snapshotLink) Attr(ctx context.Context, a *fuse.Attr) error { + a.Inode = l.inode + a.Mode = os.ModeSymlink | 0777 + + if !l.root.cfg.OwnerIsRoot { + a.Uid = uint32(os.Getuid()) + a.Gid = uint32(os.Getgid()) + } + a.Atime = l.snapshot.Time + a.Ctime = l.snapshot.Time + a.Mtime = l.snapshot.Time + + a.Nlink = 1 + + return nil +} + +// Lookup returns a specific entry from the SnapshotsDir. +func (d *SnapshotsDir) Lookup(ctx context.Context, name string) (fs.Node, error) { + debug.Log("Lookup(%s)", name) + + sn, ok := d.names[name] + if !ok { + // could not find entry. Updating repository-state + updateSnapshots(ctx, d.root) + + // update snapshot names + updateSnapshotNames(d, d.root.cfg.SnapshotTemplate) + + sn, ok := d.names[name] + if ok { + return newDirFromSnapshot(ctx, d.root, fs.GenerateDynamicInode(d.inode, name), sn) + } + + if name == "latest" && d.latest != "" { + sn, ok := d.names[d.latest] + + // internal error + if !ok { + return nil, fuse.ENOENT + } + + return newSnapshotLink(ctx, d.root, fs.GenerateDynamicInode(d.inode, name), d.latest, sn) + } + return nil, fuse.ENOENT + } + + return newDirFromSnapshot(ctx, d.root, fs.GenerateDynamicInode(d.inode, name), sn) +} + +// Lookup returns a specific entry from the SnapshotsIDSDir. +func (d *SnapshotsIDSDir) Lookup(ctx context.Context, name string) (fs.Node, error) { + debug.Log("Lookup(%s)", name) + + sn, ok := d.names[name] + if !ok { + // could not find entry. Updating repository-state + updateSnapshots(ctx, d.root) + + // update snapshot ids + updateSnapshotIDSNames(d) + + sn, ok := d.names[name] + if ok { + return newDirFromSnapshot(ctx, d.root, fs.GenerateDynamicInode(d.inode, name), sn) + } + + return nil, fuse.ENOENT + } + + return newDirFromSnapshot(ctx, d.root, fs.GenerateDynamicInode(d.inode, name), sn) +} + +// Lookup returns a specific entry from the HostsDir. +func (d *HostsDir) Lookup(ctx context.Context, name string) (fs.Node, error) { + debug.Log("Lookup(%s)", name) + + _, ok := d.hosts[name] + if !ok { + // could not find entry. Updating repository-state + updateSnapshots(ctx, d.root) + + // update host names + updateHostsNames(d) + + _, ok := d.hosts[name] + if ok { + return NewSnapshotsDir(d.root, fs.GenerateDynamicInode(d.root.inode, name), "", name), nil + } + + return nil, fuse.ENOENT + } + + return NewSnapshotsDir(d.root, fs.GenerateDynamicInode(d.root.inode, name), "", name), nil +} + +// Lookup returns a specific entry from the TagsDir. 
+func (d *TagsDir) Lookup(ctx context.Context, name string) (fs.Node, error) { + debug.Log("Lookup(%s)", name) + + _, ok := d.tags[name] + if !ok { + // could not find entry. Updating repository-state + updateSnapshots(ctx, d.root) + + // update tag names + updateTagNames(d) + + _, ok := d.tags[name] + if ok { + return NewSnapshotsDir(d.root, fs.GenerateDynamicInode(d.root.inode, name), name, ""), nil + } + + return nil, fuse.ENOENT + } + + return NewSnapshotsDir(d.root, fs.GenerateDynamicInode(d.root.inode, name), name, ""), nil +} diff --git a/internal/hashing/reader.go b/internal/hashing/reader.go new file mode 100644 index 000000000..a499f4a63 --- /dev/null +++ b/internal/hashing/reader.go @@ -0,0 +1,29 @@ +package hashing + +import ( + "hash" + "io" +) + +// Reader hashes all data read from the underlying reader. +type Reader struct { + r io.Reader + h hash.Hash +} + +// NewReader returns a new Reader that uses the hash h. +func NewReader(r io.Reader, h hash.Hash) *Reader { + return &Reader{ + h: h, + r: io.TeeReader(r, h), + } +} + +func (h *Reader) Read(p []byte) (int, error) { + return h.r.Read(p) +} + +// Sum returns the hash of the data read so far. +func (h *Reader) Sum(d []byte) []byte { + return h.h.Sum(d) +} diff --git a/internal/hashing/reader_test.go b/internal/hashing/reader_test.go new file mode 100644 index 000000000..d17f264de --- /dev/null +++ b/internal/hashing/reader_test.go @@ -0,0 +1,73 @@ +package hashing + +import ( + "bytes" + "crypto/rand" + "crypto/sha256" + "io" + "io/ioutil" + "testing" +) + +func TestReader(t *testing.T) { + tests := []int{5, 23, 2<<18 + 23, 1 << 20} + + for _, size := range tests { + data := make([]byte, size) + _, err := io.ReadFull(rand.Reader, data) + if err != nil { + t.Fatalf("ReadFull: %v", err) + } + + expectedHash := sha256.Sum256(data) + + rd := NewReader(bytes.NewReader(data), sha256.New()) + n, err := io.Copy(ioutil.Discard, rd) + if err != nil { + t.Fatal(err) + } + + if n != int64(size) { + t.Errorf("Reader: invalid number of bytes written: got %d, expected %d", + n, size) + } + + resultingHash := rd.Sum(nil) + + if !bytes.Equal(expectedHash[:], resultingHash) { + t.Errorf("Reader: hashes do not match: expected %02x, got %02x", + expectedHash, resultingHash) + } + } +} + +func BenchmarkReader(b *testing.B) { + buf := make([]byte, 1<<22) + _, err := io.ReadFull(rand.Reader, buf) + if err != nil { + b.Fatal(err) + } + + expectedHash := sha256.Sum256(buf) + + b.SetBytes(int64(len(buf))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + rd := NewReader(bytes.NewReader(buf), sha256.New()) + n, err := io.Copy(ioutil.Discard, rd) + if err != nil { + b.Fatal(err) + } + + if n != int64(len(buf)) { + b.Errorf("Reader: invalid number of bytes written: got %d, expected %d", + n, len(buf)) + } + + resultingHash := rd.Sum(nil) + if !bytes.Equal(expectedHash[:], resultingHash) { + b.Errorf("Reader: hashes do not match: expected %02x, got %02x", + expectedHash, resultingHash) + } + } +} diff --git a/internal/hashing/writer.go b/internal/hashing/writer.go new file mode 100644 index 000000000..2940a6271 --- /dev/null +++ b/internal/hashing/writer.go @@ -0,0 +1,31 @@ +package hashing + +import ( + "hash" + "io" +) + +// Writer transparently hashes all data while writing it to the underlying writer. +type Writer struct { + w io.Writer + h hash.Hash +} + +// NewWriter wraps the writer w and feeds all data written to the hash h. 
+func NewWriter(w io.Writer, h hash.Hash) *Writer { + return &Writer{ + h: h, + w: io.MultiWriter(w, h), + } +} + +// Write wraps the write method of the underlying writer and also hashes all data. +func (h *Writer) Write(p []byte) (int, error) { + n, err := h.w.Write(p) + return n, err +} + +// Sum returns the hash of all data written so far. +func (h *Writer) Sum(d []byte) []byte { + return h.h.Sum(d) +} diff --git a/internal/hashing/writer_test.go b/internal/hashing/writer_test.go new file mode 100644 index 000000000..46999f20f --- /dev/null +++ b/internal/hashing/writer_test.go @@ -0,0 +1,74 @@ +package hashing + +import ( + "bytes" + "crypto/rand" + "crypto/sha256" + "io" + "io/ioutil" + "testing" +) + +func TestWriter(t *testing.T) { + tests := []int{5, 23, 2<<18 + 23, 1 << 20} + + for _, size := range tests { + data := make([]byte, size) + _, err := io.ReadFull(rand.Reader, data) + if err != nil { + t.Fatalf("ReadFull: %v", err) + } + + expectedHash := sha256.Sum256(data) + + wr := NewWriter(ioutil.Discard, sha256.New()) + + n, err := io.Copy(wr, bytes.NewReader(data)) + if err != nil { + t.Fatal(err) + } + + if n != int64(size) { + t.Errorf("Writer: invalid number of bytes written: got %d, expected %d", + n, size) + } + + resultingHash := wr.Sum(nil) + + if !bytes.Equal(expectedHash[:], resultingHash) { + t.Errorf("Writer: hashes do not match: expected %02x, got %02x", + expectedHash, resultingHash) + } + } +} + +func BenchmarkWriter(b *testing.B) { + buf := make([]byte, 1<<22) + _, err := io.ReadFull(rand.Reader, buf) + if err != nil { + b.Fatal(err) + } + + expectedHash := sha256.Sum256(buf) + + b.SetBytes(int64(len(buf))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + wr := NewWriter(ioutil.Discard, sha256.New()) + n, err := io.Copy(wr, bytes.NewReader(buf)) + if err != nil { + b.Fatal(err) + } + + if n != int64(len(buf)) { + b.Errorf("Writer: invalid number of bytes written: got %d, expected %d", + n, len(buf)) + } + + resultingHash := wr.Sum(nil) + if !bytes.Equal(expectedHash[:], resultingHash) { + b.Errorf("Writer: hashes do not match: expected %02x, got %02x", + expectedHash, resultingHash) + } + } +} diff --git a/internal/index/index.go b/internal/index/index.go new file mode 100644 index 000000000..6c6294b21 --- /dev/null +++ b/internal/index/index.go @@ -0,0 +1,407 @@ +// Package index contains various data structures for indexing content in a repository or backend. +package index + +import ( + "context" + "fmt" + "os" + "sync" + + "github.com/restic/restic/internal/debug" + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/pack" + "github.com/restic/restic/internal/restic" + "golang.org/x/sync/errgroup" +) + +// Pack contains information about the contents of a pack. +type Pack struct { + ID restic.ID + Size int64 + Entries []restic.Blob +} + +// Index contains information about blobs and packs stored in a repo. +type Index struct { + Packs map[restic.ID]Pack + IndexIDs restic.IDSet +} + +func newIndex() *Index { + return &Index{ + Packs: make(map[restic.ID]Pack), + IndexIDs: restic.NewIDSet(), + } +} + +const listPackWorkers = 10 + +// Lister lists files and their contents +type Lister interface { + // List runs fn for all files of type t in the repo. + List(ctx context.Context, t restic.FileType, fn func(restic.ID, int64) error) error + + // ListPack returns the list of blobs saved in the pack id and the length + // of the file as stored in the backend. 
+ ListPack(ctx context.Context, id restic.ID, size int64) ([]restic.Blob, int64, error) +} + +// New creates a new index for repo from scratch. InvalidFiles contains all IDs +// of files that cannot be listed successfully. +func New(ctx context.Context, repo Lister, ignorePacks restic.IDSet, p *restic.Progress) (idx *Index, invalidFiles restic.IDs, err error) { + p.Start() + defer p.Done() + + type Job struct { + PackID restic.ID + Size int64 + } + + type Result struct { + Error error + PackID restic.ID + Size int64 + Entries []restic.Blob + } + + inputCh := make(chan Job) + outputCh := make(chan Result) + wg, ctx := errgroup.WithContext(ctx) + + // list the files in the repo, send to inputCh + wg.Go(func() error { + defer close(inputCh) + return repo.List(ctx, restic.DataFile, func(id restic.ID, size int64) error { + if ignorePacks.Has(id) { + return nil + } + + job := Job{ + PackID: id, + Size: size, + } + + select { + case inputCh <- job: + case <-ctx.Done(): + } + return nil + }) + }) + + // run the workers listing the files, read from inputCh, send to outputCh + var workers sync.WaitGroup + for i := 0; i < listPackWorkers; i++ { + workers.Add(1) + go func() { + defer workers.Done() + for job := range inputCh { + res := Result{PackID: job.PackID} + res.Entries, res.Size, res.Error = repo.ListPack(ctx, job.PackID, job.Size) + + select { + case outputCh <- res: + case <-ctx.Done(): + return + } + } + }() + } + + // wait until all the workers are done, then close outputCh + wg.Go(func() error { + workers.Wait() + close(outputCh) + return nil + }) + + idx = newIndex() + + for res := range outputCh { + p.Report(restic.Stat{Blobs: 1}) + if res.Error != nil { + cause := errors.Cause(res.Error) + if _, ok := cause.(pack.InvalidFileError); ok { + invalidFiles = append(invalidFiles, res.PackID) + continue + } + + fmt.Fprintf(os.Stderr, "pack file cannot be listed %v: %v\n", res.PackID, res.Error) + continue + } + + debug.Log("pack %v contains %d blobs", res.PackID, len(res.Entries)) + + err := idx.AddPack(res.PackID, res.Size, res.Entries) + if err != nil { + return nil, nil, err + } + + select { + case <-ctx.Done(): // an error occurred + default: + } + } + + err = wg.Wait() + if err != nil { + return nil, nil, err + } + + return idx, invalidFiles, nil +} + +type packJSON struct { + ID restic.ID `json:"id"` + Blobs []blobJSON `json:"blobs"` +} + +type blobJSON struct { + ID restic.ID `json:"id"` + Type restic.BlobType `json:"type"` + Offset uint `json:"offset"` + Length uint `json:"length"` +} + +type indexJSON struct { + Supersedes restic.IDs `json:"supersedes,omitempty"` + Packs []packJSON `json:"packs"` +} + +// ListLoader allows listing files and their content, in addition to loading and unmarshaling JSON files. +type ListLoader interface { + Lister + LoadJSONUnpacked(context.Context, restic.FileType, restic.ID, interface{}) error +} + +func loadIndexJSON(ctx context.Context, repo ListLoader, id restic.ID) (*indexJSON, error) { + debug.Log("process index %v\n", id) + + var idx indexJSON + err := repo.LoadJSONUnpacked(ctx, restic.IndexFile, id, &idx) + if err != nil { + return nil, err + } + + return &idx, nil +} + +// Load creates an index by loading all index files from the repo. 
+func Load(ctx context.Context, repo ListLoader, p *restic.Progress) (*Index, error) { + debug.Log("loading indexes") + + p.Start() + defer p.Done() + + supersedes := make(map[restic.ID]restic.IDSet) + results := make(map[restic.ID]map[restic.ID]Pack) + + index := newIndex() + + err := repo.List(ctx, restic.IndexFile, func(id restic.ID, size int64) error { + p.Report(restic.Stat{Blobs: 1}) + + debug.Log("Load index %v", id) + idx, err := loadIndexJSON(ctx, repo, id) + if err != nil { + return err + } + + res := make(map[restic.ID]Pack) + supersedes[id] = restic.NewIDSet() + for _, sid := range idx.Supersedes { + debug.Log(" index %v supersedes %v", id, sid) + supersedes[id].Insert(sid) + } + + for _, jpack := range idx.Packs { + entries := make([]restic.Blob, 0, len(jpack.Blobs)) + for _, blob := range jpack.Blobs { + entry := restic.Blob{ + ID: blob.ID, + Type: blob.Type, + Offset: blob.Offset, + Length: blob.Length, + } + entries = append(entries, entry) + } + + if err = index.AddPack(jpack.ID, 0, entries); err != nil { + return err + } + } + + results[id] = res + index.IndexIDs.Insert(id) + + return nil + }) + + if err != nil { + return nil, err + } + + for superID, list := range supersedes { + for indexID := range list { + if _, ok := results[indexID]; !ok { + continue + } + debug.Log(" removing index %v, superseded by %v", indexID, superID) + fmt.Fprintf(os.Stderr, "index %v can be removed, superseded by index %v\n", indexID.Str(), superID.Str()) + delete(results, indexID) + } + } + + return index, nil +} + +// AddPack adds a pack to the index. If this pack is already in the index, an +// error is returned. +func (idx *Index) AddPack(id restic.ID, size int64, entries []restic.Blob) error { + if _, ok := idx.Packs[id]; ok { + return errors.Errorf("pack %v already present in the index", id.Str()) + } + + idx.Packs[id] = Pack{ID: id, Size: size, Entries: entries} + + return nil +} + +// RemovePack deletes a pack from the index. +func (idx *Index) RemovePack(id restic.ID) error { + if _, ok := idx.Packs[id]; !ok { + return errors.Errorf("pack %v not found in the index", id.Str()) + } + + delete(idx.Packs, id) + + return nil +} + +// DuplicateBlobs returns a list of blobs that are stored more than once in the +// repo. +func (idx *Index) DuplicateBlobs() (dups restic.BlobSet) { + dups = restic.NewBlobSet() + seen := restic.NewBlobSet() + + for _, p := range idx.Packs { + for _, entry := range p.Entries { + h := restic.BlobHandle{ID: entry.ID, Type: entry.Type} + if seen.Has(h) { + dups.Insert(h) + } + seen.Insert(h) + } + } + + return dups +} + +// PacksForBlobs returns the set of packs in which the blobs are contained. +func (idx *Index) PacksForBlobs(blobs restic.BlobSet) (packs restic.IDSet) { + packs = restic.NewIDSet() + + for id, p := range idx.Packs { + for _, entry := range p.Entries { + if blobs.Has(restic.BlobHandle{ID: entry.ID, Type: entry.Type}) { + packs.Insert(id) + } + } + } + + return packs +} + +// Location describes the location of a blob in a pack. +type Location struct { + PackID restic.ID + restic.Blob +} + +// ErrBlobNotFound is return by FindBlob when the blob could not be found in +// the index. +var ErrBlobNotFound = errors.New("blob not found in index") + +// FindBlob returns a list of packs and positions the blob can be found in. 
+func (idx *Index) FindBlob(h restic.BlobHandle) (result []Location, err error) { + for id, p := range idx.Packs { + for _, entry := range p.Entries { + if entry.ID.Equal(h.ID) && entry.Type == h.Type { + result = append(result, Location{ + PackID: id, + Blob: entry, + }) + } + } + } + + if len(result) == 0 { + return nil, ErrBlobNotFound + } + + return result, nil +} + +const maxEntries = 3000 + +// Saver saves structures as JSON. +type Saver interface { + SaveJSONUnpacked(ctx context.Context, t restic.FileType, item interface{}) (restic.ID, error) +} + +// Save writes the complete index to the repo. +func (idx *Index) Save(ctx context.Context, repo Saver, supersedes restic.IDs) (restic.IDs, error) { + debug.Log("pack files: %d\n", len(idx.Packs)) + + var indexIDs []restic.ID + + packs := 0 + jsonIDX := &indexJSON{ + Supersedes: supersedes, + Packs: make([]packJSON, 0, maxEntries), + } + + for packID, pack := range idx.Packs { + debug.Log("%04d add pack %v with %d entries", packs, packID, len(pack.Entries)) + b := make([]blobJSON, 0, len(pack.Entries)) + for _, blob := range pack.Entries { + b = append(b, blobJSON{ + ID: blob.ID, + Type: blob.Type, + Offset: blob.Offset, + Length: blob.Length, + }) + } + + p := packJSON{ + ID: packID, + Blobs: b, + } + + jsonIDX.Packs = append(jsonIDX.Packs, p) + + packs++ + if packs == maxEntries { + id, err := repo.SaveJSONUnpacked(ctx, restic.IndexFile, jsonIDX) + if err != nil { + return nil, err + } + debug.Log("saved new index as %v", id) + + indexIDs = append(indexIDs, id) + packs = 0 + jsonIDX.Packs = jsonIDX.Packs[:0] + } + } + + if packs > 0 { + id, err := repo.SaveJSONUnpacked(ctx, restic.IndexFile, jsonIDX) + if err != nil { + return nil, err + } + debug.Log("saved new index as %v", id) + indexIDs = append(indexIDs, id) + } + + return indexIDs, nil +} diff --git a/internal/index/index_test.go b/internal/index/index_test.go new file mode 100644 index 000000000..43198cf68 --- /dev/null +++ b/internal/index/index_test.go @@ -0,0 +1,475 @@ +package index + +import ( + "context" + "sync" + "testing" + "time" + + "github.com/restic/restic/internal/checker" + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/repository" + "github.com/restic/restic/internal/restic" + "github.com/restic/restic/internal/test" +) + +var ( + snapshotTime = time.Unix(1470492820, 207401672) + depth = 3 +) + +func createFilledRepo(t testing.TB, snapshots int, dup float32) (restic.Repository, func()) { + repo, cleanup := repository.TestRepository(t) + + for i := 0; i < 3; i++ { + restic.TestCreateSnapshot(t, repo, snapshotTime.Add(time.Duration(i)*time.Second), depth, dup) + } + + return repo, cleanup +} + +func validateIndex(t testing.TB, repo restic.Repository, idx *Index) { + err := repo.List(context.TODO(), restic.DataFile, func(id restic.ID, size int64) error { + p, ok := idx.Packs[id] + if !ok { + t.Errorf("pack %v missing from index", id.Str()) + } + + if !p.ID.Equal(id) { + t.Errorf("pack %v has invalid ID: want %v, got %v", id.Str(), id, p.ID) + } + return nil + }) + + if err != nil { + t.Fatal(err) + } +} + +func TestIndexNew(t *testing.T) { + repo, cleanup := createFilledRepo(t, 3, 0) + defer cleanup() + + idx, invalid, err := New(context.TODO(), repo, restic.NewIDSet(), nil) + if err != nil { + t.Fatalf("New() returned error %v", err) + } + + if idx == nil { + t.Fatalf("New() returned nil index") + } + + if len(invalid) > 0 { + t.Fatalf("New() returned invalid files: %v", invalid) + } + + validateIndex(t, repo, idx) +} + +type 
ErrorRepo struct { + restic.Repository + MaxListFiles int + + MaxPacks int + MaxPacksMutex sync.Mutex +} + +// List returns an error after repo.MaxListFiles files. +func (repo *ErrorRepo) List(ctx context.Context, t restic.FileType, fn func(restic.ID, int64) error) error { + if repo.MaxListFiles == 0 { + return errors.New("test error, max is zero") + } + + max := repo.MaxListFiles + return repo.Repository.List(ctx, t, func(id restic.ID, size int64) error { + if max == 0 { + return errors.New("test error, max reached zero") + } + + max-- + return fn(id, size) + }) +} + +// ListPack returns an error after repo.MaxPacks files. +func (repo *ErrorRepo) ListPack(ctx context.Context, id restic.ID, size int64) ([]restic.Blob, int64, error) { + repo.MaxPacksMutex.Lock() + max := repo.MaxPacks + if max > 0 { + repo.MaxPacks-- + } + repo.MaxPacksMutex.Unlock() + + if max == 0 { + return nil, 0, errors.New("test list pack error") + } + + return repo.Repository.ListPack(ctx, id, size) +} + +func TestIndexNewListErrors(t *testing.T) { + repo, cleanup := createFilledRepo(t, 3, 0) + defer cleanup() + + for _, max := range []int{0, 3, 5} { + errRepo := &ErrorRepo{ + Repository: repo, + MaxListFiles: max, + } + idx, invalid, err := New(context.TODO(), errRepo, restic.NewIDSet(), nil) + if err == nil { + t.Errorf("expected error not found, got nil") + } + + if idx != nil { + t.Errorf("expected nil index, got %v", idx) + } + + if len(invalid) != 0 { + t.Errorf("expected empty invalid list, got %v", invalid) + } + } +} + +func TestIndexNewPackErrors(t *testing.T) { + repo, cleanup := createFilledRepo(t, 3, 0) + defer cleanup() + + for _, max := range []int{0, 3, 5} { + errRepo := &ErrorRepo{ + Repository: repo, + MaxPacks: max, + } + idx, invalid, err := New(context.TODO(), errRepo, restic.NewIDSet(), nil) + if err == nil { + t.Errorf("expected error not found, got nil") + } + + if idx != nil { + t.Errorf("expected nil index, got %v", idx) + } + + if len(invalid) != 0 { + t.Errorf("expected empty invalid list, got %v", invalid) + } + } +} + +func TestIndexLoad(t *testing.T) { + repo, cleanup := createFilledRepo(t, 3, 0) + defer cleanup() + + loadIdx, err := Load(context.TODO(), repo, nil) + if err != nil { + t.Fatalf("Load() returned error %v", err) + } + + if loadIdx == nil { + t.Fatalf("Load() returned nil index") + } + + validateIndex(t, repo, loadIdx) + + newIdx, _, err := New(context.TODO(), repo, restic.NewIDSet(), nil) + if err != nil { + t.Fatalf("New() returned error %v", err) + } + + if len(loadIdx.Packs) != len(newIdx.Packs) { + t.Errorf("number of packs does not match: want %v, got %v", + len(loadIdx.Packs), len(newIdx.Packs)) + } + + validateIndex(t, repo, newIdx) + + for packID, packNew := range newIdx.Packs { + packLoad, ok := loadIdx.Packs[packID] + + if !ok { + t.Errorf("loaded index does not list pack %v", packID.Str()) + continue + } + + if len(packNew.Entries) != len(packLoad.Entries) { + t.Errorf(" number of entries in pack %v does not match: %d != %d\n %v\n %v", + packID.Str(), len(packNew.Entries), len(packLoad.Entries), + packNew.Entries, packLoad.Entries) + continue + } + + for _, entryNew := range packNew.Entries { + found := false + for _, entryLoad := range packLoad.Entries { + if !entryLoad.ID.Equal(entryNew.ID) { + continue + } + + if entryLoad.Type != entryNew.Type { + continue + } + + if entryLoad.Offset != entryNew.Offset { + continue + } + + if entryLoad.Length != entryNew.Length { + continue + } + + found = true + break + } + + if !found { + t.Errorf("blob not found in loaded 
index: %v", entryNew) + } + } + } +} + +func BenchmarkIndexNew(b *testing.B) { + repo, cleanup := createFilledRepo(b, 3, 0) + defer cleanup() + + b.ResetTimer() + + for i := 0; i < b.N; i++ { + idx, _, err := New(context.TODO(), repo, restic.NewIDSet(), nil) + + if err != nil { + b.Fatalf("New() returned error %v", err) + } + + if idx == nil { + b.Fatalf("New() returned nil index") + } + b.Logf("idx %v packs", len(idx.Packs)) + } +} + +func BenchmarkIndexSave(b *testing.B) { + repo, cleanup := repository.TestRepository(b) + defer cleanup() + + idx, _, err := New(context.TODO(), repo, restic.NewIDSet(), nil) + test.OK(b, err) + + for i := 0; i < 8000; i++ { + entries := make([]restic.Blob, 0, 200) + for j := 0; j < cap(entries); j++ { + entries = append(entries, restic.Blob{ + ID: restic.NewRandomID(), + Length: 1000, + Offset: 5, + Type: restic.DataBlob, + }) + } + + idx.AddPack(restic.NewRandomID(), 10000, entries) + } + + b.ResetTimer() + + for i := 0; i < b.N; i++ { + ids, err := idx.Save(context.TODO(), repo, nil) + if err != nil { + b.Fatalf("New() returned error %v", err) + } + + b.Logf("saved as %v", ids) + } +} + +func TestIndexDuplicateBlobs(t *testing.T) { + repo, cleanup := createFilledRepo(t, 3, 0.05) + defer cleanup() + + idx, _, err := New(context.TODO(), repo, restic.NewIDSet(), nil) + if err != nil { + t.Fatal(err) + } + + dups := idx.DuplicateBlobs() + if len(dups) == 0 { + t.Errorf("no duplicate blobs found") + } + t.Logf("%d packs, %d duplicate blobs", len(idx.Packs), len(dups)) + + packs := idx.PacksForBlobs(dups) + if len(packs) == 0 { + t.Errorf("no packs with duplicate blobs found") + } + t.Logf("%d packs with duplicate blobs", len(packs)) +} + +func loadIndex(t testing.TB, repo restic.Repository) *Index { + idx, err := Load(context.TODO(), repo, nil) + if err != nil { + t.Fatalf("Load() returned error %v", err) + } + + return idx +} + +func TestIndexSave(t *testing.T) { + repo, cleanup := createFilledRepo(t, 3, 0) + defer cleanup() + + idx := loadIndex(t, repo) + + ids, err := idx.Save(context.TODO(), repo, idx.IndexIDs.List()) + if err != nil { + t.Fatalf("unable to save new index: %v", err) + } + + t.Logf("new index saved as %v", ids) + + for id := range idx.IndexIDs { + t.Logf("remove index %v", id.Str()) + h := restic.Handle{Type: restic.IndexFile, Name: id.String()} + err = repo.Backend().Remove(context.TODO(), h) + if err != nil { + t.Errorf("error removing index %v: %v", id, err) + } + } + + idx2 := loadIndex(t, repo) + t.Logf("load new index with %d packs", len(idx2.Packs)) + + checker := checker.New(repo) + hints, errs := checker.LoadIndex(context.TODO()) + for _, h := range hints { + t.Logf("hint: %v\n", h) + } + + for _, err := range errs { + t.Errorf("checker found error: %v", err) + } + + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + + errCh := make(chan error) + go checker.Structure(ctx, errCh) + i := 0 + for err := range errCh { + t.Errorf("checker returned error: %v", err) + i++ + if i == 10 { + t.Errorf("more than 10 errors returned, skipping the rest") + cancel() + break + } + } +} + +func TestIndexAddRemovePack(t *testing.T) { + repo, cleanup := createFilledRepo(t, 3, 0) + defer cleanup() + + idx, err := Load(context.TODO(), repo, nil) + if err != nil { + t.Fatalf("Load() returned error %v", err) + } + + var packID restic.ID + err = repo.List(context.TODO(), restic.DataFile, func(id restic.ID, size int64) error { + packID = id + return nil + }) + if err != nil { + t.Fatal(err) + } + + t.Logf("selected pack %v", packID.Str()) 
+ + blobs := idx.Packs[packID].Entries + + idx.RemovePack(packID) + + if _, ok := idx.Packs[packID]; ok { + t.Errorf("removed pack %v found in index.Packs", packID.Str()) + } + + for _, blob := range blobs { + h := restic.BlobHandle{ID: blob.ID, Type: blob.Type} + _, err := idx.FindBlob(h) + if err == nil { + t.Errorf("removed blob %v found in index", h) + } + } +} + +// example index serialization from doc/Design.rst +var docExample = []byte(` +{ + "supersedes": [ + "ed54ae36197f4745ebc4b54d10e0f623eaaaedd03013eb7ae90df881b7781452" + ], + "packs": [ + { + "id": "73d04e6125cf3c28a299cc2f3cca3b78ceac396e4fcf9575e34536b26782413c", + "blobs": [ + { + "id": "3ec79977ef0cf5de7b08cd12b874cd0f62bbaf7f07f3497a5b1bbcc8cb39b1ce", + "type": "data", + "offset": 0, + "length": 25 + },{ + "id": "9ccb846e60d90d4eb915848add7aa7ea1e4bbabfc60e573db9f7bfb2789afbae", + "type": "tree", + "offset": 38, + "length": 100 + }, + { + "id": "d3dc577b4ffd38cc4b32122cabf8655a0223ed22edfd93b353dc0c3f2b0fdf66", + "type": "data", + "offset": 150, + "length": 123 + } + ] + } + ] +} +`) + +func TestIndexLoadDocReference(t *testing.T) { + repo, cleanup := repository.TestRepository(t) + defer cleanup() + + id, err := repo.SaveUnpacked(context.TODO(), restic.IndexFile, docExample) + if err != nil { + t.Fatalf("SaveUnpacked() returned error %v", err) + } + + t.Logf("index saved as %v", id.Str()) + + idx := loadIndex(t, repo) + + blobID := restic.TestParseID("d3dc577b4ffd38cc4b32122cabf8655a0223ed22edfd93b353dc0c3f2b0fdf66") + locs, err := idx.FindBlob(restic.BlobHandle{ID: blobID, Type: restic.DataBlob}) + if err != nil { + t.Errorf("FindBlob() returned error %v", err) + } + + if len(locs) != 1 { + t.Errorf("blob found %d times, expected just one", len(locs)) + } + + l := locs[0] + if !l.ID.Equal(blobID) { + t.Errorf("blob IDs are not equal: %v != %v", l.ID, blobID) + } + + if l.Type != restic.DataBlob { + t.Errorf("want type %v, got %v", restic.DataBlob, l.Type) + } + + if l.Offset != 150 { + t.Errorf("wrong offset, want %d, got %v", 150, l.Offset) + } + + if l.Length != 123 { + t.Errorf("wrong length, want %d, got %v", 123, l.Length) + } +} diff --git a/internal/limiter/limiter.go b/internal/limiter/limiter.go new file mode 100644 index 000000000..410bc7f64 --- /dev/null +++ b/internal/limiter/limiter.go @@ -0,0 +1,25 @@ +package limiter + +import ( + "io" + "net/http" +) + +// Limiter defines an interface that implementors can use to rate limit I/O +// according to some policy defined and configured by the implementor. +type Limiter interface { + // Upstream returns a rate limited reader that is intended to be used in + // uploads. + Upstream(r io.Reader) io.Reader + + // UpstreamWriter returns a rate limited writer that is intended to be used + // in uploads. + UpstreamWriter(w io.Writer) io.Writer + + // Downstream returns a rate limited reader that is intended to be used + // for downloads. + Downstream(r io.Reader) io.Reader + + // Transport returns an http.RoundTripper limited with the limiter. + Transport(http.RoundTripper) http.RoundTripper +} diff --git a/internal/limiter/limiter_backend.go b/internal/limiter/limiter_backend.go new file mode 100644 index 000000000..b2351a8fd --- /dev/null +++ b/internal/limiter/limiter_backend.go @@ -0,0 +1,68 @@ +package limiter + +import ( + "context" + "io" + + "github.com/restic/restic/internal/restic" +) + +// LimitBackend wraps a Backend and applies rate limiting to Load() and Save() +// calls on the backend. 
+func LimitBackend(be restic.Backend, l Limiter) restic.Backend { + return rateLimitedBackend{ + Backend: be, + limiter: l, + } +} + +type rateLimitedBackend struct { + restic.Backend + limiter Limiter +} + +func (r rateLimitedBackend) Save(ctx context.Context, h restic.Handle, rd restic.RewindReader) error { + limited := limitedRewindReader{ + RewindReader: rd, + limited: r.limiter.Upstream(rd), + } + + return r.Backend.Save(ctx, h, limited) +} + +type limitedRewindReader struct { + restic.RewindReader + + limited io.Reader +} + +func (l limitedRewindReader) Read(b []byte) (int, error) { + return l.limited.Read(b) +} + +func (r rateLimitedBackend) Load(ctx context.Context, h restic.Handle, length int, offset int64, consumer func(rd io.Reader) error) error { + return r.Backend.Load(ctx, h, length, offset, func(rd io.Reader) error { + lrd := limitedReadCloser{ + limited: r.limiter.Downstream(rd), + } + return consumer(lrd) + }) +} + +type limitedReadCloser struct { + original io.ReadCloser + limited io.Reader +} + +func (l limitedReadCloser) Read(b []byte) (n int, err error) { + return l.limited.Read(b) +} + +func (l limitedReadCloser) Close() error { + if l.original == nil { + return nil + } + return l.original.Close() +} + +var _ restic.Backend = (*rateLimitedBackend)(nil) diff --git a/internal/limiter/static_limiter.go b/internal/limiter/static_limiter.go new file mode 100644 index 000000000..5df7a84da --- /dev/null +++ b/internal/limiter/static_limiter.go @@ -0,0 +1,98 @@ +package limiter + +import ( + "io" + "net/http" + + "github.com/juju/ratelimit" +) + +type staticLimiter struct { + upstream *ratelimit.Bucket + downstream *ratelimit.Bucket +} + +// NewStaticLimiter constructs a Limiter with a fixed (static) upload and +// download rate cap +func NewStaticLimiter(uploadKb, downloadKb int) Limiter { + var ( + upstreamBucket *ratelimit.Bucket + downstreamBucket *ratelimit.Bucket + ) + + if uploadKb > 0 { + upstreamBucket = ratelimit.NewBucketWithRate(toByteRate(uploadKb), int64(toByteRate(uploadKb))) + } + + if downloadKb > 0 { + downstreamBucket = ratelimit.NewBucketWithRate(toByteRate(downloadKb), int64(toByteRate(downloadKb))) + } + + return staticLimiter{ + upstream: upstreamBucket, + downstream: downstreamBucket, + } +} + +func (l staticLimiter) Upstream(r io.Reader) io.Reader { + return l.limitReader(r, l.upstream) +} + +func (l staticLimiter) UpstreamWriter(w io.Writer) io.Writer { + return l.limitWriter(w, l.upstream) +} + +func (l staticLimiter) Downstream(r io.Reader) io.Reader { + return l.limitReader(r, l.downstream) +} + +type roundTripper func(*http.Request) (*http.Response, error) + +func (rt roundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + return rt(req) +} + +func (l staticLimiter) roundTripper(rt http.RoundTripper, req *http.Request) (*http.Response, error) { + if req.Body != nil { + req.Body = limitedReadCloser{ + limited: l.Upstream(req.Body), + original: req.Body, + } + } + + res, err := rt.RoundTrip(req) + + if res != nil && res.Body != nil { + res.Body = limitedReadCloser{ + limited: l.Downstream(res.Body), + original: res.Body, + } + } + + return res, err +} + +// Transport returns an HTTP transport limited with the limiter l. 
+func (l staticLimiter) Transport(rt http.RoundTripper) http.RoundTripper { + return roundTripper(func(req *http.Request) (*http.Response, error) { + return l.roundTripper(rt, req) + }) +} + +func (l staticLimiter) limitReader(r io.Reader, b *ratelimit.Bucket) io.Reader { + if b == nil { + return r + } + return ratelimit.Reader(r, b) +} + +func (l staticLimiter) limitWriter(w io.Writer, b *ratelimit.Bucket) io.Writer { + if b == nil { + return w + } + return ratelimit.Writer(w, b) +} + +func toByteRate(val int) float64 { + return float64(val) * 1024. +} diff --git a/internal/migrations/doc.go b/internal/migrations/doc.go new file mode 100644 index 000000000..0c757fcf4 --- /dev/null +++ b/internal/migrations/doc.go @@ -0,0 +1,2 @@ +// Package migrations contains migrations that can be applied to a repository and/or backend. +package migrations diff --git a/internal/migrations/interface.go b/internal/migrations/interface.go new file mode 100644 index 000000000..9d9eedba1 --- /dev/null +++ b/internal/migrations/interface.go @@ -0,0 +1,22 @@ +package migrations + +import ( + "context" + + "github.com/restic/restic/internal/restic" +) + +// Migration implements a data migration. +type Migration interface { + // Check returns true if the migration can be applied to a repo. + Check(context.Context, restic.Repository) (bool, error) + + // Apply runs the migration. + Apply(context.Context, restic.Repository) error + + // Name returns a short name. + Name() string + + // Descr returns a description what the migration does. + Desc() string +} diff --git a/internal/migrations/list.go b/internal/migrations/list.go new file mode 100644 index 000000000..4442f343c --- /dev/null +++ b/internal/migrations/list.go @@ -0,0 +1,8 @@ +package migrations + +// All contains all migrations. +var All []Migration + +func register(m Migration) { + All = append(All, m) +} diff --git a/internal/migrations/s3_layout.go b/internal/migrations/s3_layout.go new file mode 100644 index 000000000..9dbf8dfa3 --- /dev/null +++ b/internal/migrations/s3_layout.go @@ -0,0 +1,117 @@ +package migrations + +import ( + "context" + "fmt" + "os" + "path" + + "github.com/restic/restic/internal/backend" + "github.com/restic/restic/internal/backend/s3" + "github.com/restic/restic/internal/debug" + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/restic" +) + +func init() { + register(&S3Layout{}) +} + +// S3Layout migrates a repository on an S3 backend from the "s3legacy" to the +// "default" layout. +type S3Layout struct{} + +// Check tests whether the migration can be applied. +func (m *S3Layout) Check(ctx context.Context, repo restic.Repository) (bool, error) { + be, ok := repo.Backend().(*s3.Backend) + if !ok { + debug.Log("backend is not s3") + return false, nil + } + + if be.Layout.Name() != "s3legacy" { + debug.Log("layout is not s3legacy") + return false, nil + } + + return true, nil +} + +func retry(max int, fail func(err error), f func() error) error { + var err error + for i := 0; i < max; i++ { + err = f() + if err == nil { + return err + } + if fail != nil { + fail(err) + } + } + return err +} + +// maxErrors for retrying renames on s3. 
+const maxErrors = 20 + +func (m *S3Layout) moveFiles(ctx context.Context, be *s3.Backend, l backend.Layout, t restic.FileType) error { + printErr := func(err error) { + fmt.Fprintf(os.Stderr, "renaming file returned error: %v\n", err) + } + + return be.List(ctx, t, func(fi restic.FileInfo) error { + h := restic.Handle{Type: t, Name: fi.Name} + debug.Log("move %v", h) + + return retry(maxErrors, printErr, func() error { + return be.Rename(h, l) + }) + }) +} + +// Apply runs the migration. +func (m *S3Layout) Apply(ctx context.Context, repo restic.Repository) error { + be, ok := repo.Backend().(*s3.Backend) + if !ok { + debug.Log("backend is not s3") + return errors.New("backend is not s3") + } + + oldLayout := &backend.S3LegacyLayout{ + Path: be.Path(), + Join: path.Join, + } + + newLayout := &backend.DefaultLayout{ + Path: be.Path(), + Join: path.Join, + } + + be.Layout = oldLayout + + for _, t := range []restic.FileType{ + restic.SnapshotFile, + restic.DataFile, + restic.KeyFile, + restic.LockFile, + } { + err := m.moveFiles(ctx, be, newLayout, t) + if err != nil { + return err + } + } + + be.Layout = newLayout + + return nil +} + +// Name returns the name for this migration. +func (m *S3Layout) Name() string { + return "s3_layout" +} + +// Desc returns a short description what the migration does. +func (m *S3Layout) Desc() string { + return "move files from 's3legacy' to the 'default' repository layout" +} diff --git a/internal/mock/backend.go b/internal/mock/backend.go new file mode 100644 index 000000000..930fdb3ee --- /dev/null +++ b/internal/mock/backend.go @@ -0,0 +1,136 @@ +package mock + +import ( + "context" + "io" + + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/restic" +) + +// Backend implements a mock backend. +type Backend struct { + CloseFn func() error + IsNotExistFn func(err error) bool + SaveFn func(ctx context.Context, h restic.Handle, rd restic.RewindReader) error + OpenReaderFn func(ctx context.Context, h restic.Handle, length int, offset int64) (io.ReadCloser, error) + StatFn func(ctx context.Context, h restic.Handle) (restic.FileInfo, error) + ListFn func(ctx context.Context, t restic.FileType, fn func(restic.FileInfo) error) error + RemoveFn func(ctx context.Context, h restic.Handle) error + TestFn func(ctx context.Context, h restic.Handle) (bool, error) + DeleteFn func(ctx context.Context) error + LocationFn func() string +} + +// NewBackend returns new mock Backend instance +func NewBackend() *Backend { + be := &Backend{} + return be +} + +// Close the backend. +func (m *Backend) Close() error { + if m.CloseFn == nil { + return nil + } + + return m.CloseFn() +} + +// Location returns a location string. +func (m *Backend) Location() string { + if m.LocationFn == nil { + return "" + } + + return m.LocationFn() +} + +// IsNotExist returns true if the error is caused by a missing file. +func (m *Backend) IsNotExist(err error) bool { + if m.IsNotExistFn == nil { + return false + } + + return m.IsNotExistFn(err) +} + +// Save data in the backend. +func (m *Backend) Save(ctx context.Context, h restic.Handle, rd restic.RewindReader) error { + if m.SaveFn == nil { + return errors.New("not implemented") + } + + return m.SaveFn(ctx, h, rd) +} + +// Load runs fn with a reader that yields the contents of the file at h at the +// given offset. 
+func (m *Backend) Load(ctx context.Context, h restic.Handle, length int, offset int64, fn func(rd io.Reader) error) error { + rd, err := m.openReader(ctx, h, length, offset) + if err != nil { + return err + } + err = fn(rd) + if err != nil { + rd.Close() // ignore secondary errors closing the reader + return err + } + return rd.Close() +} + +func (m *Backend) openReader(ctx context.Context, h restic.Handle, length int, offset int64) (io.ReadCloser, error) { + if m.OpenReaderFn == nil { + return nil, errors.New("not implemented") + } + + return m.OpenReaderFn(ctx, h, length, offset) +} + +// Stat an object in the backend. +func (m *Backend) Stat(ctx context.Context, h restic.Handle) (restic.FileInfo, error) { + if m.StatFn == nil { + return restic.FileInfo{}, errors.New("not implemented") + } + + return m.StatFn(ctx, h) +} + +// List items of type t. +func (m *Backend) List(ctx context.Context, t restic.FileType, fn func(restic.FileInfo) error) error { + if m.ListFn == nil { + return nil + } + + return m.ListFn(ctx, t, fn) +} + +// Remove data from the backend. +func (m *Backend) Remove(ctx context.Context, h restic.Handle) error { + if m.RemoveFn == nil { + return errors.New("not implemented") + } + + return m.RemoveFn(ctx, h) +} + +// Test for the existence of a specific item. +func (m *Backend) Test(ctx context.Context, h restic.Handle) (bool, error) { + if m.TestFn == nil { + return false, errors.New("not implemented") + } + + return m.TestFn(ctx, h) +} + +// Delete all data. +func (m *Backend) Delete(ctx context.Context) error { + if m.DeleteFn == nil { + return errors.New("not implemented") + } + + return m.DeleteFn(ctx) +} + +// Make sure that Backend implements the backend interface. +var _ restic.Backend = &Backend{} diff --git a/internal/mock/repository.go b/internal/mock/repository.go new file mode 100644 index 000000000..c3a9f0f9f --- /dev/null +++ b/internal/mock/repository.go @@ -0,0 +1,141 @@ +package mock + +import ( + "github.com/restic/restic/internal/crypto" + "github.com/restic/restic/internal/restic" +) + +// Repository implements a mock Repository. +type Repository struct { + BackendFn func() restic.Backend + + KeyFn func() *crypto.Key + + SetIndexFn func(restic.Index) error + + IndexFn func() restic.Index + SaveFullIndexFn func() error + SaveIndexFn func() error + LoadIndexFn func() error + + ConfigFn func() restic.Config + + LookupBlobSizeFn func(restic.ID, restic.BlobType) (uint, error) + + ListFn func(restic.FileType, <-chan struct{}) <-chan restic.ID + ListPackFn func(restic.ID) ([]restic.Blob, int64, error) + + FlushFn func() error + + SaveUnpackedFn func(restic.FileType, []byte) (restic.ID, error) + SaveJSONUnpackedFn func(restic.FileType, interface{}) (restic.ID, error) + + LoadJSONUnpackedFn func(restic.FileType, restic.ID, interface{}) error + LoadAndDecryptFn func(restic.FileType, restic.ID) ([]byte, error) + + LoadBlobFn func(restic.BlobType, restic.ID, []byte) (int, error) + SaveBlobFn func(restic.BlobType, []byte, restic.ID) (restic.ID, error) + + LoadTreeFn func(restic.ID) (*restic.Tree, error) + SaveTreeFn func(t *restic.Tree) (restic.ID, error) +} + +// Backend is a stub method. +func (repo Repository) Backend() restic.Backend { + return repo.BackendFn() +} + +// Key is a stub method. +func (repo Repository) Key() *crypto.Key { + return repo.KeyFn() +} + +// SetIndex is a stub method. +func (repo Repository) SetIndex(idx restic.Index) error { + return repo.SetIndexFn(idx) +} + +// Index is a stub method. 
+func (repo Repository) Index() restic.Index { + return repo.IndexFn() +} + +// SaveFullIndex is a stub method. +func (repo Repository) SaveFullIndex() error { + return repo.SaveFullIndexFn() +} + +// SaveIndex is a stub method. +func (repo Repository) SaveIndex() error { + return repo.SaveIndexFn() +} + +// LoadIndex is a stub method. +func (repo Repository) LoadIndex() error { + return repo.LoadIndexFn() +} + +// Config is a stub method. +func (repo Repository) Config() restic.Config { + return repo.ConfigFn() +} + +// LookupBlobSize is a stub method. +func (repo Repository) LookupBlobSize(id restic.ID, t restic.BlobType) (uint, error) { + return repo.LookupBlobSizeFn(id, t) +} + +// List is a stub method. +func (repo Repository) List(t restic.FileType, done <-chan struct{}) <-chan restic.ID { + return repo.ListFn(t, done) +} + +// ListPack is a stub method. +func (repo Repository) ListPack(id restic.ID) ([]restic.Blob, int64, error) { + return repo.ListPackFn(id) +} + +// Flush is a stub method. +func (repo Repository) Flush() error { + return repo.FlushFn() +} + +// SaveUnpacked is a stub method. +func (repo Repository) SaveUnpacked(t restic.FileType, buf []byte) (restic.ID, error) { + return repo.SaveUnpackedFn(t, buf) +} + +// SaveJSONUnpacked is a stub method. +func (repo Repository) SaveJSONUnpacked(t restic.FileType, item interface{}) (restic.ID, error) { + return repo.SaveJSONUnpackedFn(t, item) +} + +// LoadJSONUnpacked is a stub method. +func (repo Repository) LoadJSONUnpacked(t restic.FileType, id restic.ID, item interface{}) error { + return repo.LoadJSONUnpackedFn(t, id, item) +} + +// LoadAndDecrypt is a stub method. +func (repo Repository) LoadAndDecrypt(t restic.FileType, id restic.ID) ([]byte, error) { + return repo.LoadAndDecryptFn(t, id) +} + +// LoadBlob is a stub method. +func (repo Repository) LoadBlob(t restic.BlobType, id restic.ID, buf []byte) (int, error) { + return repo.LoadBlobFn(t, id, buf) +} + +// SaveBlob is a stub method. +func (repo Repository) SaveBlob(t restic.BlobType, buf []byte, id restic.ID) (restic.ID, error) { + return repo.SaveBlobFn(t, buf, id) +} + +// LoadTree is a stub method. +func (repo Repository) LoadTree(id restic.ID) (*restic.Tree, error) { + return repo.LoadTreeFn(id) +} + +// SaveTree is a stub method. +func (repo Repository) SaveTree(t *restic.Tree) (restic.ID, error) { + return repo.SaveTreeFn(t) +} diff --git a/internal/options/options.go b/internal/options/options.go new file mode 100644 index 000000000..2a3be2254 --- /dev/null +++ b/internal/options/options.go @@ -0,0 +1,218 @@ +package options + +import ( + "reflect" + "sort" + "strconv" + "strings" + "time" + + "github.com/restic/restic/internal/errors" +) + +// Options holds options in the form key=value. +type Options map[string]string + +var opts []Help + +// Register allows registering options so that they can be listed with List. +func Register(ns string, cfg interface{}) { + opts = appendAllOptions(opts, ns, cfg) +} + +// List returns a list of all registered options (using Register()). +func List() (list []Help) { + list = make([]Help, 0, len(opts)) + for _, opt := range opts { + list = append(list, opt) + } + return list +} + +// appendAllOptions appends all options in cfg to opts, sorted by namespace. +func appendAllOptions(opts []Help, ns string, cfg interface{}) []Help { + for _, opt := range listOptions(cfg) { + opt.Namespace = ns + opts = append(opts, opt) + } + + sort.Sort(helpList(opts)) + return opts +} + +// listOptions returns a list of options of cfg. 
+func listOptions(cfg interface{}) (opts []Help) { + // resolve indirection if cfg is a pointer + v := reflect.Indirect(reflect.ValueOf(cfg)) + + for i := 0; i < v.NumField(); i++ { + f := v.Type().Field(i) + + h := Help{ + Name: f.Tag.Get("option"), + Text: f.Tag.Get("help"), + } + + if h.Name == "" { + continue + } + + opts = append(opts, h) + } + + return opts +} + +// Help contains information about an option. +type Help struct { + Namespace string + Name string + Text string +} + +type helpList []Help + +// Len is the number of elements in the collection. +func (h helpList) Len() int { + return len(h) +} + +// Less reports whether the element with +// index i should sort before the element with index j. +func (h helpList) Less(i, j int) bool { + if h[i].Namespace == h[j].Namespace { + return h[i].Name < h[j].Name + } + + return h[i].Namespace < h[j].Namespace +} + +// Swap swaps the elements with indexes i and j. +func (h helpList) Swap(i, j int) { + h[i], h[j] = h[j], h[i] +} + +// splitKeyValue splits at the first equals (=) sign. +func splitKeyValue(s string) (key string, value string) { + data := strings.SplitN(s, "=", 2) + key = strings.ToLower(strings.TrimSpace(data[0])) + if len(data) == 1 { + // no equals sign is treated as the empty value + return key, "" + } + + return key, strings.TrimSpace(data[1]) +} + +// Parse takes a slice of key=value pairs and returns an Options type. +// The key may include namespaces, separated by dots. Example: "foo.bar=value". +// Keys are converted to lower-case. +func Parse(in []string) (Options, error) { + opts := make(Options, len(in)) + + for _, opt := range in { + key, value := splitKeyValue(opt) + + if key == "" { + return Options{}, errors.Fatalf("empty key is not a valid option") + } + + if v, ok := opts[key]; ok && v != value { + return Options{}, errors.Fatalf("key %q present more than once", key) + } + + opts[key] = value + } + + return opts, nil +} + +// Extract returns an Options type with all keys in namespace ns, which is +// also stripped from the keys. ns must end with a dot. +func (o Options) Extract(ns string) Options { + l := len(ns) + if ns[l-1] != '.' { + ns += "." + l++ + } + + opts := make(Options) + + for k, v := range o { + if !strings.HasPrefix(k, ns) { + continue + } + + opts[k[l:]] = v + } + + return opts +} + +// Apply sets the options on dst via reflection, using the struct tag `option`. +// The namespace argument (ns) is only used for error messages. +func (o Options) Apply(ns string, dst interface{}) error { + v := reflect.ValueOf(dst).Elem() + + fields := make(map[string]reflect.StructField) + + for i := 0; i < v.NumField(); i++ { + f := v.Type().Field(i) + tag := f.Tag.Get("option") + + if tag == "" { + continue + } + + if _, ok := fields[tag]; ok { + panic("option tag " + tag + " is not unique in " + v.Type().Name()) + } + + fields[tag] = f + } + + for key, value := range o { + field, ok := fields[key] + if !ok { + if ns != "" { + key = ns + "." 
+ key + } + return errors.Fatalf("option %v is not known", key) + } + + i := field.Index[0] + switch v.Type().Field(i).Type.Name() { + case "string": + v.Field(i).SetString(value) + + case "int": + vi, err := strconv.ParseInt(value, 0, 32) + if err != nil { + return err + } + + v.Field(i).SetInt(vi) + + case "uint": + vi, err := strconv.ParseUint(value, 0, 32) + if err != nil { + return err + } + + v.Field(i).SetUint(vi) + + case "Duration": + d, err := time.ParseDuration(value) + if err != nil { + return err + } + + v.Field(i).SetInt(int64(d)) + + default: + panic("type " + v.Type().Field(i).Type.Name() + " not handled") + } + } + + return nil +} diff --git a/internal/options/options_test.go b/internal/options/options_test.go new file mode 100644 index 000000000..de94fc90a --- /dev/null +++ b/internal/options/options_test.go @@ -0,0 +1,312 @@ +package options + +import ( + "fmt" + "reflect" + "testing" + "time" +) + +var optsTests = []struct { + input []string + output Options +}{ + { + []string{"foo=bar", "bar=baz ", "k="}, + Options{ + "foo": "bar", + "bar": "baz", + "k": "", + }, + }, + { + []string{"Foo=23", "baR", "k=thing with spaces"}, + Options{ + "foo": "23", + "bar": "", + "k": "thing with spaces", + }, + }, + { + []string{"k=thing with spaces", "k2=more spaces = not evil"}, + Options{ + "k": "thing with spaces", + "k2": "more spaces = not evil", + }, + }, + { + []string{"x=1", "foo=bar", "y=2", "foo=bar"}, + Options{ + "x": "1", + "y": "2", + "foo": "bar", + }, + }, +} + +func TestParseOptions(t *testing.T) { + for i, test := range optsTests { + t.Run(fmt.Sprintf("test-%v", i), func(t *testing.T) { + opts, err := Parse(test.input) + if err != nil { + t.Fatalf("unable to parse options: %v", err) + } + + if !reflect.DeepEqual(opts, test.output) { + t.Fatalf("wrong result, want:\n %#v\ngot:\n %#v", test.output, opts) + } + }) + } +} + +var invalidOptsTests = []struct { + input []string + err string +}{ + { + []string{"=bar", "bar=baz", "k="}, + "Fatal: empty key is not a valid option", + }, + { + []string{"x=1", "foo=bar", "y=2", "foo=baz"}, + `Fatal: key "foo" present more than once`, + }, +} + +func TestParseInvalidOptions(t *testing.T) { + for _, test := range invalidOptsTests { + t.Run(test.err, func(t *testing.T) { + _, err := Parse(test.input) + if err == nil { + t.Fatalf("expected error (%v) not found, err is nil", test.err) + } + + if err.Error() != test.err { + t.Fatalf("expected error %q, got %q", test.err, err.Error()) + } + }) + } +} + +var extractTests = []struct { + input Options + ns string + output Options +}{ + { + input: Options{ + "foo.bar:": "baz", + "s3.timeout": "10s", + "sftp.timeout": "5s", + "global": "foobar", + }, + ns: "s3", + output: Options{ + "timeout": "10s", + }, + }, +} + +func TestOptionsExtract(t *testing.T) { + for _, test := range extractTests { + t.Run(test.ns, func(t *testing.T) { + opts := test.input.Extract(test.ns) + + if !reflect.DeepEqual(opts, test.output) { + t.Fatalf("wrong result, want:\n %#v\ngot:\n %#v", test.output, opts) + } + }) + } +} + +// Target is used for Apply() tests +type Target struct { + Name string `option:"name"` + ID int `option:"id"` + Timeout time.Duration `option:"timeout"` + Other string +} + +var setTests = []struct { + input Options + output Target +}{ + { + Options{ + "name": "foobar", + }, + Target{ + Name: "foobar", + }, + }, + { + Options{ + "name": "foobar", + "id": "1234", + }, + Target{ + Name: "foobar", + ID: 1234, + }, + }, + { + Options{ + "timeout": "10m3s", + }, + Target{ + Timeout: 
time.Duration(10*time.Minute + 3*time.Second), + }, + }, +} + +func TestOptionsApply(t *testing.T) { + for i, test := range setTests { + t.Run(fmt.Sprintf("test-%d", i), func(t *testing.T) { + var dst Target + err := test.input.Apply("", &dst) + if err != nil { + t.Fatal(err) + } + + if dst != test.output { + t.Fatalf("wrong result, want:\n %#v\ngot:\n %#v", test.output, dst) + } + }) + } +} + +var invalidSetTests = []struct { + input Options + namespace string + err string +}{ + { + Options{ + "first_name": "foobar", + }, + "ns", + "Fatal: option ns.first_name is not known", + }, + { + Options{ + "id": "foobar", + }, + "ns", + `strconv.ParseInt: parsing "foobar": invalid syntax`, + }, + { + Options{ + "timeout": "2134", + }, + "ns", + `time: missing unit in duration 2134`, + }, +} + +func TestOptionsApplyInvalid(t *testing.T) { + for i, test := range invalidSetTests { + t.Run(fmt.Sprintf("test-%d", i), func(t *testing.T) { + var dst Target + err := test.input.Apply(test.namespace, &dst) + if err == nil { + t.Fatalf("expected error %v not found", test.err) + } + + if err.Error() != test.err { + t.Fatalf("expected error %q, got %q", test.err, err.Error()) + } + }) + } +} + +func TestListOptions(t *testing.T) { + var teststruct = struct { + Foo string `option:"foo" help:"bar text help"` + }{} + + var tests = []struct { + cfg interface{} + opts []Help + }{ + { + struct { + Foo string `option:"foo" help:"bar text help"` + }{}, + []Help{ + {Name: "foo", Text: "bar text help"}, + }, + }, + { + struct { + Foo string `option:"foo" help:"bar text help"` + Bar string `option:"bar" help:"bar text help"` + }{}, + []Help{ + {Name: "foo", Text: "bar text help"}, + {Name: "bar", Text: "bar text help"}, + }, + }, + { + struct { + Bar string `option:"bar" help:"bar text help"` + Foo string `option:"foo" help:"bar text help"` + }{}, + []Help{ + {Name: "bar", Text: "bar text help"}, + {Name: "foo", Text: "bar text help"}, + }, + }, + { + &teststruct, + []Help{ + {Name: "foo", Text: "bar text help"}, + }, + }, + } + + for _, test := range tests { + t.Run("", func(t *testing.T) { + opts := listOptions(test.cfg) + if !reflect.DeepEqual(opts, test.opts) { + t.Fatalf("wrong opts, want:\n %v\ngot:\n %v", test.opts, opts) + } + }) + } +} + +func TestAppendAllOptions(t *testing.T) { + var tests = []struct { + cfgs map[string]interface{} + opts []Help + }{ + { + map[string]interface{}{ + "local": struct { + Foo string `option:"foo" help:"bar text help"` + }{}, + "sftp": struct { + Foo string `option:"foo" help:"bar text help2"` + Bar string `option:"bar" help:"bar text help"` + }{}, + }, + []Help{ + {Namespace: "local", Name: "foo", Text: "bar text help"}, + {Namespace: "sftp", Name: "bar", Text: "bar text help"}, + {Namespace: "sftp", Name: "foo", Text: "bar text help2"}, + }, + }, + } + + for _, test := range tests { + t.Run("", func(t *testing.T) { + var opts []Help + for ns, cfg := range test.cfgs { + opts = appendAllOptions(opts, ns, cfg) + } + + if !reflect.DeepEqual(opts, test.opts) { + t.Fatalf("wrong list, want:\n %v\ngot:\n %v", test.opts, opts) + } + }) + } +} diff --git a/internal/pack/doc.go b/internal/pack/doc.go new file mode 100644 index 000000000..025e84737 --- /dev/null +++ b/internal/pack/doc.go @@ -0,0 +1,2 @@ +// Package pack provides functions for combining and parsing pack files. 
+package pack diff --git a/internal/pack/pack.go b/internal/pack/pack.go new file mode 100644 index 000000000..5bf046304 --- /dev/null +++ b/internal/pack/pack.go @@ -0,0 +1,327 @@ +package pack + +import ( + "bytes" + "encoding/binary" + "fmt" + "io" + "sync" + + "github.com/restic/restic/internal/debug" + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/restic" + + "github.com/restic/restic/internal/crypto" +) + +// Packer is used to create a new Pack. +type Packer struct { + blobs []restic.Blob + + bytes uint + k *crypto.Key + wr io.Writer + + m sync.Mutex +} + +// NewPacker returns a new Packer that can be used to pack blobs +// together. If wr is nil, a bytes.Buffer is used. +func NewPacker(k *crypto.Key, wr io.Writer) *Packer { + if wr == nil { + wr = bytes.NewBuffer(nil) + } + return &Packer{k: k, wr: wr} +} + +// Add saves the data read from rd as a new blob to the packer. Returned is the +// number of bytes written to the pack. +func (p *Packer) Add(t restic.BlobType, id restic.ID, data []byte) (int, error) { + p.m.Lock() + defer p.m.Unlock() + + c := restic.Blob{Type: t, ID: id} + + n, err := p.wr.Write(data) + c.Length = uint(n) + c.Offset = p.bytes + p.bytes += uint(n) + p.blobs = append(p.blobs, c) + + return n, errors.Wrap(err, "Write") +} + +var entrySize = uint(binary.Size(restic.BlobType(0)) + binary.Size(uint32(0)) + len(restic.ID{})) + +// headerEntry is used with encoding/binary to read and write header entries +type headerEntry struct { + Type uint8 + Length uint32 + ID restic.ID +} + +// Finalize writes the header for all added blobs and finalizes the pack. +// Returned are the number of bytes written, including the header. If the +// underlying writer implements io.Closer, it is closed. +func (p *Packer) Finalize() (uint, error) { + p.m.Lock() + defer p.m.Unlock() + + bytesWritten := p.bytes + + hdrBuf := bytes.NewBuffer(nil) + bytesHeader, err := p.writeHeader(hdrBuf) + if err != nil { + return 0, err + } + + encryptedHeader := make([]byte, 0, hdrBuf.Len()+p.k.Overhead()+p.k.NonceSize()) + nonce := crypto.NewRandomNonce() + encryptedHeader = append(encryptedHeader, nonce...) + encryptedHeader = p.k.Seal(encryptedHeader, nonce, hdrBuf.Bytes(), nil) + + // append the header + n, err := p.wr.Write(encryptedHeader) + if err != nil { + return 0, errors.Wrap(err, "Write") + } + + hdrBytes := restic.CiphertextLength(int(bytesHeader)) + if n != hdrBytes { + return 0, errors.New("wrong number of bytes written") + } + + bytesWritten += uint(hdrBytes) + + // write length + err = binary.Write(p.wr, binary.LittleEndian, uint32(restic.CiphertextLength(len(p.blobs)*int(entrySize)))) + if err != nil { + return 0, errors.Wrap(err, "binary.Write") + } + bytesWritten += uint(binary.Size(uint32(0))) + + p.bytes = uint(bytesWritten) + + if w, ok := p.wr.(io.Closer); ok { + return bytesWritten, w.Close() + } + + return bytesWritten, nil +} + +// writeHeader constructs and writes the header to wr. 
+func (p *Packer) writeHeader(wr io.Writer) (bytesWritten uint, err error) { + for _, b := range p.blobs { + entry := headerEntry{ + Length: uint32(b.Length), + ID: b.ID, + } + + switch b.Type { + case restic.DataBlob: + entry.Type = 0 + case restic.TreeBlob: + entry.Type = 1 + default: + return 0, errors.Errorf("invalid blob type %v", b.Type) + } + + err := binary.Write(wr, binary.LittleEndian, entry) + if err != nil { + return bytesWritten, errors.Wrap(err, "binary.Write") + } + + bytesWritten += entrySize + } + + return +} + +// Size returns the number of bytes written so far. +func (p *Packer) Size() uint { + p.m.Lock() + defer p.m.Unlock() + + return p.bytes +} + +// Count returns the number of blobs in this packer. +func (p *Packer) Count() int { + p.m.Lock() + defer p.m.Unlock() + + return len(p.blobs) +} + +// Blobs returns the slice of blobs that have been written. +func (p *Packer) Blobs() []restic.Blob { + p.m.Lock() + defer p.m.Unlock() + + return p.blobs +} + +// Writer return the underlying writer. +func (p *Packer) Writer() io.Writer { + return p.wr +} + +func (p *Packer) String() string { + return fmt.Sprintf("", len(p.blobs), p.bytes) +} + +var ( + // size of the header-length field at the end of the file + headerLengthSize = binary.Size(uint32(0)) + // we require at least one entry in the header, and one blob for a pack file + minFileSize = entrySize + crypto.Extension + uint(headerLengthSize) +) + +const ( + maxHeaderSize = 16 * 1024 * 1024 + // number of header enries to download as part of header-length request + eagerEntries = 15 +) + +// readRecords reads up to max records from the underlying ReaderAt, returning +// the raw header, the total number of records in the header, and any error. +// If the header contains fewer than max entries, the header is truncated to +// the appropriate size. +func readRecords(rd io.ReaderAt, size int64, max int) ([]byte, int, error) { + var bufsize int + bufsize += max * int(entrySize) + bufsize += crypto.Extension + bufsize += headerLengthSize + + if bufsize > int(size) { + bufsize = int(size) + } + + b := make([]byte, bufsize) + off := size - int64(bufsize) + if _, err := rd.ReadAt(b, off); err != nil { + return nil, 0, err + } + + hlen := binary.LittleEndian.Uint32(b[len(b)-headerLengthSize:]) + b = b[:len(b)-headerLengthSize] + debug.Log("header length: %v", hlen) + + var err error + switch { + case hlen == 0: + err = InvalidFileError{Message: "header length is zero"} + case hlen < crypto.Extension: + err = InvalidFileError{Message: "header length is too small"} + case (hlen-crypto.Extension)%uint32(entrySize) != 0: + err = InvalidFileError{Message: "header length is invalid"} + case int64(hlen) > size-int64(headerLengthSize): + err = InvalidFileError{Message: "header is larger than file"} + case int64(hlen) > maxHeaderSize: + err = InvalidFileError{Message: "header is larger than maxHeaderSize"} + } + if err != nil { + return nil, 0, errors.Wrap(err, "readHeader") + } + + total := (int(hlen) - crypto.Extension) / int(entrySize) + if total < max { + // truncate to the beginning of the pack header + b = b[len(b)-int(hlen):] + } + + return b, total, nil +} + +// readHeader reads the header at the end of rd. size is the length of the +// whole data accessible in rd. 
+func readHeader(rd io.ReaderAt, size int64) ([]byte, error) { + debug.Log("size: %v", size) + if size < int64(minFileSize) { + err := InvalidFileError{Message: "file is too small"} + return nil, errors.Wrap(err, "readHeader") + } + + // assuming extra request is significantly slower than extra bytes download, + // eagerly download eagerEntries header entries as part of header-length request. + // only make second request if actual number of entries is greater than eagerEntries + + b, c, err := readRecords(rd, size, eagerEntries) + if err != nil { + return nil, err + } + if c <= eagerEntries { + // eager read sufficed, return what we got + return b, nil + } + b, _, err = readRecords(rd, size, c) + if err != nil { + return nil, err + } + return b, nil +} + +// InvalidFileError is return when a file is found that is not a pack file. +type InvalidFileError struct { + Message string +} + +func (e InvalidFileError) Error() string { + return e.Message +} + +// List returns the list of entries found in a pack file. +func List(k *crypto.Key, rd io.ReaderAt, size int64) (entries []restic.Blob, err error) { + buf, err := readHeader(rd, size) + if err != nil { + return nil, err + } + + if len(buf) < k.NonceSize()+k.Overhead() { + return nil, errors.New("invalid header, too small") + } + + nonce, buf := buf[:k.NonceSize()], buf[k.NonceSize():] + buf, err = k.Open(buf[:0], nonce, buf, nil) + if err != nil { + return nil, err + } + + hdrRd := bytes.NewReader(buf) + + entries = make([]restic.Blob, 0, uint(len(buf))/entrySize) + + pos := uint(0) + for { + e := headerEntry{} + err = binary.Read(hdrRd, binary.LittleEndian, &e) + if errors.Cause(err) == io.EOF { + break + } + + if err != nil { + return nil, errors.Wrap(err, "binary.Read") + } + + entry := restic.Blob{ + Length: uint(e.Length), + ID: e.ID, + Offset: pos, + } + + switch e.Type { + case 0: + entry.Type = restic.DataBlob + case 1: + entry.Type = restic.TreeBlob + default: + return nil, errors.Errorf("invalid type %d", e.Type) + } + + entries = append(entries, entry) + + pos += uint(e.Length) + } + + return entries, nil +} diff --git a/internal/pack/pack_internal_test.go b/internal/pack/pack_internal_test.go new file mode 100644 index 000000000..6694b7333 --- /dev/null +++ b/internal/pack/pack_internal_test.go @@ -0,0 +1,110 @@ +package pack + +import ( + "bytes" + "encoding/binary" + "io" + "testing" + + "github.com/restic/restic/internal/crypto" + rtest "github.com/restic/restic/internal/test" +) + +type countingReaderAt struct { + delegate io.ReaderAt + invocationCount int +} + +func (rd *countingReaderAt) ReadAt(p []byte, off int64) (n int, err error) { + rd.invocationCount++ + return rd.delegate.ReadAt(p, off) +} + +func TestReadHeaderEagerLoad(t *testing.T) { + + testReadHeader := func(dataSize, entryCount, expectedReadInvocationCount int) { + expectedHeader := rtest.Random(0, entryCount*int(entrySize)+crypto.Extension) + + buf := &bytes.Buffer{} + buf.Write(rtest.Random(0, dataSize)) // pack blobs data + buf.Write(expectedHeader) // pack header + binary.Write(buf, binary.LittleEndian, uint32(len(expectedHeader))) // pack header length + + rd := &countingReaderAt{delegate: bytes.NewReader(buf.Bytes())} + + header, err := readHeader(rd, int64(buf.Len())) + rtest.OK(t, err) + + rtest.Equals(t, expectedHeader, header) + rtest.Equals(t, expectedReadInvocationCount, rd.invocationCount) + } + + // basic + testReadHeader(100, 1, 1) + + // header entries == eager entries + testReadHeader(100, eagerEntries-1, 1) + testReadHeader(100, eagerEntries, 
1) + testReadHeader(100, eagerEntries+1, 2) + + // file size == eager header load size + eagerLoadSize := int((eagerEntries * entrySize) + crypto.Extension) + headerSize := int(1*entrySize) + crypto.Extension + dataSize := eagerLoadSize - headerSize - binary.Size(uint32(0)) + testReadHeader(dataSize-1, 1, 1) + testReadHeader(dataSize, 1, 1) + testReadHeader(dataSize+1, 1, 1) + testReadHeader(dataSize+2, 1, 1) + testReadHeader(dataSize+3, 1, 1) + testReadHeader(dataSize+4, 1, 1) +} + +func TestReadRecords(t *testing.T) { + testReadRecords := func(dataSize, entryCount, totalRecords int) { + totalHeader := rtest.Random(0, totalRecords*int(entrySize)+crypto.Extension) + off := len(totalHeader) - (entryCount*int(entrySize) + crypto.Extension) + if off < 0 { + off = 0 + } + expectedHeader := totalHeader[off:] + + buf := &bytes.Buffer{} + buf.Write(rtest.Random(0, dataSize)) // pack blobs data + buf.Write(totalHeader) // pack header + binary.Write(buf, binary.LittleEndian, uint32(len(totalHeader))) // pack header length + + rd := bytes.NewReader(buf.Bytes()) + + header, count, err := readRecords(rd, int64(rd.Len()), entryCount) + rtest.OK(t, err) + rtest.Equals(t, expectedHeader, header) + rtest.Equals(t, totalRecords, count) + } + + // basic + testReadRecords(100, 1, 1) + testReadRecords(100, 0, 1) + testReadRecords(100, 1, 0) + + // header entries ~ eager entries + testReadRecords(100, eagerEntries, eagerEntries-1) + testReadRecords(100, eagerEntries, eagerEntries) + testReadRecords(100, eagerEntries, eagerEntries+1) + + // file size == eager header load size + eagerLoadSize := int((eagerEntries * entrySize) + crypto.Extension) + headerSize := int(1*entrySize) + crypto.Extension + dataSize := eagerLoadSize - headerSize - binary.Size(uint32(0)) + testReadRecords(dataSize-1, 1, 1) + testReadRecords(dataSize, 1, 1) + testReadRecords(dataSize+1, 1, 1) + testReadRecords(dataSize+2, 1, 1) + testReadRecords(dataSize+3, 1, 1) + testReadRecords(dataSize+4, 1, 1) + + for i := 0; i < 2; i++ { + for j := 0; j < 2; j++ { + testReadRecords(dataSize, i, j) + } + } +} diff --git a/internal/pack/pack_test.go b/internal/pack/pack_test.go new file mode 100644 index 000000000..12e3600bb --- /dev/null +++ b/internal/pack/pack_test.go @@ -0,0 +1,145 @@ +package pack_test + +import ( + "bytes" + "context" + "crypto/rand" + "crypto/sha256" + "encoding/binary" + "encoding/json" + "io" + "testing" + + "github.com/restic/restic/internal/backend/mem" + "github.com/restic/restic/internal/crypto" + "github.com/restic/restic/internal/pack" + "github.com/restic/restic/internal/restic" + rtest "github.com/restic/restic/internal/test" +) + +var testLens = []int{23, 31650, 25860, 10928, 13769, 19862, 5211, 127, 13690, 30231} + +type Buf struct { + data []byte + id restic.ID +} + +func newPack(t testing.TB, k *crypto.Key, lengths []int) ([]Buf, []byte, uint) { + bufs := []Buf{} + + for _, l := range lengths { + b := make([]byte, l) + _, err := io.ReadFull(rand.Reader, b) + rtest.OK(t, err) + h := sha256.Sum256(b) + bufs = append(bufs, Buf{data: b, id: h}) + } + + // pack blobs + p := pack.NewPacker(k, nil) + for _, b := range bufs { + p.Add(restic.TreeBlob, b.id, b.data) + } + + _, err := p.Finalize() + rtest.OK(t, err) + + packData := p.Writer().(*bytes.Buffer).Bytes() + return bufs, packData, p.Size() +} + +func verifyBlobs(t testing.TB, bufs []Buf, k *crypto.Key, rd io.ReaderAt, packSize uint) { + written := 0 + for _, buf := range bufs { + written += len(buf.data) + } + // header length + written += binary.Size(uint32(0)) + 
// header + header crypto + headerSize := len(bufs) * (binary.Size(restic.BlobType(0)) + binary.Size(uint32(0)) + len(restic.ID{})) + written += restic.CiphertextLength(headerSize) + + // check length + rtest.Equals(t, uint(written), packSize) + + // read and parse it again + entries, err := pack.List(k, rd, int64(packSize)) + rtest.OK(t, err) + rtest.Equals(t, len(entries), len(bufs)) + + var buf []byte + for i, b := range bufs { + e := entries[i] + rtest.Equals(t, b.id, e.ID) + + if len(buf) < int(e.Length) { + buf = make([]byte, int(e.Length)) + } + buf = buf[:int(e.Length)] + n, err := rd.ReadAt(buf, int64(e.Offset)) + rtest.OK(t, err) + buf = buf[:n] + + rtest.Assert(t, bytes.Equal(b.data, buf), + "data for blob %v doesn't match", i) + } +} + +func TestCreatePack(t *testing.T) { + // create random keys + k := crypto.NewRandomKey() + + bufs, packData, packSize := newPack(t, k, testLens) + rtest.Equals(t, uint(len(packData)), packSize) + verifyBlobs(t, bufs, k, bytes.NewReader(packData), packSize) +} + +var blobTypeJSON = []struct { + t restic.BlobType + res string +}{ + {restic.DataBlob, `"data"`}, + {restic.TreeBlob, `"tree"`}, +} + +func TestBlobTypeJSON(t *testing.T) { + for _, test := range blobTypeJSON { + // test serialize + buf, err := json.Marshal(test.t) + rtest.OK(t, err) + rtest.Equals(t, test.res, string(buf)) + + // test unserialize + var v restic.BlobType + err = json.Unmarshal([]byte(test.res), &v) + rtest.OK(t, err) + rtest.Equals(t, test.t, v) + } +} + +func TestUnpackReadSeeker(t *testing.T) { + // create random keys + k := crypto.NewRandomKey() + + bufs, packData, packSize := newPack(t, k, testLens) + + b := mem.New() + id := restic.Hash(packData) + + handle := restic.Handle{Type: restic.DataFile, Name: id.String()} + rtest.OK(t, b.Save(context.TODO(), handle, restic.NewByteReader(packData))) + verifyBlobs(t, bufs, k, restic.ReaderAt(b, handle), packSize) +} + +func TestShortPack(t *testing.T) { + k := crypto.NewRandomKey() + + bufs, packData, packSize := newPack(t, k, []int{23}) + + b := mem.New() + id := restic.Hash(packData) + + handle := restic.Handle{Type: restic.DataFile, Name: id.String()} + rtest.OK(t, b.Save(context.TODO(), handle, restic.NewByteReader(packData))) + verifyBlobs(t, bufs, k, restic.ReaderAt(b, handle), packSize) +} diff --git a/internal/repository/doc.go b/internal/repository/doc.go new file mode 100644 index 000000000..cb98334c4 --- /dev/null +++ b/internal/repository/doc.go @@ -0,0 +1,28 @@ +// Package repository implements a restic repository on top of a backend. In +// the following the abstractions used for this package are listed. More +// information can be found in the restic design document. +// +// File +// +// A file is a named handle for some data saved in the backend. For the local +// backend, this corresponds to actual files saved to disk. Usually, the SHA256 +// hash of the content is used for a file's name (hexadecimal, in lower-case +// ASCII characters). An exception is the file `config`. Most files are +// encrypted before being saved in a backend. This means that the name is the +// hash of the ciphertext. +// +// Blob +// +// A blob is a number of bytes that has a type (data or tree). Blobs are +// identified by an ID, which is the SHA256 hash of the blobs' contents. One or +// more blobs are bundled together in a Pack and then saved to the backend. +// Blobs are always encrypted before being bundled in a Pack. 
+// +// Pack +// +// A Pack is a File in the backend that contains one or more (encrypted) blobs, +// followed by a header at the end of the Pack. The header is encrypted and +// contains the ID, type, length and offset for each blob contained in the +// Pack. +// +package repository diff --git a/internal/repository/index.go b/internal/repository/index.go new file mode 100644 index 000000000..8d6d64c3e --- /dev/null +++ b/internal/repository/index.go @@ -0,0 +1,569 @@ +package repository + +import ( + "context" + "encoding/json" + "io" + "sync" + "time" + + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/restic" + + "github.com/restic/restic/internal/debug" +) + +// Index holds a lookup table for id -> pack. +type Index struct { + m sync.Mutex + pack map[restic.BlobHandle][]indexEntry + treePacks restic.IDs + + final bool // set to true for all indexes read from the backend ("finalized") + id restic.ID // set to the ID of the index when it's finalized + supersedes restic.IDs + created time.Time +} + +type indexEntry struct { + packID restic.ID + offset uint + length uint +} + +// NewIndex returns a new index. +func NewIndex() *Index { + return &Index{ + pack: make(map[restic.BlobHandle][]indexEntry), + created: time.Now(), + } +} + +func (idx *Index) store(blob restic.PackedBlob) { + newEntry := indexEntry{ + packID: blob.PackID, + offset: blob.Offset, + length: blob.Length, + } + h := restic.BlobHandle{ID: blob.ID, Type: blob.Type} + idx.pack[h] = append(idx.pack[h], newEntry) +} + +// Final returns true iff the index is already written to the repository, it is +// finalized. +func (idx *Index) Final() bool { + idx.m.Lock() + defer idx.m.Unlock() + + return idx.final +} + +const ( + indexMinBlobs = 20 + indexMaxBlobs = 2000 + indexMinAge = 2 * time.Minute + indexMaxAge = 15 * time.Minute +) + +// IndexFull returns true iff the index is "full enough" to be saved as a preliminary index. +var IndexFull = func(idx *Index) bool { + idx.m.Lock() + defer idx.m.Unlock() + + debug.Log("checking whether index %p is full", idx) + + packs := len(idx.pack) + age := time.Now().Sub(idx.created) + + if age > indexMaxAge { + debug.Log("index %p is old enough", idx, age) + return true + } + + if packs < indexMinBlobs || age < indexMinAge { + debug.Log("index %p only has %d packs or is too young (%v)", idx, packs, age) + return false + } + + if packs > indexMaxBlobs { + debug.Log("index %p has %d packs", idx, packs) + return true + } + + debug.Log("index %p is not full", idx) + return false +} + +// Store remembers the id and pack in the index. An existing entry will be +// silently overwritten. +func (idx *Index) Store(blob restic.PackedBlob) { + idx.m.Lock() + defer idx.m.Unlock() + + if idx.final { + panic("store new item in finalized index") + } + + debug.Log("%v", blob) + + idx.store(blob) +} + +// Lookup queries the index for the blob ID and returns a restic.PackedBlob. +func (idx *Index) Lookup(id restic.ID, tpe restic.BlobType) (blobs []restic.PackedBlob, found bool) { + idx.m.Lock() + defer idx.m.Unlock() + + h := restic.BlobHandle{ID: id, Type: tpe} + + if packs, ok := idx.pack[h]; ok { + blobs = make([]restic.PackedBlob, 0, len(packs)) + + for _, p := range packs { + blob := restic.PackedBlob{ + Blob: restic.Blob{ + Type: tpe, + Length: p.length, + ID: id, + Offset: p.offset, + }, + PackID: p.packID, + } + + blobs = append(blobs, blob) + } + + return blobs, true + } + + return nil, false +} + +// ListPack returns a list of blobs contained in a pack. 
+func (idx *Index) ListPack(id restic.ID) (list []restic.PackedBlob) { + idx.m.Lock() + defer idx.m.Unlock() + + for h, packList := range idx.pack { + for _, entry := range packList { + if entry.packID == id { + list = append(list, restic.PackedBlob{ + Blob: restic.Blob{ + ID: h.ID, + Type: h.Type, + Length: entry.length, + Offset: entry.offset, + }, + PackID: entry.packID, + }) + } + } + } + + return list +} + +// Has returns true iff the id is listed in the index. +func (idx *Index) Has(id restic.ID, tpe restic.BlobType) bool { + idx.m.Lock() + defer idx.m.Unlock() + + h := restic.BlobHandle{ID: id, Type: tpe} + + _, ok := idx.pack[h] + return ok +} + +// LookupSize returns the length of the plaintext content of the blob with the +// given id. +func (idx *Index) LookupSize(id restic.ID, tpe restic.BlobType) (plaintextLength uint, found bool) { + blobs, found := idx.Lookup(id, tpe) + if !found { + return 0, found + } + + return uint(restic.PlaintextLength(int(blobs[0].Length))), true +} + +// Supersedes returns the list of indexes this index supersedes, if any. +func (idx *Index) Supersedes() restic.IDs { + return idx.supersedes +} + +// AddToSupersedes adds the ids to the list of indexes superseded by this +// index. If the index has already been finalized, an error is returned. +func (idx *Index) AddToSupersedes(ids ...restic.ID) error { + idx.m.Lock() + defer idx.m.Unlock() + + if idx.final { + return errors.New("index already finalized") + } + + idx.supersedes = append(idx.supersedes, ids...) + return nil +} + +// Each returns a channel that yields all blobs known to the index. When the +// context is cancelled, the background goroutine terminates. This blocks any +// modification of the index. +func (idx *Index) Each(ctx context.Context) <-chan restic.PackedBlob { + idx.m.Lock() + + ch := make(chan restic.PackedBlob) + + go func() { + defer idx.m.Unlock() + defer func() { + close(ch) + }() + + for h, packs := range idx.pack { + for _, blob := range packs { + select { + case <-ctx.Done(): + return + case ch <- restic.PackedBlob{ + Blob: restic.Blob{ + ID: h.ID, + Type: h.Type, + Offset: blob.offset, + Length: blob.length, + }, + PackID: blob.packID, + }: + } + } + } + }() + + return ch +} + +// Packs returns all packs in this index +func (idx *Index) Packs() restic.IDSet { + idx.m.Lock() + defer idx.m.Unlock() + + packs := restic.NewIDSet() + for _, list := range idx.pack { + for _, entry := range list { + packs.Insert(entry.packID) + } + } + + return packs +} + +// Count returns the number of blobs of type t in the index. +func (idx *Index) Count(t restic.BlobType) (n uint) { + debug.Log("counting blobs of type %v", t) + idx.m.Lock() + defer idx.m.Unlock() + + for h, list := range idx.pack { + if h.Type != t { + continue + } + + n += uint(len(list)) + } + + return +} + +type packJSON struct { + ID restic.ID `json:"id"` + Blobs []blobJSON `json:"blobs"` +} + +type blobJSON struct { + ID restic.ID `json:"id"` + Type restic.BlobType `json:"type"` + Offset uint `json:"offset"` + Length uint `json:"length"` +} + +// generatePackList returns a list of packs. +func (idx *Index) generatePackList() ([]*packJSON, error) { + list := []*packJSON{} + packs := make(map[restic.ID]*packJSON) + + for h, packedBlobs := range idx.pack { + for _, blob := range packedBlobs { + if blob.packID.IsNull() { + panic("null pack id") + } + + debug.Log("handle blob %v", h) + + if blob.packID.IsNull() { + debug.Log("blob %v has no packID! 
(offset %v, length %v)", + h, blob.offset, blob.length) + return nil, errors.Errorf("unable to serialize index: pack for blob %v hasn't been written yet", h) + } + + // see if pack is already in map + p, ok := packs[blob.packID] + if !ok { + // else create new pack + p = &packJSON{ID: blob.packID} + + // and append it to the list and map + list = append(list, p) + packs[p.ID] = p + } + + // add blob + p.Blobs = append(p.Blobs, blobJSON{ + ID: h.ID, + Type: h.Type, + Offset: blob.offset, + Length: blob.length, + }) + } + } + + debug.Log("done") + + return list, nil +} + +type jsonIndex struct { + Supersedes restic.IDs `json:"supersedes,omitempty"` + Packs []*packJSON `json:"packs"` +} + +// Encode writes the JSON serialization of the index to the writer w. +func (idx *Index) Encode(w io.Writer) error { + debug.Log("encoding index") + idx.m.Lock() + defer idx.m.Unlock() + + return idx.encode(w) +} + +// encode writes the JSON serialization of the index to the writer w. +func (idx *Index) encode(w io.Writer) error { + debug.Log("encoding index") + + list, err := idx.generatePackList() + if err != nil { + return err + } + + enc := json.NewEncoder(w) + idxJSON := jsonIndex{ + Supersedes: idx.supersedes, + Packs: list, + } + return enc.Encode(idxJSON) +} + +// Finalize sets the index to final and writes the JSON serialization to w. +func (idx *Index) Finalize(w io.Writer) error { + debug.Log("encoding index") + idx.m.Lock() + defer idx.m.Unlock() + + idx.final = true + + return idx.encode(w) +} + +// ID returns the ID of the index, if available. If the index is not yet +// finalized, an error is returned. +func (idx *Index) ID() (restic.ID, error) { + idx.m.Lock() + defer idx.m.Unlock() + + if !idx.final { + return restic.ID{}, errors.New("index not finalized") + } + + return idx.id, nil +} + +// SetID sets the ID the index has been written to. This requires that +// Finalize() has been called before, otherwise an error is returned. +func (idx *Index) SetID(id restic.ID) error { + idx.m.Lock() + defer idx.m.Unlock() + + if !idx.final { + return errors.New("index is not final") + } + + if !idx.id.IsNull() { + return errors.New("ID already set") + } + + debug.Log("ID set to %v", id) + idx.id = id + + return nil +} + +// Dump writes the pretty-printed JSON representation of the index to w. +func (idx *Index) Dump(w io.Writer) error { + debug.Log("dumping index") + idx.m.Lock() + defer idx.m.Unlock() + + list, err := idx.generatePackList() + if err != nil { + return err + } + + outer := jsonIndex{ + Supersedes: idx.Supersedes(), + Packs: list, + } + + buf, err := json.MarshalIndent(outer, "", " ") + if err != nil { + return err + } + + _, err = w.Write(append(buf, '\n')) + if err != nil { + return errors.Wrap(err, "Write") + } + + debug.Log("done") + + return nil +} + +// TreePacks returns a list of packs that contain only tree blobs. +func (idx *Index) TreePacks() restic.IDs { + return idx.treePacks +} + +// isErrOldIndex returns true if the error may be caused by an old index +// format. +func isErrOldIndex(err error) bool { + if e, ok := err.(*json.UnmarshalTypeError); ok && e.Value == "array" { + return true + } + + return false +} + +// ErrOldIndexFormat means an index with the old format was detected. +var ErrOldIndexFormat = errors.New("index has old format") + +// DecodeIndex loads and unserializes an index from rd. 
+func DecodeIndex(buf []byte) (idx *Index, err error) { + debug.Log("Start decoding index") + idxJSON := &jsonIndex{} + + err = json.Unmarshal(buf, idxJSON) + if err != nil { + debug.Log("Error %v", err) + + if isErrOldIndex(err) { + debug.Log("index is probably old format, trying that") + err = ErrOldIndexFormat + } + + return nil, errors.Wrap(err, "Decode") + } + + idx = NewIndex() + for _, pack := range idxJSON.Packs { + var data, tree bool + + for _, blob := range pack.Blobs { + idx.store(restic.PackedBlob{ + Blob: restic.Blob{ + Type: blob.Type, + ID: blob.ID, + Offset: blob.Offset, + Length: blob.Length, + }, + PackID: pack.ID, + }) + + switch blob.Type { + case restic.DataBlob: + data = true + case restic.TreeBlob: + tree = true + } + } + + if !data && tree { + idx.treePacks = append(idx.treePacks, pack.ID) + } + } + idx.supersedes = idxJSON.Supersedes + idx.final = true + + debug.Log("done") + return idx, nil +} + +// DecodeOldIndex loads and unserializes an index in the old format from rd. +func DecodeOldIndex(buf []byte) (idx *Index, err error) { + debug.Log("Start decoding old index") + list := []*packJSON{} + + err = json.Unmarshal(buf, &list) + if err != nil { + debug.Log("Error %#v", err) + return nil, errors.Wrap(err, "Decode") + } + + idx = NewIndex() + for _, pack := range list { + var data, tree bool + + for _, blob := range pack.Blobs { + idx.store(restic.PackedBlob{ + Blob: restic.Blob{ + Type: blob.Type, + ID: blob.ID, + Offset: blob.Offset, + Length: blob.Length, + }, + PackID: pack.ID, + }) + + switch blob.Type { + case restic.DataBlob: + data = true + case restic.TreeBlob: + tree = true + } + } + + if !data && tree { + idx.treePacks = append(idx.treePacks, pack.ID) + } + } + idx.final = true + + debug.Log("done") + return idx, nil +} + +// LoadIndexWithDecoder loads the index and decodes it with fn. 
+func LoadIndexWithDecoder(ctx context.Context, repo restic.Repository, id restic.ID, fn func([]byte) (*Index, error)) (idx *Index, err error) { + debug.Log("Loading index %v", id) + + buf, err := repo.LoadAndDecrypt(ctx, restic.IndexFile, id) + if err != nil { + return nil, err + } + + idx, err = fn(buf) + if err != nil { + debug.Log("error while decoding index %v: %v", id, err) + return nil, err + } + + idx.id = id + + return idx, nil +} diff --git a/internal/repository/index_test.go b/internal/repository/index_test.go new file mode 100644 index 000000000..599f243f3 --- /dev/null +++ b/internal/repository/index_test.go @@ -0,0 +1,492 @@ +package repository_test + +import ( + "bytes" + "math/rand" + "testing" + + "github.com/restic/restic/internal/repository" + "github.com/restic/restic/internal/restic" + rtest "github.com/restic/restic/internal/test" +) + +func TestIndexSerialize(t *testing.T) { + type testEntry struct { + id restic.ID + pack restic.ID + tpe restic.BlobType + offset, length uint + } + tests := []testEntry{} + + idx := repository.NewIndex() + + // create 50 packs with 20 blobs each + for i := 0; i < 50; i++ { + packID := restic.NewRandomID() + + pos := uint(0) + for j := 0; j < 20; j++ { + id := restic.NewRandomID() + length := uint(i*100 + j) + idx.Store(restic.PackedBlob{ + Blob: restic.Blob{ + Type: restic.DataBlob, + ID: id, + Offset: pos, + Length: length, + }, + PackID: packID, + }) + + tests = append(tests, testEntry{ + id: id, + pack: packID, + tpe: restic.DataBlob, + offset: pos, + length: length, + }) + + pos += length + } + } + + wr := bytes.NewBuffer(nil) + err := idx.Encode(wr) + rtest.OK(t, err) + + idx2, err := repository.DecodeIndex(wr.Bytes()) + rtest.OK(t, err) + rtest.Assert(t, idx2 != nil, + "nil returned for decoded index") + + wr2 := bytes.NewBuffer(nil) + err = idx2.Encode(wr2) + rtest.OK(t, err) + + for _, testBlob := range tests { + list, found := idx.Lookup(testBlob.id, testBlob.tpe) + rtest.Assert(t, found, "Expected to find blob id %v", testBlob.id.Str()) + + if len(list) != 1 { + t.Errorf("expected one result for blob %v, got %v: %v", testBlob.id.Str(), len(list), list) + } + result := list[0] + + rtest.Equals(t, testBlob.pack, result.PackID) + rtest.Equals(t, testBlob.tpe, result.Type) + rtest.Equals(t, testBlob.offset, result.Offset) + rtest.Equals(t, testBlob.length, result.Length) + + list2, found := idx2.Lookup(testBlob.id, testBlob.tpe) + rtest.Assert(t, found, "Expected to find blob id %v", testBlob.id) + + if len(list2) != 1 { + t.Errorf("expected one result for blob %v, got %v: %v", testBlob.id.Str(), len(list2), list2) + } + result2 := list2[0] + + rtest.Equals(t, testBlob.pack, result2.PackID) + rtest.Equals(t, testBlob.tpe, result2.Type) + rtest.Equals(t, testBlob.offset, result2.Offset) + rtest.Equals(t, testBlob.length, result2.Length) + } + + // add more blobs to idx + newtests := []testEntry{} + for i := 0; i < 10; i++ { + packID := restic.NewRandomID() + + pos := uint(0) + for j := 0; j < 10; j++ { + id := restic.NewRandomID() + length := uint(i*100 + j) + idx.Store(restic.PackedBlob{ + Blob: restic.Blob{ + Type: restic.DataBlob, + ID: id, + Offset: pos, + Length: length, + }, + PackID: packID, + }) + + newtests = append(newtests, testEntry{ + id: id, + pack: packID, + tpe: restic.DataBlob, + offset: pos, + length: length, + }) + + pos += length + } + } + + // serialize idx, unserialize to idx3 + wr3 := bytes.NewBuffer(nil) + err = idx.Finalize(wr3) + rtest.OK(t, err) + + rtest.Assert(t, idx.Final(), + "index not final after 
encoding") + + id := restic.NewRandomID() + rtest.OK(t, idx.SetID(id)) + id2, err := idx.ID() + rtest.Assert(t, id2.Equal(id), + "wrong ID returned: want %v, got %v", id, id2) + + idx3, err := repository.DecodeIndex(wr3.Bytes()) + rtest.OK(t, err) + rtest.Assert(t, idx3 != nil, + "nil returned for decoded index") + rtest.Assert(t, idx3.Final(), + "decoded index is not final") + + // all new blobs must be in the index + for _, testBlob := range newtests { + list, found := idx3.Lookup(testBlob.id, testBlob.tpe) + rtest.Assert(t, found, "Expected to find blob id %v", testBlob.id.Str()) + + if len(list) != 1 { + t.Errorf("expected one result for blob %v, got %v: %v", testBlob.id.Str(), len(list), list) + } + + blob := list[0] + + rtest.Equals(t, testBlob.pack, blob.PackID) + rtest.Equals(t, testBlob.tpe, blob.Type) + rtest.Equals(t, testBlob.offset, blob.Offset) + rtest.Equals(t, testBlob.length, blob.Length) + } +} + +func TestIndexSize(t *testing.T) { + idx := repository.NewIndex() + + packs := 200 + blobs := 100 + for i := 0; i < packs; i++ { + packID := restic.NewRandomID() + + pos := uint(0) + for j := 0; j < blobs; j++ { + id := restic.NewRandomID() + length := uint(i*100 + j) + idx.Store(restic.PackedBlob{ + Blob: restic.Blob{ + Type: restic.DataBlob, + ID: id, + Offset: pos, + Length: length, + }, + PackID: packID, + }) + + pos += length + } + } + + wr := bytes.NewBuffer(nil) + + err := idx.Encode(wr) + rtest.OK(t, err) + + t.Logf("Index file size for %d blobs in %d packs is %d", blobs*packs, packs, wr.Len()) +} + +// example index serialization from doc/Design.rst +var docExample = []byte(` +{ + "supersedes": [ + "ed54ae36197f4745ebc4b54d10e0f623eaaaedd03013eb7ae90df881b7781452" + ], + "packs": [ + { + "id": "73d04e6125cf3c28a299cc2f3cca3b78ceac396e4fcf9575e34536b26782413c", + "blobs": [ + { + "id": "3ec79977ef0cf5de7b08cd12b874cd0f62bbaf7f07f3497a5b1bbcc8cb39b1ce", + "type": "data", + "offset": 0, + "length": 25 + },{ + "id": "9ccb846e60d90d4eb915848add7aa7ea1e4bbabfc60e573db9f7bfb2789afbae", + "type": "tree", + "offset": 38, + "length": 100 + }, + { + "id": "d3dc577b4ffd38cc4b32122cabf8655a0223ed22edfd93b353dc0c3f2b0fdf66", + "type": "data", + "offset": 150, + "length": 123 + } + ] + } + ] +} +`) + +var docOldExample = []byte(` +[ { + "id": "73d04e6125cf3c28a299cc2f3cca3b78ceac396e4fcf9575e34536b26782413c", + "blobs": [ + { + "id": "3ec79977ef0cf5de7b08cd12b874cd0f62bbaf7f07f3497a5b1bbcc8cb39b1ce", + "type": "data", + "offset": 0, + "length": 25 + },{ + "id": "9ccb846e60d90d4eb915848add7aa7ea1e4bbabfc60e573db9f7bfb2789afbae", + "type": "tree", + "offset": 38, + "length": 100 + }, + { + "id": "d3dc577b4ffd38cc4b32122cabf8655a0223ed22edfd93b353dc0c3f2b0fdf66", + "type": "data", + "offset": 150, + "length": 123 + } + ] +} ] +`) + +var exampleTests = []struct { + id, packID restic.ID + tpe restic.BlobType + offset, length uint +}{ + { + restic.TestParseID("3ec79977ef0cf5de7b08cd12b874cd0f62bbaf7f07f3497a5b1bbcc8cb39b1ce"), + restic.TestParseID("73d04e6125cf3c28a299cc2f3cca3b78ceac396e4fcf9575e34536b26782413c"), + restic.DataBlob, 0, 25, + }, { + restic.TestParseID("9ccb846e60d90d4eb915848add7aa7ea1e4bbabfc60e573db9f7bfb2789afbae"), + restic.TestParseID("73d04e6125cf3c28a299cc2f3cca3b78ceac396e4fcf9575e34536b26782413c"), + restic.TreeBlob, 38, 100, + }, { + restic.TestParseID("d3dc577b4ffd38cc4b32122cabf8655a0223ed22edfd93b353dc0c3f2b0fdf66"), + restic.TestParseID("73d04e6125cf3c28a299cc2f3cca3b78ceac396e4fcf9575e34536b26782413c"), + restic.DataBlob, 150, 123, + }, +} + +var 
exampleLookupTest = struct { + packID restic.ID + blobs map[restic.ID]restic.BlobType +}{ + restic.TestParseID("73d04e6125cf3c28a299cc2f3cca3b78ceac396e4fcf9575e34536b26782413c"), + map[restic.ID]restic.BlobType{ + restic.TestParseID("3ec79977ef0cf5de7b08cd12b874cd0f62bbaf7f07f3497a5b1bbcc8cb39b1ce"): restic.DataBlob, + restic.TestParseID("9ccb846e60d90d4eb915848add7aa7ea1e4bbabfc60e573db9f7bfb2789afbae"): restic.TreeBlob, + restic.TestParseID("d3dc577b4ffd38cc4b32122cabf8655a0223ed22edfd93b353dc0c3f2b0fdf66"): restic.DataBlob, + }, +} + +func TestIndexUnserialize(t *testing.T) { + oldIdx := restic.IDs{restic.TestParseID("ed54ae36197f4745ebc4b54d10e0f623eaaaedd03013eb7ae90df881b7781452")} + + idx, err := repository.DecodeIndex(docExample) + rtest.OK(t, err) + + for _, test := range exampleTests { + list, found := idx.Lookup(test.id, test.tpe) + rtest.Assert(t, found, "Expected to find blob id %v", test.id.Str()) + + if len(list) != 1 { + t.Errorf("expected one result for blob %v, got %v: %v", test.id.Str(), len(list), list) + } + blob := list[0] + + t.Logf("looking for blob %v/%v, got %v", test.tpe, test.id.Str(), blob) + + rtest.Equals(t, test.packID, blob.PackID) + rtest.Equals(t, test.tpe, blob.Type) + rtest.Equals(t, test.offset, blob.Offset) + rtest.Equals(t, test.length, blob.Length) + } + + rtest.Equals(t, oldIdx, idx.Supersedes()) + + blobs := idx.ListPack(exampleLookupTest.packID) + if len(blobs) != len(exampleLookupTest.blobs) { + t.Fatalf("expected %d blobs in pack, got %d", len(exampleLookupTest.blobs), len(blobs)) + } + + for _, blob := range blobs { + b, ok := exampleLookupTest.blobs[blob.ID] + if !ok { + t.Errorf("unexpected blob %v found", blob.ID.Str()) + } + if blob.Type != b { + t.Errorf("unexpected type for blob %v: want %v, got %v", blob.ID.Str(), b, blob.Type) + } + } +} + +func BenchmarkDecodeIndex(b *testing.B) { + b.ResetTimer() + + for i := 0; i < b.N; i++ { + _, err := repository.DecodeIndex(docExample) + rtest.OK(b, err) + } +} + +func TestIndexUnserializeOld(t *testing.T) { + idx, err := repository.DecodeOldIndex(docOldExample) + rtest.OK(t, err) + + for _, test := range exampleTests { + list, found := idx.Lookup(test.id, test.tpe) + rtest.Assert(t, found, "Expected to find blob id %v", test.id.Str()) + + if len(list) != 1 { + t.Errorf("expected one result for blob %v, got %v: %v", test.id.Str(), len(list), list) + } + blob := list[0] + + rtest.Equals(t, test.packID, blob.PackID) + rtest.Equals(t, test.tpe, blob.Type) + rtest.Equals(t, test.offset, blob.Offset) + rtest.Equals(t, test.length, blob.Length) + } + + rtest.Equals(t, 0, len(idx.Supersedes())) +} + +func TestIndexPacks(t *testing.T) { + idx := repository.NewIndex() + packs := restic.NewIDSet() + + for i := 0; i < 20; i++ { + packID := restic.NewRandomID() + idx.Store(restic.PackedBlob{ + Blob: restic.Blob{ + Type: restic.DataBlob, + ID: restic.NewRandomID(), + Offset: 0, + Length: 23, + }, + PackID: packID, + }) + + packs.Insert(packID) + } + + idxPacks := idx.Packs() + rtest.Assert(t, packs.Equals(idxPacks), "packs in index do not match packs added to index") +} + +const maxPackSize = 16 * 1024 * 1024 + +// This function generates a (insecure) random ID, similar to NewRandomID +func NewRandomTestID(rng *rand.Rand) restic.ID { + id := restic.ID{} + rng.Read(id[:]) + return id +} + +func createRandomIndex(rng *rand.Rand) (idx *repository.Index, lookupID restic.ID) { + idx = repository.NewIndex() + + // create index with 200k pack files + for i := 0; i < 200000; i++ { + packID := NewRandomTestID(rng) + 
offset := 0 + for offset < maxPackSize { + size := 2000 + rand.Intn(4*1024*1024) + id := NewRandomTestID(rng) + idx.Store(restic.PackedBlob{ + PackID: packID, + Blob: restic.Blob{ + Type: restic.DataBlob, + ID: id, + Length: uint(size), + Offset: uint(offset), + }, + }) + + offset += size + + if rand.Float32() < 0.001 && lookupID.IsNull() { + lookupID = id + } + } + } + + return idx, lookupID +} + +func BenchmarkIndexHasUnknown(b *testing.B) { + idx, _ := createRandomIndex(rand.New(rand.NewSource(0))) + lookupID := restic.NewRandomID() + + b.ResetTimer() + + for i := 0; i < b.N; i++ { + idx.Has(lookupID, restic.DataBlob) + } +} + +func BenchmarkIndexHasKnown(b *testing.B) { + idx, lookupID := createRandomIndex(rand.New(rand.NewSource(0))) + + b.ResetTimer() + + for i := 0; i < b.N; i++ { + idx.Has(lookupID, restic.DataBlob) + } +} + +func TestIndexHas(t *testing.T) { + type testEntry struct { + id restic.ID + pack restic.ID + tpe restic.BlobType + offset, length uint + } + tests := []testEntry{} + + idx := repository.NewIndex() + + // create 50 packs with 20 blobs each + for i := 0; i < 50; i++ { + packID := restic.NewRandomID() + + pos := uint(0) + for j := 0; j < 20; j++ { + id := restic.NewRandomID() + length := uint(i*100 + j) + idx.Store(restic.PackedBlob{ + Blob: restic.Blob{ + Type: restic.DataBlob, + ID: id, + Offset: pos, + Length: length, + }, + PackID: packID, + }) + + tests = append(tests, testEntry{ + id: id, + pack: packID, + tpe: restic.DataBlob, + offset: pos, + length: length, + }) + + pos += length + } + } + + for _, testBlob := range tests { + rtest.Assert(t, idx.Has(testBlob.id, testBlob.tpe), "Index reports not having data blob added to it") + } + + rtest.Assert(t, !idx.Has(restic.NewRandomID(), restic.DataBlob), "Index reports having a data blob not added to it") + rtest.Assert(t, !idx.Has(tests[0].id, restic.TreeBlob), "Index reports having a tree blob added to it with the same id as a data blob") +} diff --git a/internal/repository/key.go b/internal/repository/key.go new file mode 100644 index 000000000..46e3b912f --- /dev/null +++ b/internal/repository/key.go @@ -0,0 +1,302 @@ +package repository + +import ( + "context" + "encoding/json" + "fmt" + "os" + "os/user" + "time" + + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/restic" + + "github.com/restic/restic/internal/backend" + "github.com/restic/restic/internal/crypto" + "github.com/restic/restic/internal/debug" +) + +var ( + // ErrNoKeyFound is returned when no key for the repository could be decrypted. + ErrNoKeyFound = errors.Fatal("wrong password or no key found") + + // ErrMaxKeysReached is returned when the maximum number of keys was checked and no key could be found. + ErrMaxKeysReached = errors.Fatal("maximum number of keys reached") +) + +// Key represents an encrypted master key for a repository. +type Key struct { + Created time.Time `json:"created"` + Username string `json:"username"` + Hostname string `json:"hostname"` + + KDF string `json:"kdf"` + N int `json:"N"` + R int `json:"r"` + P int `json:"p"` + Salt []byte `json:"salt"` + Data []byte `json:"data"` + + user *crypto.Key + master *crypto.Key + + name string +} + +// Params tracks the parameters used for the KDF. If not set, it will be +// calibrated on the first run of AddKey(). +var Params *crypto.Params + +var ( + // KDFTimeout specifies the maximum runtime for the KDF. + KDFTimeout = 500 * time.Millisecond + + // KDFMemory limits the memory the KDF is allowed to use. 
+ KDFMemory = 60 +) + +// createMasterKey creates a new master key in the given backend and encrypts +// it with the password. +func createMasterKey(s *Repository, password string) (*Key, error) { + return AddKey(context.TODO(), s, password, nil) +} + +// OpenKey tries do decrypt the key specified by name with the given password. +func OpenKey(ctx context.Context, s *Repository, name string, password string) (*Key, error) { + k, err := LoadKey(ctx, s, name) + if err != nil { + debug.Log("LoadKey(%v) returned error %v", name, err) + return nil, err + } + + // check KDF + if k.KDF != "scrypt" { + return nil, errors.New("only supported KDF is scrypt()") + } + + // derive user key + params := crypto.Params{ + N: k.N, + R: k.R, + P: k.P, + } + k.user, err = crypto.KDF(params, k.Salt, password) + if err != nil { + return nil, errors.Wrap(err, "crypto.KDF") + } + + // decrypt master keys + nonce, ciphertext := k.Data[:k.user.NonceSize()], k.Data[k.user.NonceSize():] + buf, err := k.user.Open(nil, nonce, ciphertext, nil) + if err != nil { + return nil, err + } + + // restore json + k.master = &crypto.Key{} + err = json.Unmarshal(buf, k.master) + if err != nil { + debug.Log("Unmarshal() returned error %v", err) + return nil, errors.Wrap(err, "Unmarshal") + } + k.name = name + + if !k.Valid() { + return nil, errors.New("Invalid key for repository") + } + + return k, nil +} + +// SearchKey tries to decrypt at most maxKeys keys in the backend with the +// given password. If none could be found, ErrNoKeyFound is returned. When +// maxKeys is reached, ErrMaxKeysReached is returned. When setting maxKeys to +// zero, all keys in the repo are checked. +func SearchKey(ctx context.Context, s *Repository, password string, maxKeys int, keyHint string) (k *Key, err error) { + checked := 0 + + if len(keyHint) > 0 { + id, err := restic.Find(s.Backend(), restic.KeyFile, keyHint) + + if err == nil { + key, err := OpenKey(ctx, s, id, password) + + if err == nil { + debug.Log("successfully opened hinted key %v", id) + return key, nil + } + + debug.Log("could not open hinted key %v", id) + } else { + debug.Log("Could not find hinted key %v", keyHint) + } + } + + listCtx, cancel := context.WithCancel(ctx) + defer cancel() + + // try at most maxKeys keys in repo + err = s.Backend().List(listCtx, restic.KeyFile, func(fi restic.FileInfo) error { + if maxKeys > 0 && checked > maxKeys { + return ErrMaxKeysReached + } + + _, err := restic.ParseID(fi.Name) + if err != nil { + debug.Log("rejecting key with invalid name: %v", fi.Name) + return nil + } + + debug.Log("trying key %q", fi.Name) + key, err := OpenKey(ctx, s, fi.Name, password) + if err != nil { + debug.Log("key %v returned error %v", fi.Name, err) + + // ErrUnauthenticated means the password is wrong, try the next key + if errors.Cause(err) == crypto.ErrUnauthenticated { + return nil + } + + return err + } + + debug.Log("successfully opened key %v", fi.Name) + k = key + cancel() + return nil + }) + + if err == context.Canceled { + err = nil + } + + if err != nil { + return nil, err + } + + if k == nil { + return nil, ErrNoKeyFound + } + + return k, nil +} + +// LoadKey loads a key from the backend. 
+func LoadKey(ctx context.Context, s *Repository, name string) (k *Key, err error) { + h := restic.Handle{Type: restic.KeyFile, Name: name} + data, err := backend.LoadAll(ctx, s.be, h) + if err != nil { + return nil, err + } + + k = &Key{} + err = json.Unmarshal(data, k) + if err != nil { + return nil, errors.Wrap(err, "Unmarshal") + } + + return k, nil +} + +// AddKey adds a new key to an already existing repository. +func AddKey(ctx context.Context, s *Repository, password string, template *crypto.Key) (*Key, error) { + // make sure we have valid KDF parameters + if Params == nil { + p, err := crypto.Calibrate(KDFTimeout, KDFMemory) + if err != nil { + return nil, errors.Wrap(err, "Calibrate") + } + + Params = &p + debug.Log("calibrated KDF parameters are %v", p) + } + + // fill meta data about key + newkey := &Key{ + Created: time.Now(), + KDF: "scrypt", + N: Params.N, + R: Params.R, + P: Params.P, + } + + hn, err := os.Hostname() + if err == nil { + newkey.Hostname = hn + } + + usr, err := user.Current() + if err == nil { + newkey.Username = usr.Username + } + + // generate random salt + newkey.Salt, err = crypto.NewSalt() + if err != nil { + panic("unable to read enough random bytes for salt: " + err.Error()) + } + + // call KDF to derive user key + newkey.user, err = crypto.KDF(*Params, newkey.Salt, password) + if err != nil { + return nil, err + } + + if template == nil { + // generate new random master keys + newkey.master = crypto.NewRandomKey() + } else { + // copy master keys from old key + newkey.master = template + } + + // encrypt master keys (as json) with user key + buf, err := json.Marshal(newkey.master) + if err != nil { + return nil, errors.Wrap(err, "Marshal") + } + + nonce := crypto.NewRandomNonce() + ciphertext := make([]byte, 0, len(buf)+newkey.user.Overhead()+newkey.user.NonceSize()) + ciphertext = append(ciphertext, nonce...) + ciphertext = newkey.user.Seal(ciphertext, nonce, buf, nil) + newkey.Data = ciphertext + + // dump as json + buf, err = json.Marshal(newkey) + if err != nil { + return nil, errors.Wrap(err, "Marshal") + } + + // store in repository and return + h := restic.Handle{ + Type: restic.KeyFile, + Name: restic.Hash(buf).String(), + } + + err = s.be.Save(ctx, h, restic.NewByteReader(buf)) + if err != nil { + return nil, err + } + + newkey.name = h.Name + + return newkey, nil +} + +func (k *Key) String() string { + if k == nil { + return "" + } + return fmt.Sprintf("", k.Username, k.Hostname, k.Created) +} + +// Name returns an identifier for the key. +func (k Key) Name() string { + return k.name +} + +// Valid tests whether the mac and encryption keys are valid (i.e. not zero) +func (k *Key) Valid() bool { + return k.user.Valid() && k.master.Valid() +} diff --git a/internal/repository/master_index.go b/internal/repository/master_index.go new file mode 100644 index 000000000..163884e4e --- /dev/null +++ b/internal/repository/master_index.go @@ -0,0 +1,258 @@ +package repository + +import ( + "context" + "sync" + + "github.com/restic/restic/internal/restic" + + "github.com/restic/restic/internal/debug" +) + +// MasterIndex is a collection of indexes and IDs of chunks that are in the process of being saved. +type MasterIndex struct { + idx []*Index + idxMutex sync.RWMutex +} + +// NewMasterIndex creates a new master index. +func NewMasterIndex() *MasterIndex { + return &MasterIndex{} +} + +// Lookup queries all known Indexes for the ID and returns the first match. 
+func (mi *MasterIndex) Lookup(id restic.ID, tpe restic.BlobType) (blobs []restic.PackedBlob, found bool) { + mi.idxMutex.RLock() + defer mi.idxMutex.RUnlock() + + for _, idx := range mi.idx { + blobs, found = idx.Lookup(id, tpe) + if found { + return + } + } + + return nil, false +} + +// LookupSize queries all known Indexes for the ID and returns the first match. +func (mi *MasterIndex) LookupSize(id restic.ID, tpe restic.BlobType) (uint, bool) { + mi.idxMutex.RLock() + defer mi.idxMutex.RUnlock() + + for _, idx := range mi.idx { + if size, found := idx.LookupSize(id, tpe); found { + return size, found + } + } + + return 0, false +} + +// ListPack returns the list of blobs in a pack. The first matching index is +// returned, or nil if no index contains information about the pack id. +func (mi *MasterIndex) ListPack(id restic.ID) (list []restic.PackedBlob) { + mi.idxMutex.RLock() + defer mi.idxMutex.RUnlock() + + for _, idx := range mi.idx { + list := idx.ListPack(id) + if len(list) > 0 { + return list + } + } + + return nil +} + +// Has queries all known Indexes for the ID and returns the first match. +func (mi *MasterIndex) Has(id restic.ID, tpe restic.BlobType) bool { + mi.idxMutex.RLock() + defer mi.idxMutex.RUnlock() + + for _, idx := range mi.idx { + if idx.Has(id, tpe) { + return true + } + } + + return false +} + +// Count returns the number of blobs of type t in the index. +func (mi *MasterIndex) Count(t restic.BlobType) (n uint) { + mi.idxMutex.RLock() + defer mi.idxMutex.RUnlock() + + var sum uint + for _, idx := range mi.idx { + sum += idx.Count(t) + } + + return sum +} + +// Insert adds a new index to the MasterIndex. +func (mi *MasterIndex) Insert(idx *Index) { + mi.idxMutex.Lock() + defer mi.idxMutex.Unlock() + + mi.idx = append(mi.idx, idx) +} + +// Remove deletes an index from the MasterIndex. +func (mi *MasterIndex) Remove(index *Index) { + mi.idxMutex.Lock() + defer mi.idxMutex.Unlock() + + for i, idx := range mi.idx { + if idx == index { + mi.idx = append(mi.idx[:i], mi.idx[i+1:]...) + return + } + } +} + +// Store remembers the id and pack in the index. +func (mi *MasterIndex) Store(pb restic.PackedBlob) { + mi.idxMutex.Lock() + defer mi.idxMutex.Unlock() + + for _, idx := range mi.idx { + if !idx.Final() { + idx.Store(pb) + return + } + } + + newIdx := NewIndex() + newIdx.Store(pb) + mi.idx = append(mi.idx, newIdx) +} + +// NotFinalIndexes returns all indexes that have not yet been saved. +func (mi *MasterIndex) NotFinalIndexes() []*Index { + mi.idxMutex.Lock() + defer mi.idxMutex.Unlock() + + var list []*Index + + for _, idx := range mi.idx { + if !idx.Final() { + list = append(list, idx) + } + } + + debug.Log("return %d indexes", len(list)) + return list +} + +// FullIndexes returns all indexes that are full. +func (mi *MasterIndex) FullIndexes() []*Index { + mi.idxMutex.Lock() + defer mi.idxMutex.Unlock() + + var list []*Index + + debug.Log("checking %d indexes", len(mi.idx)) + for _, idx := range mi.idx { + if idx.Final() { + debug.Log("index %p is final", idx) + continue + } + + if IndexFull(idx) { + debug.Log("index %p is full", idx) + list = append(list, idx) + } else { + debug.Log("index %p not full", idx) + } + } + + debug.Log("return %d indexes", len(list)) + return list +} + +// All returns all indexes. +func (mi *MasterIndex) All() []*Index { + mi.idxMutex.Lock() + defer mi.idxMutex.Unlock() + + return mi.idx +} + +// Each returns a channel that yields all blobs known to the index. When the +// context is cancelled, the background goroutine terminates. 
This blocks any +// modification of the index. +func (mi *MasterIndex) Each(ctx context.Context) <-chan restic.PackedBlob { + mi.idxMutex.RLock() + + ch := make(chan restic.PackedBlob) + + go func() { + defer mi.idxMutex.RUnlock() + defer func() { + close(ch) + }() + + for _, idx := range mi.idx { + idxCh := idx.Each(ctx) + for pb := range idxCh { + select { + case <-ctx.Done(): + return + case ch <- pb: + } + } + } + }() + + return ch +} + +// RebuildIndex combines all known indexes to a new index, leaving out any +// packs whose ID is contained in packBlacklist. The new index contains the IDs +// of all known indexes in the "supersedes" field. +func (mi *MasterIndex) RebuildIndex(packBlacklist restic.IDSet) (*Index, error) { + mi.idxMutex.Lock() + defer mi.idxMutex.Unlock() + + debug.Log("start rebuilding index of %d indexes, pack blacklist: %v", len(mi.idx), packBlacklist) + + newIndex := NewIndex() + + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + + for i, idx := range mi.idx { + debug.Log("adding index %d", i) + + for pb := range idx.Each(ctx) { + if packBlacklist.Has(pb.PackID) { + continue + } + + newIndex.Store(pb) + } + + if !idx.Final() { + debug.Log("index %d isn't final, don't add to supersedes field", i) + continue + } + + id, err := idx.ID() + if err != nil { + debug.Log("index %d does not have an ID: %v", err) + return nil, err + } + + debug.Log("adding index id %v to supersedes field", id) + + err = newIndex.AddToSupersedes(id) + if err != nil { + return nil, err + } + } + + return newIndex, nil +} diff --git a/internal/repository/master_index_test.go b/internal/repository/master_index_test.go new file mode 100644 index 000000000..7dfcdda5f --- /dev/null +++ b/internal/repository/master_index_test.go @@ -0,0 +1,123 @@ +package repository_test + +import ( + "math/rand" + "testing" + + "github.com/restic/restic/internal/repository" + "github.com/restic/restic/internal/restic" + rtest "github.com/restic/restic/internal/test" +) + +func TestMasterIndexLookup(t *testing.T) { + idInIdx1 := restic.NewRandomID() + idInIdx2 := restic.NewRandomID() + + blob1 := restic.PackedBlob{ + PackID: restic.NewRandomID(), + Blob: restic.Blob{ + Type: restic.DataBlob, + ID: idInIdx1, + Length: 10, + Offset: 0, + }, + } + + blob2 := restic.PackedBlob{ + PackID: restic.NewRandomID(), + Blob: restic.Blob{ + Type: restic.DataBlob, + ID: idInIdx2, + Length: 100, + Offset: 10, + }, + } + + idx1 := repository.NewIndex() + idx1.Store(blob1) + + idx2 := repository.NewIndex() + idx2.Store(blob2) + + mIdx := repository.NewMasterIndex() + mIdx.Insert(idx1) + mIdx.Insert(idx2) + + blobs, found := mIdx.Lookup(idInIdx1, restic.DataBlob) + rtest.Assert(t, found, "Expected to find blob id %v from index 1", idInIdx1) + rtest.Equals(t, []restic.PackedBlob{blob1}, blobs) + + blobs, found = mIdx.Lookup(idInIdx2, restic.DataBlob) + rtest.Assert(t, found, "Expected to find blob id %v from index 2", idInIdx2) + rtest.Equals(t, []restic.PackedBlob{blob2}, blobs) + + blobs, found = mIdx.Lookup(restic.NewRandomID(), restic.DataBlob) + rtest.Assert(t, !found, "Expected to not find a blob when fetching with a random id") + rtest.Assert(t, blobs == nil, "Expected no blobs when fetching with a random id") +} + +func BenchmarkMasterIndexLookupSingleIndex(b *testing.B) { + idx1, lookupID := createRandomIndex(rand.New(rand.NewSource(0))) + + mIdx := repository.NewMasterIndex() + mIdx.Insert(idx1) + + b.ResetTimer() + + for i := 0; i < b.N; i++ { + mIdx.Lookup(lookupID, restic.DataBlob) + } +} + +func 
BenchmarkMasterIndexLookupMultipleIndex(b *testing.B) { + rng := rand.New(rand.NewSource(0)) + mIdx := repository.NewMasterIndex() + + for i := 0; i < 5; i++ { + idx, _ := createRandomIndex(rand.New(rng)) + mIdx.Insert(idx) + } + + idx1, lookupID := createRandomIndex(rand.New(rng)) + mIdx.Insert(idx1) + + b.ResetTimer() + + for i := 0; i < b.N; i++ { + mIdx.Lookup(lookupID, restic.DataBlob) + } +} + +func BenchmarkMasterIndexLookupSingleIndexUnknown(b *testing.B) { + lookupID := restic.NewRandomID() + idx1, _ := createRandomIndex(rand.New(rand.NewSource(0))) + + mIdx := repository.NewMasterIndex() + mIdx.Insert(idx1) + + b.ResetTimer() + + for i := 0; i < b.N; i++ { + mIdx.Lookup(lookupID, restic.DataBlob) + } +} + +func BenchmarkMasterIndexLookupMultipleIndexUnknown(b *testing.B) { + rng := rand.New(rand.NewSource(0)) + lookupID := restic.NewRandomID() + mIdx := repository.NewMasterIndex() + + for i := 0; i < 5; i++ { + idx, _ := createRandomIndex(rand.New(rng)) + mIdx.Insert(idx) + } + + idx1, _ := createRandomIndex(rand.New(rng)) + mIdx.Insert(idx1) + + b.ResetTimer() + + for i := 0; i < b.N; i++ { + mIdx.Lookup(lookupID, restic.DataBlob) + } +} diff --git a/internal/repository/packer_manager.go b/internal/repository/packer_manager.go new file mode 100644 index 000000000..4884e0885 --- /dev/null +++ b/internal/repository/packer_manager.go @@ -0,0 +1,161 @@ +package repository + +import ( + "context" + "crypto/sha256" + "os" + "sync" + + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/hashing" + "github.com/restic/restic/internal/restic" + + "github.com/restic/restic/internal/crypto" + "github.com/restic/restic/internal/debug" + "github.com/restic/restic/internal/fs" + "github.com/restic/restic/internal/pack" +) + +// Saver implements saving data in a backend. +type Saver interface { + Save(context.Context, restic.Handle, restic.RewindReader) error +} + +// Packer holds a pack.Packer together with a hash writer. +type Packer struct { + *pack.Packer + hw *hashing.Writer + tmpfile *os.File +} + +// packerManager keeps a list of open packs and creates new on demand. +type packerManager struct { + be Saver + key *crypto.Key + pm sync.Mutex + packers []*Packer +} + +const minPackSize = 4 * 1024 * 1024 + +// newPackerManager returns an new packer manager which writes temporary files +// to a temporary directory +func newPackerManager(be Saver, key *crypto.Key) *packerManager { + return &packerManager{ + be: be, + key: key, + } +} + +// findPacker returns a packer for a new blob of size bytes. Either a new one is +// created or one is returned that already has some blobs. +func (r *packerManager) findPacker() (packer *Packer, err error) { + r.pm.Lock() + defer r.pm.Unlock() + + // search for a suitable packer + if len(r.packers) > 0 { + p := r.packers[0] + r.packers = r.packers[1:] + return p, nil + } + + // no suitable packer found, return new + debug.Log("create new pack") + tmpfile, err := fs.TempFile("", "restic-temp-pack-") + if err != nil { + return nil, errors.Wrap(err, "fs.TempFile") + } + + hw := hashing.NewWriter(tmpfile, sha256.New()) + p := pack.NewPacker(r.key, hw) + packer = &Packer{ + Packer: p, + hw: hw, + tmpfile: tmpfile, + } + + return packer, nil +} + +// insertPacker appends p to s.packs. +func (r *packerManager) insertPacker(p *Packer) { + r.pm.Lock() + defer r.pm.Unlock() + + r.packers = append(r.packers, p) + debug.Log("%d packers\n", len(r.packers)) +} + +// savePacker stores p in the backend. 
+func (r *Repository) savePacker(ctx context.Context, t restic.BlobType, p *Packer) error { + debug.Log("save packer for %v with %d blobs (%d bytes)\n", t, p.Packer.Count(), p.Packer.Size()) + _, err := p.Packer.Finalize() + if err != nil { + return err + } + + id := restic.IDFromHash(p.hw.Sum(nil)) + h := restic.Handle{Type: restic.DataFile, Name: id.String()} + + rd, err := restic.NewFileReader(p.tmpfile) + if err != nil { + return err + } + + err = r.be.Save(ctx, h, rd) + if err != nil { + debug.Log("Save(%v) error: %v", h, err) + return err + } + + debug.Log("saved as %v", h) + + if t == restic.TreeBlob && r.Cache != nil { + debug.Log("saving tree pack file in cache") + + _, err = p.tmpfile.Seek(0, 0) + if err != nil { + return errors.Wrap(err, "Seek") + } + + err := r.Cache.Save(h, p.tmpfile) + if err != nil { + return err + } + } + + err = p.tmpfile.Close() + if err != nil { + return errors.Wrap(err, "close tempfile") + } + + err = fs.RemoveIfExists(p.tmpfile.Name()) + if err != nil { + return errors.Wrap(err, "Remove") + } + + // update blobs in the index + for _, b := range p.Packer.Blobs() { + debug.Log(" updating blob %v to pack %v", b.ID, id) + r.idx.Store(restic.PackedBlob{ + Blob: restic.Blob{ + Type: b.Type, + ID: b.ID, + Offset: b.Offset, + Length: uint(b.Length), + }, + PackID: id, + }) + } + + return nil +} + +// countPacker returns the number of open (unfinished) packers. +func (r *packerManager) countPacker() int { + r.pm.Lock() + defer r.pm.Unlock() + + return len(r.packers) +} diff --git a/internal/repository/packer_manager_test.go b/internal/repository/packer_manager_test.go new file mode 100644 index 000000000..25d78f9b5 --- /dev/null +++ b/internal/repository/packer_manager_test.go @@ -0,0 +1,168 @@ +package repository + +import ( + "context" + "io" + "math/rand" + "os" + "testing" + + "github.com/restic/restic/internal/backend/mem" + "github.com/restic/restic/internal/crypto" + "github.com/restic/restic/internal/fs" + "github.com/restic/restic/internal/mock" + "github.com/restic/restic/internal/restic" +) + +type randReader struct { + src rand.Source + rand *rand.Rand +} + +func newRandReader(src rand.Source) *randReader { + return &randReader{ + src: src, + rand: rand.New(src), + } +} + +// Read generates len(p) random bytes and writes them into p. It +// always returns len(p) and a nil error. 
+func (r *randReader) Read(p []byte) (n int, err error) { + for i := 0; i < len(p); i += 7 { + val := r.src.Int63() + for j := 0; i+j < len(p) && j < 7; j++ { + p[i+j] = byte(val) + val >>= 8 + } + } + return len(p), nil +} + +func randomID(rd io.Reader) restic.ID { + id := restic.ID{} + _, err := io.ReadFull(rd, id[:]) + if err != nil { + panic(err) + } + return id +} + +const maxBlobSize = 1 << 20 + +func saveFile(t testing.TB, be Saver, length int, f *os.File, id restic.ID) { + h := restic.Handle{Type: restic.DataFile, Name: id.String()} + t.Logf("save file %v", h) + + rd, err := restic.NewFileReader(f) + if err != nil { + t.Fatal(err) + } + + err = be.Save(context.TODO(), h, rd) + if err != nil { + t.Fatal(err) + } + + if err := f.Close(); err != nil { + t.Fatal(err) + } + + if err := fs.RemoveIfExists(f.Name()); err != nil { + t.Fatal(err) + } +} + +func fillPacks(t testing.TB, rnd *randReader, be Saver, pm *packerManager, buf []byte) (bytes int) { + for i := 0; i < 100; i++ { + l := rnd.rand.Intn(1 << 20) + seed := rnd.rand.Int63() + + packer, err := pm.findPacker() + if err != nil { + t.Fatal(err) + } + + rd := newRandReader(rand.NewSource(seed)) + id := randomID(rd) + buf = buf[:l] + _, err = io.ReadFull(rd, buf) + if err != nil { + t.Fatal(err) + } + + n, err := packer.Add(restic.DataBlob, id, buf) + if err != nil { + t.Fatal(err) + } + if n != l { + t.Errorf("Add() returned invalid number of bytes: want %v, got %v", n, l) + } + bytes += l + + if packer.Size() < minPackSize { + pm.insertPacker(packer) + continue + } + + _, err = packer.Finalize() + if err != nil { + t.Fatal(err) + } + + packID := restic.IDFromHash(packer.hw.Sum(nil)) + saveFile(t, be, int(packer.Size()), packer.tmpfile, packID) + } + + return bytes +} + +func flushRemainingPacks(t testing.TB, rnd *randReader, be Saver, pm *packerManager) (bytes int) { + if pm.countPacker() > 0 { + for _, packer := range pm.packers { + n, err := packer.Finalize() + if err != nil { + t.Fatal(err) + } + bytes += int(n) + + packID := restic.IDFromHash(packer.hw.Sum(nil)) + saveFile(t, be, int(packer.Size()), packer.tmpfile, packID) + } + } + + return bytes +} + +func TestPackerManager(t *testing.T) { + rnd := newRandReader(rand.NewSource(23)) + + be := mem.New() + pm := newPackerManager(be, crypto.NewRandomKey()) + + blobBuf := make([]byte, maxBlobSize) + + bytes := fillPacks(t, rnd, be, pm, blobBuf) + bytes += flushRemainingPacks(t, rnd, be, pm) + + t.Logf("saved %d bytes", bytes) +} + +func BenchmarkPackerManager(t *testing.B) { + rnd := newRandReader(rand.NewSource(23)) + + be := &mock.Backend{ + SaveFn: func(context.Context, restic.Handle, restic.RewindReader) error { return nil }, + } + blobBuf := make([]byte, maxBlobSize) + + t.ResetTimer() + + for i := 0; i < t.N; i++ { + bytes := 0 + pm := newPackerManager(be, crypto.NewRandomKey()) + bytes += fillPacks(t, rnd, be, pm, blobBuf) + bytes += flushRemainingPacks(t, rnd, be, pm) + t.Logf("saved %d bytes", bytes) + } +} diff --git a/internal/repository/parallel.go b/internal/repository/parallel.go new file mode 100644 index 000000000..154b58bfa --- /dev/null +++ b/internal/repository/parallel.go @@ -0,0 +1,65 @@ +package repository + +import ( + "context" + + "github.com/restic/restic/internal/debug" + "github.com/restic/restic/internal/restic" + "golang.org/x/sync/errgroup" +) + +// ParallelWorkFunc gets one file ID to work on. If an error is returned, +// processing stops. When the contect is cancelled the function should return. 
+type ParallelWorkFunc func(ctx context.Context, id string) error + +// ParallelIDWorkFunc gets one restic.ID to work on. If an error is returned, +// processing stops. When the context is cancelled the function should return. +type ParallelIDWorkFunc func(ctx context.Context, id restic.ID) error + +// FilesInParallel runs n workers of f in parallel, on the IDs that +// repo.List(t) yields. If f returns an error, the process is aborted and the +// first error is returned. +func FilesInParallel(ctx context.Context, repo restic.Lister, t restic.FileType, n int, f ParallelWorkFunc) error { + g, ctx := errgroup.WithContext(ctx) + + ch := make(chan string, n) + g.Go(func() error { + defer close(ch) + return repo.List(ctx, t, func(fi restic.FileInfo) error { + select { + case <-ctx.Done(): + case ch <- fi.Name: + } + return nil + }) + }) + + for i := 0; i < n; i++ { + g.Go(func() error { + for name := range ch { + err := f(ctx, name) + if err != nil { + return err + } + } + return nil + }) + } + + return g.Wait() +} + +// ParallelWorkFuncParseID converts a function that takes a restic.ID to a +// function that takes a string. Filenames that do not parse as a restic.ID +// are ignored. +func ParallelWorkFuncParseID(f ParallelIDWorkFunc) ParallelWorkFunc { + return func(ctx context.Context, s string) error { + id, err := restic.ParseID(s) + if err != nil { + debug.Log("invalid ID %q: %v", id, err) + return nil + } + + return f(ctx, id) + } +} diff --git a/internal/repository/parallel_test.go b/internal/repository/parallel_test.go new file mode 100644 index 000000000..7b4c4a583 --- /dev/null +++ b/internal/repository/parallel_test.go @@ -0,0 +1,129 @@ +package repository_test + +import ( + "context" + "math/rand" + "testing" + "time" + + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/restic" + + "github.com/restic/restic/internal/repository" + rtest "github.com/restic/restic/internal/test" +) + +type testIDs []string + +var lister = testIDs{ + "40bb581cd36de952985c97a3ff6b21df41ee897d4db2040354caa36a17ff5268", + "2e15811a4d14ffac66d36a9ff456019d8de4c10c949d45b643f8477d17e92ff3", + "70c11b3ed521ad6b76d905c002ca98b361fca06aca060a063432c7311155a4da", + "8056a33e75dccdda701b6c989c7ed0cb71bbb6da13c6427fe5986f0896cc91c0", + "79d8776200596aa0237b10d470f7b850b86f8a1a80988ef5c8bee2874ce992e2", + "f9f1f29791c6b79b90b35efd083f17a3b163bbbafb1a2fdf43d46d56cffda289", + "3834178d05d0f6dd07f872ee0262ff1ace0f0f375768227d3c902b0b66591369", + "66d5cc68c9186414806f366ae5493ce7f229212993750a4992be4030f6af28c5", + "ebca5af4f397944f68cd215e3dfa2b197a7ba0f7c17d65d9f7390d0a15cde296", + "d4511ce6ff732d106275a57e40745c599e987c0da44c42cddbef592aac102437", + "f366202f0bfeefaedd7b49e2f21a90d3cbddb97d257a74d788dd34e19a684dae", + "a5c17728ab2433cd50636dd5c6c7068c7a44f2999d09c46e8f528466da8a059d", + "bae0f9492b9b208233029b87692a1a55cbd7fbe1cf3f6d7bc693ac266a6d6f0e", + "9d500187913c7510d71d1902703d312c7aaa56f1e98351385b9535fdabae595e", + "ffbddd8a4c1e54d258bb3e16d3929b546b61af63cb560b3e3061a8bef5b24552", + "201bb3abf655e7ef71e79ed4fb1079b0502b5acb4d9fad5e72a0de690c50a386", + "08eb57bbd559758ea96e99f9b7688c30e7b3bcf0c4562ff4535e2d8edeffaeed", + "e50b7223b04985ff38d9e11d1cba333896ef4264f82bd5d0653a028bce70e542", + "65a9421cd59cc7b7a71dcd9076136621af607fb4701d2e5c2af23b6396cf2f37", + "995a655b3521c19b4d0c266222266d89c8fc62889597d61f45f336091e646d57", + "51ec6f0bce77ed97df2dd7ae849338c3a8155a057da927eedd66e3d61be769ad", + "7b3923a0c0666431efecdbf6cb171295ec1710b6595eebcba3b576b49d13e214", + 
"2cedcc3d14698bea7e4b0546f7d5d48951dd90add59e6f2d44b693fd8913717d", + "fd6770cbd54858fdbd3d7b4239b985e5599180064d93ca873f27e86e8407d011", + "9edc51d8e6e04d05c9757848c1bfbfdc8e86b6330982294632488922e59fdb1b", + "1a6c4fbb24ad724c968b2020417c3d057e6c89e49bdfb11d91006def65eab6a0", + "cb3b29808cd0adfa2dca1f3a04f98114fbccf4eb487cdd4022f49bd70eeb049b", + "f55edcb40c619e29a20e432f8aaddc83a649be2c2d1941ccdc474cd2af03d490", + "e8ccc1763a92de23566b95c3ad1414a098016ece69a885fc8a72782a7517d17c", + "0fe2e3db8c5a12ad7101a63a0fffee901be54319cfe146bead7aec851722f82d", + "36be45a6ae7c95ad97cee1b33023be324bce7a7b4b7036e24125679dd9ff5b44", + "1685ed1a57c37859fbef1f7efb7509f20b84ec17a765605de43104d2fa37884b", + "9d83629a6a004c505b100a0b5d0b246833b63aa067aa9b59e3abd6b74bc4d3a8", + "be49a66b60175c5e2ee273b42165f86ef11bb6518c1c79950bcd3f4c196c98bd", + "0fd89885d821761b4a890782908e75793028747d15ace3c6cbf0ad56582b4fa5", + "94a767519a4e352a88796604943841fea21429f3358b4d5d55596dbda7d15dce", + "8dd07994afe6e572ddc9698fb0d13a0d4c26a38b7992818a71a99d1e0ac2b034", + "f7380a6f795ed31fbeb2945c72c5fd1d45044e5ab152311e75e007fa530f5847", + "5ca1ce01458e484393d7e9c8af42b0ff37a73a2fee0f18e14cff0fb180e33014", + "8f44178be3fe0a2bd41f922576fb7a9b19d589754504be746f56c759df328fda", + "12d33847c2be711c989f37360dd7aa8537fd14972262a4530634a08fdf32a767", + "31e077f5080f78846a00093caff2b6b839519cc47516142eeba9c41d4072a605", + "14f01db8a0054e70222b76d2555d70114b4bf8a0f02084324af2df226f14a795", + "7f5dbbaf31b4551828e8e76cef408375db9fbcdcdb6b5949f2d1b0c4b8632132", + "42a5d9b9bb7e4a16f23ba916bcf87f38c1aa1f2de2ab79736f725850a8ff6a1b", + "e06f8f901ea708beba8712a11b6e2d0be7c4b018d0254204ef269bcdf5e8c6cc", + "d9ba75785bf45b0c4fd3b2365c968099242483f2f0d0c7c20306dac11fae96e9", + "428debbb280873907cef2ec099efe1566e42a59775d6ec74ded0c4048d5a6515", + "3b51049d4dae701098e55a69536fa31ad2be1adc17b631a695a40e8a294fe9c0", + "168f88aa4b105e9811f5f79439cc1a689be4eec77f3361d42f22fe8f7ddc74a9", + "0baa0ab2249b33d64449a899cb7bd8eae5231f0d4ff70f09830dc1faa2e4abee", + "0c3896d346b580306a49de29f3a78913a41e14b8461b124628c33a64636241f2", + "b18313f1651c15e100e7179aa3eb8ffa62c3581159eaf7f83156468d19781e42", + "996361f7d988e48267ccc7e930fed4637be35fe7562b8601dceb7a32313a14c8", + "dfb4e6268437d53048d22b811048cd045df15693fc6789affd002a0fc80a6e60", + "34dd044c228727f2226a0c9c06a3e5ceb5e30e31cb7854f8fa1cde846b395a58", +} + +func (tests testIDs) List(ctx context.Context, t restic.FileType, fn func(restic.FileInfo) error) error { + for i := 0; i < 500; i++ { + for _, id := range tests { + if ctx.Err() != nil { + return ctx.Err() + } + + fi := restic.FileInfo{ + Name: id, + } + + err := fn(fi) + if err != nil { + return err + } + } + } + + return nil +} + +func TestFilesInParallel(t *testing.T) { + f := func(ctx context.Context, id string) error { + time.Sleep(1 * time.Millisecond) + return nil + } + + for n := 1; n < 5; n++ { + err := repository.FilesInParallel(context.TODO(), lister, restic.DataFile, n*100, f) + rtest.OK(t, err) + } +} + +var errTest = errors.New("test error") + +func TestFilesInParallelWithError(t *testing.T) { + f := func(ctx context.Context, id string) error { + time.Sleep(1 * time.Millisecond) + + if rand.Float32() < 0.01 { + return errTest + } + + return nil + } + + for n := 1; n < 5; n++ { + err := repository.FilesInParallel(context.TODO(), lister, restic.DataFile, n*100, f) + if err != errTest { + t.Fatalf("wrong error returned, want %q, got %v", errTest, err) + } + } +} diff --git a/internal/repository/pool.go b/internal/repository/pool.go new file 
mode 100644 index 000000000..b87791f14 --- /dev/null +++ b/internal/repository/pool.go @@ -0,0 +1,21 @@ +package repository + +import ( + "sync" + + "github.com/restic/chunker" +) + +var bufPool = sync.Pool{ + New: func() interface{} { + return make([]byte, chunker.MaxSize/3) + }, +} + +func getBuf() []byte { + return bufPool.Get().([]byte) +} + +func freeBuf(data []byte) { + bufPool.Put(data) +} diff --git a/internal/repository/repack.go b/internal/repository/repack.go new file mode 100644 index 000000000..d0119c204 --- /dev/null +++ b/internal/repository/repack.go @@ -0,0 +1,114 @@ +package repository + +import ( + "context" + "fmt" + "os" + + "github.com/restic/restic/internal/debug" + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/fs" + "github.com/restic/restic/internal/pack" + "github.com/restic/restic/internal/restic" +) + +// Repack takes a list of packs together with a list of blobs contained in +// these packs. Each pack is loaded and the blobs listed in keepBlobs is saved +// into a new pack. Returned is the list of obsolete packs which can then +// be removed. +func Repack(ctx context.Context, repo restic.Repository, packs restic.IDSet, keepBlobs restic.BlobSet, p *restic.Progress) (obsoletePacks restic.IDSet, err error) { + debug.Log("repacking %d packs while keeping %d blobs", len(packs), len(keepBlobs)) + + for packID := range packs { + // load the complete pack into a temp file + h := restic.Handle{Type: restic.DataFile, Name: packID.String()} + + tempfile, hash, packLength, err := DownloadAndHash(ctx, repo.Backend(), h) + if err != nil { + return nil, errors.Wrap(err, "Repack") + } + + debug.Log("pack %v loaded (%d bytes), hash %v", packID, packLength, hash) + + if !packID.Equal(hash) { + return nil, errors.Errorf("hash does not match id: want %v, got %v", packID, hash) + } + + _, err = tempfile.Seek(0, 0) + if err != nil { + return nil, errors.Wrap(err, "Seek") + } + + blobs, err := pack.List(repo.Key(), tempfile, packLength) + if err != nil { + return nil, err + } + + debug.Log("processing pack %v, blobs: %v", packID, len(blobs)) + var buf []byte + for _, entry := range blobs { + h := restic.BlobHandle{ID: entry.ID, Type: entry.Type} + if !keepBlobs.Has(h) { + continue + } + + debug.Log(" process blob %v", h) + + buf = buf[:] + if uint(len(buf)) < entry.Length { + buf = make([]byte, entry.Length) + } + buf = buf[:entry.Length] + + n, err := tempfile.ReadAt(buf, int64(entry.Offset)) + if err != nil { + return nil, errors.Wrap(err, "ReadAt") + } + + if n != len(buf) { + return nil, errors.Errorf("read blob %v from %v: not enough bytes read, want %v, got %v", + h, tempfile.Name(), len(buf), n) + } + + nonce, ciphertext := buf[:repo.Key().NonceSize()], buf[repo.Key().NonceSize():] + plaintext, err := repo.Key().Open(ciphertext[:0], nonce, ciphertext, nil) + if err != nil { + return nil, err + } + + id := restic.Hash(plaintext) + if !id.Equal(entry.ID) { + debug.Log("read blob %v/%v from %v: wrong data returned, hash is %v", + h.Type, h.ID, tempfile.Name(), id) + fmt.Fprintf(os.Stderr, "read blob %v from %v: wrong data returned, hash is %v", + h, tempfile.Name(), id) + } + + _, err = repo.SaveBlob(ctx, entry.Type, plaintext, entry.ID) + if err != nil { + return nil, err + } + + debug.Log(" saved blob %v", entry.ID) + + keepBlobs.Delete(h) + } + + if err = tempfile.Close(); err != nil { + return nil, errors.Wrap(err, "Close") + } + + if err = fs.RemoveIfExists(tempfile.Name()); err != nil { + return nil, errors.Wrap(err, "Remove") + } + if p != 
nil { + p.Report(restic.Stat{Blobs: 1}) + } + } + + if err := repo.Flush(ctx); err != nil { + return nil, err + } + + return packs, nil +} diff --git a/internal/repository/repack_test.go b/internal/repository/repack_test.go new file mode 100644 index 000000000..286962f49 --- /dev/null +++ b/internal/repository/repack_test.go @@ -0,0 +1,253 @@ +package repository_test + +import ( + "context" + "io" + "math/rand" + "testing" + + "github.com/restic/restic/internal/index" + "github.com/restic/restic/internal/repository" + "github.com/restic/restic/internal/restic" +) + +func randomSize(min, max int) int { + return rand.Intn(max-min) + min +} + +func random(t testing.TB, length int) []byte { + rd := restic.NewRandReader(rand.New(rand.NewSource(rand.Int63()))) + buf := make([]byte, length) + _, err := io.ReadFull(rd, buf) + if err != nil { + t.Fatalf("unable to read %d random bytes: %v", length, err) + } + + return buf +} + +func createRandomBlobs(t testing.TB, repo restic.Repository, blobs int, pData float32) { + for i := 0; i < blobs; i++ { + var ( + tpe restic.BlobType + length int + ) + + if rand.Float32() < pData { + tpe = restic.DataBlob + length = randomSize(10*1024, 1024*1024) // 10KiB to 1MiB of data + } else { + tpe = restic.TreeBlob + length = randomSize(1*1024, 20*1024) // 1KiB to 20KiB + } + + buf := random(t, length) + id := restic.Hash(buf) + + if repo.Index().Has(id, restic.DataBlob) { + t.Errorf("duplicate blob %v/%v ignored", id, restic.DataBlob) + continue + } + + _, err := repo.SaveBlob(context.TODO(), tpe, buf, id) + if err != nil { + t.Fatalf("SaveFrom() error %v", err) + } + + if rand.Float32() < 0.2 { + if err = repo.Flush(context.Background()); err != nil { + t.Fatalf("repo.Flush() returned error %v", err) + } + } + } + + if err := repo.Flush(context.Background()); err != nil { + t.Fatalf("repo.Flush() returned error %v", err) + } +} + +// selectBlobs splits the list of all blobs randomly into two lists. A blob +// will be contained in the firstone ith probability p. 
+func selectBlobs(t *testing.T, repo restic.Repository, p float32) (list1, list2 restic.BlobSet) { + list1 = restic.NewBlobSet() + list2 = restic.NewBlobSet() + + blobs := restic.NewBlobSet() + + err := repo.List(context.TODO(), restic.DataFile, func(id restic.ID, size int64) error { + entries, _, err := repo.ListPack(context.TODO(), id, size) + if err != nil { + t.Fatalf("error listing pack %v: %v", id, err) + } + + for _, entry := range entries { + h := restic.BlobHandle{ID: entry.ID, Type: entry.Type} + if blobs.Has(h) { + t.Errorf("ignoring duplicate blob %v", h) + return nil + } + blobs.Insert(h) + + if rand.Float32() <= p { + list1.Insert(restic.BlobHandle{ID: entry.ID, Type: entry.Type}) + } else { + list2.Insert(restic.BlobHandle{ID: entry.ID, Type: entry.Type}) + } + } + return nil + }) + if err != nil { + t.Fatal(err) + } + + return list1, list2 +} + +func listPacks(t *testing.T, repo restic.Repository) restic.IDSet { + list := restic.NewIDSet() + err := repo.List(context.TODO(), restic.DataFile, func(id restic.ID, size int64) error { + list.Insert(id) + return nil + }) + + if err != nil { + t.Fatal(err) + } + + return list +} + +func findPacksForBlobs(t *testing.T, repo restic.Repository, blobs restic.BlobSet) restic.IDSet { + packs := restic.NewIDSet() + + idx := repo.Index() + for h := range blobs { + list, found := idx.Lookup(h.ID, h.Type) + if !found { + t.Fatal("Failed to find blob", h.ID.Str(), "with type", h.Type) + } + + for _, pb := range list { + packs.Insert(pb.PackID) + } + } + + return packs +} + +func repack(t *testing.T, repo restic.Repository, packs restic.IDSet, blobs restic.BlobSet) { + repackedBlobs, err := repository.Repack(context.TODO(), repo, packs, blobs, nil) + if err != nil { + t.Fatal(err) + } + + for id := range repackedBlobs { + err = repo.Backend().Remove(context.TODO(), restic.Handle{Type: restic.DataFile, Name: id.String()}) + if err != nil { + t.Fatal(err) + } + } +} + +func saveIndex(t *testing.T, repo restic.Repository) { + if err := repo.SaveIndex(context.TODO()); err != nil { + t.Fatalf("repo.SaveIndex() %v", err) + } +} + +func rebuildIndex(t *testing.T, repo restic.Repository) { + idx, _, err := index.New(context.TODO(), repo, restic.NewIDSet(), nil) + if err != nil { + t.Fatal(err) + } + + err = repo.List(context.TODO(), restic.IndexFile, func(id restic.ID, size int64) error { + h := restic.Handle{ + Type: restic.IndexFile, + Name: id.String(), + } + return repo.Backend().Remove(context.TODO(), h) + }) + if err != nil { + t.Fatal(err) + } + + _, err = idx.Save(context.TODO(), repo, nil) + if err != nil { + t.Fatal(err) + } +} + +func reloadIndex(t *testing.T, repo restic.Repository) { + repo.SetIndex(repository.NewMasterIndex()) + if err := repo.LoadIndex(context.TODO()); err != nil { + t.Fatalf("error loading new index: %v", err) + } +} + +func TestRepack(t *testing.T) { + repo, cleanup := repository.TestRepository(t) + defer cleanup() + + seed := rand.Int63() + rand.Seed(seed) + t.Logf("rand seed is %v", seed) + + createRandomBlobs(t, repo, 100, 0.7) + + packsBefore := listPacks(t, repo) + + // Running repack on empty ID sets should not do anything at all. + repack(t, repo, nil, nil) + + packsAfter := listPacks(t, repo) + + if !packsAfter.Equals(packsBefore) { + t.Fatalf("packs are not equal, Repack modified something. 
Before:\n %v\nAfter:\n %v", + packsBefore, packsAfter) + } + + saveIndex(t, repo) + + removeBlobs, keepBlobs := selectBlobs(t, repo, 0.2) + + removePacks := findPacksForBlobs(t, repo, removeBlobs) + + repack(t, repo, removePacks, keepBlobs) + rebuildIndex(t, repo) + reloadIndex(t, repo) + + packsAfter = listPacks(t, repo) + for id := range removePacks { + if packsAfter.Has(id) { + t.Errorf("pack %v still present although it should have been repacked and removed", id.Str()) + } + } + + idx := repo.Index() + + for h := range keepBlobs { + list, found := idx.Lookup(h.ID, h.Type) + if !found { + t.Errorf("unable to find blob %v in repo", h.ID.Str()) + continue + } + + if len(list) != 1 { + t.Errorf("expected one pack in the list, got: %v", list) + continue + } + + pb := list[0] + + if removePacks.Has(pb.PackID) { + t.Errorf("lookup returned pack ID %v that should've been removed", pb.PackID) + } + } + + for h := range removeBlobs { + if _, found := idx.Lookup(h.ID, h.Type); found { + t.Errorf("blob %v still contained in the repo", h) + } + } +} diff --git a/internal/repository/repository.go b/internal/repository/repository.go new file mode 100644 index 000000000..1a6e5c505 --- /dev/null +++ b/internal/repository/repository.go @@ -0,0 +1,737 @@ +package repository + +import ( + "bytes" + "context" + "crypto/sha256" + "encoding/json" + "fmt" + "io" + "os" + + "github.com/restic/restic/internal/backend" + "github.com/restic/restic/internal/cache" + "github.com/restic/restic/internal/crypto" + "github.com/restic/restic/internal/debug" + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/fs" + "github.com/restic/restic/internal/hashing" + "github.com/restic/restic/internal/pack" + "github.com/restic/restic/internal/restic" +) + +// Repository is used to access a repository in a backend. +type Repository struct { + be restic.Backend + cfg restic.Config + key *crypto.Key + keyName string + idx *MasterIndex + restic.Cache + + treePM *packerManager + dataPM *packerManager +} + +// New returns a new repository with backend be. +func New(be restic.Backend) *Repository { + repo := &Repository{ + be: be, + idx: NewMasterIndex(), + dataPM: newPackerManager(be, nil), + treePM: newPackerManager(be, nil), + } + + return repo +} + +// Config returns the repository configuration. +func (r *Repository) Config() restic.Config { + return r.cfg +} + +// UseCache replaces the backend with the wrapped cache. +func (r *Repository) UseCache(c restic.Cache) { + if c == nil { + return + } + debug.Log("using cache") + r.Cache = c + r.be = c.Wrap(r.be) +} + +// PrefixLength returns the number of bytes required so that all prefixes of +// all IDs of type t are unique. +func (r *Repository) PrefixLength(t restic.FileType) (int, error) { + return restic.PrefixLength(r.be, t) +} + +// LoadAndDecrypt loads and decrypts data identified by t and id from the +// backend. 
+func (r *Repository) LoadAndDecrypt(ctx context.Context, t restic.FileType, id restic.ID) (buf []byte, err error) { + debug.Log("load %v with id %v", t, id) + + h := restic.Handle{Type: t, Name: id.String()} + buf, err = backend.LoadAll(ctx, r.be, h) + if err != nil { + debug.Log("error loading %v: %v", h, err) + return nil, err + } + + if t != restic.ConfigFile && !restic.Hash(buf).Equal(id) { + return nil, errors.Errorf("load %v: invalid data returned", h) + } + + nonce, ciphertext := buf[:r.key.NonceSize()], buf[r.key.NonceSize():] + plaintext, err := r.key.Open(ciphertext[:0], nonce, ciphertext, nil) + if err != nil { + return nil, err + } + + return plaintext, nil +} + +// sortCachedPacks moves all cached pack files to the front of blobs. +func (r *Repository) sortCachedPacks(blobs []restic.PackedBlob) []restic.PackedBlob { + if r.Cache == nil { + return blobs + } + + cached := make([]restic.PackedBlob, 0, len(blobs)/2) + noncached := make([]restic.PackedBlob, 0, len(blobs)/2) + + for _, blob := range blobs { + if r.Cache.Has(restic.Handle{Type: restic.DataFile, Name: blob.PackID.String()}) { + cached = append(cached, blob) + continue + } + noncached = append(noncached, blob) + } + + return append(cached, noncached...) +} + +// loadBlob tries to load and decrypt content identified by t and id from a +// pack from the backend, the result is stored in plaintextBuf, which must be +// large enough to hold the complete blob. +func (r *Repository) loadBlob(ctx context.Context, id restic.ID, t restic.BlobType, plaintextBuf []byte) (int, error) { + debug.Log("load %v with id %v (buf len %v, cap %d)", t, id, len(plaintextBuf), cap(plaintextBuf)) + + // lookup packs + blobs, found := r.idx.Lookup(id, t) + if !found { + debug.Log("id %v not found in index", id) + return 0, errors.Errorf("id %v not found in repository", id) + } + + // try cached pack files first + blobs = r.sortCachedPacks(blobs) + + var lastError error + for _, blob := range blobs { + debug.Log("blob %v/%v found: %v", t, id, blob) + + if blob.Type != t { + debug.Log("blob %v has wrong block type, want %v", blob, t) + } + + // load blob from pack + h := restic.Handle{Type: restic.DataFile, Name: blob.PackID.String()} + + if uint(cap(plaintextBuf)) < blob.Length { + return 0, errors.Errorf("buffer is too small: %v < %v", cap(plaintextBuf), blob.Length) + } + + plaintextBuf = plaintextBuf[:blob.Length] + + n, err := restic.ReadAt(ctx, r.be, h, int64(blob.Offset), plaintextBuf) + if err != nil { + debug.Log("error loading blob %v: %v", blob, err) + lastError = err + continue + } + + if uint(n) != blob.Length { + lastError = errors.Errorf("error loading blob %v: wrong length returned, want %d, got %d", + id.Str(), blob.Length, uint(n)) + debug.Log("lastError: %v", lastError) + continue + } + + // decrypt + nonce, ciphertext := plaintextBuf[:r.key.NonceSize()], plaintextBuf[r.key.NonceSize():] + plaintext, err := r.key.Open(ciphertext[:0], nonce, ciphertext, nil) + if err != nil { + lastError = errors.Errorf("decrypting blob %v failed: %v", id, err) + continue + } + + // check hash + if !restic.Hash(plaintext).Equal(id) { + lastError = errors.Errorf("blob %v returned invalid hash", id) + continue + } + + // move decrypted data to the start of the provided buffer + copy(plaintextBuf[0:], plaintext) + return len(plaintext), nil + } + + if lastError != nil { + return 0, lastError + } + + return 0, errors.Errorf("loading blob %v from %v packs failed", id.Str(), len(blobs)) +} + +// LoadJSONUnpacked decrypts the data and afterwards calls 
json.Unmarshal on +// the item. +func (r *Repository) LoadJSONUnpacked(ctx context.Context, t restic.FileType, id restic.ID, item interface{}) (err error) { + buf, err := r.LoadAndDecrypt(ctx, t, id) + if err != nil { + return err + } + + return json.Unmarshal(buf, item) +} + +// LookupBlobSize returns the size of blob id. +func (r *Repository) LookupBlobSize(id restic.ID, tpe restic.BlobType) (uint, bool) { + return r.idx.LookupSize(id, tpe) +} + +// SaveAndEncrypt encrypts data and stores it to the backend as type t. If data +// is small enough, it will be packed together with other small blobs. +func (r *Repository) SaveAndEncrypt(ctx context.Context, t restic.BlobType, data []byte, id *restic.ID) (restic.ID, error) { + if id == nil { + // compute plaintext hash + hashedID := restic.Hash(data) + id = &hashedID + } + + debug.Log("save id %v (%v, %d bytes)", id, t, len(data)) + + // get buf from the pool + ciphertext := getBuf() + + ciphertext = ciphertext[:0] + nonce := crypto.NewRandomNonce() + ciphertext = append(ciphertext, nonce...) + defer freeBuf(ciphertext) + + // encrypt blob + ciphertext = r.key.Seal(ciphertext, nonce, data, nil) + + // find suitable packer and add blob + var pm *packerManager + + switch t { + case restic.TreeBlob: + pm = r.treePM + case restic.DataBlob: + pm = r.dataPM + default: + panic(fmt.Sprintf("invalid type: %v", t)) + } + + packer, err := pm.findPacker() + if err != nil { + return restic.ID{}, err + } + + // save ciphertext + _, err = packer.Add(t, *id, ciphertext) + if err != nil { + return restic.ID{}, err + } + + // if the pack is not full enough, put back to the list + if packer.Size() < minPackSize { + debug.Log("pack is not full enough (%d bytes)", packer.Size()) + pm.insertPacker(packer) + return *id, nil + } + + // else write the pack to the backend + return *id, r.savePacker(ctx, t, packer) +} + +// SaveJSONUnpacked serialises item as JSON and encrypts and saves it in the +// backend as type t, without a pack. It returns the storage hash. +func (r *Repository) SaveJSONUnpacked(ctx context.Context, t restic.FileType, item interface{}) (restic.ID, error) { + debug.Log("save new blob %v", t) + plaintext, err := json.Marshal(item) + if err != nil { + return restic.ID{}, errors.Wrap(err, "json.Marshal") + } + + return r.SaveUnpacked(ctx, t, plaintext) +} + +// SaveUnpacked encrypts data and stores it in the backend. Returned is the +// storage hash. +func (r *Repository) SaveUnpacked(ctx context.Context, t restic.FileType, p []byte) (id restic.ID, err error) { + ciphertext := restic.NewBlobBuffer(len(p)) + ciphertext = ciphertext[:0] + nonce := crypto.NewRandomNonce() + ciphertext = append(ciphertext, nonce...) + + ciphertext = r.key.Seal(ciphertext, nonce, p, nil) + + id = restic.Hash(ciphertext) + h := restic.Handle{Type: t, Name: id.String()} + + err = r.be.Save(ctx, h, restic.NewByteReader(ciphertext)) + if err != nil { + debug.Log("error saving blob %v: %v", h, err) + return restic.ID{}, err + } + + debug.Log("blob %v saved", h) + return id, nil +} + +// Flush saves all remaining packs. 
+func (r *Repository) Flush(ctx context.Context) error { + pms := []struct { + t restic.BlobType + pm *packerManager + }{ + {restic.DataBlob, r.dataPM}, + {restic.TreeBlob, r.treePM}, + } + + for _, p := range pms { + p.pm.pm.Lock() + + debug.Log("manually flushing %d packs", len(p.pm.packers)) + for _, packer := range p.pm.packers { + err := r.savePacker(ctx, p.t, packer) + if err != nil { + p.pm.pm.Unlock() + return err + } + } + p.pm.packers = p.pm.packers[:0] + p.pm.pm.Unlock() + } + + return nil +} + +// Backend returns the backend for the repository. +func (r *Repository) Backend() restic.Backend { + return r.be +} + +// Index returns the currently used MasterIndex. +func (r *Repository) Index() restic.Index { + return r.idx +} + +// SetIndex instructs the repository to use the given index. +func (r *Repository) SetIndex(i restic.Index) error { + r.idx = i.(*MasterIndex) + + ids := restic.NewIDSet() + for _, idx := range r.idx.All() { + id, err := idx.ID() + if err != nil { + debug.Log("not using index, ID() returned error %v", err) + continue + } + ids.Insert(id) + } + + return r.PrepareCache(ids) +} + +// SaveIndex saves an index in the repository. +func SaveIndex(ctx context.Context, repo restic.Repository, index *Index) (restic.ID, error) { + buf := bytes.NewBuffer(nil) + + err := index.Finalize(buf) + if err != nil { + return restic.ID{}, err + } + + return repo.SaveUnpacked(ctx, restic.IndexFile, buf.Bytes()) +} + +// saveIndex saves all indexes in the backend. +func (r *Repository) saveIndex(ctx context.Context, indexes ...*Index) error { + for i, idx := range indexes { + debug.Log("Saving index %d", i) + + sid, err := SaveIndex(ctx, r, idx) + if err != nil { + return err + } + + debug.Log("Saved index %d as %v", i, sid) + } + + return nil +} + +// SaveIndex saves all new indexes in the backend. +func (r *Repository) SaveIndex(ctx context.Context) error { + return r.saveIndex(ctx, r.idx.NotFinalIndexes()...) +} + +// SaveFullIndex saves all full indexes in the backend. +func (r *Repository) SaveFullIndex(ctx context.Context) error { + return r.saveIndex(ctx, r.idx.FullIndexes()...) +} + +const loadIndexParallelism = 4 + +// LoadIndex loads all index files from the backend in parallel and stores them +// in the master index. The first error that occurred is returned. +func (r *Repository) LoadIndex(ctx context.Context) error { + debug.Log("Loading index") + + errCh := make(chan error, 1) + indexes := make(chan *Index) + + worker := func(ctx context.Context, id restic.ID) error { + idx, err := LoadIndex(ctx, r, id) + if err != nil { + fmt.Fprintf(os.Stderr, "%v, ignoring\n", err) + return nil + } + + select { + case indexes <- idx: + case <-ctx.Done(): + } + + return nil + } + + go func() { + defer close(indexes) + errCh <- FilesInParallel(ctx, r.be, restic.IndexFile, loadIndexParallelism, + ParallelWorkFuncParseID(worker)) + }() + + validIndex := restic.NewIDSet() + for idx := range indexes { + id, err := idx.ID() + if err == nil { + validIndex.Insert(id) + } + r.idx.Insert(idx) + } + + err := r.PrepareCache(validIndex) + if err != nil { + return err + } + + return <-errCh +} + +// PrepareCache initializes the local cache. indexIDs is the list of IDs of +// index files still present in the repo. 
+func (r *Repository) PrepareCache(indexIDs restic.IDSet) error { + if r.Cache == nil { + return nil + } + + debug.Log("prepare cache with %d index files", len(indexIDs)) + + // clear old index files + err := r.Cache.Clear(restic.IndexFile, indexIDs) + if err != nil { + fmt.Fprintf(os.Stderr, "error clearing index files in cache: %v\n", err) + } + + packs := restic.NewIDSet() + for _, idx := range r.idx.All() { + for id := range idx.Packs() { + packs.Insert(id) + } + } + + // clear old data files + err = r.Cache.Clear(restic.DataFile, packs) + if err != nil { + fmt.Fprintf(os.Stderr, "error clearing data files in cache: %v\n", err) + } + + treePacks := restic.NewIDSet() + for _, idx := range r.idx.All() { + for _, id := range idx.TreePacks() { + treePacks.Insert(id) + } + } + + // use readahead + debug.Log("using readahead") + cache := r.Cache.(*cache.Cache) + cache.PerformReadahead = func(h restic.Handle) bool { + if h.Type != restic.DataFile { + debug.Log("no readahead for %v, is not data file", h) + return false + } + + id, err := restic.ParseID(h.Name) + if err != nil { + debug.Log("no readahead for %v, invalid ID", h) + return false + } + + if treePacks.Has(id) { + debug.Log("perform readahead for %v", h) + return true + } + debug.Log("no readahead for %v, not tree file", h) + return false + } + + return nil +} + +// LoadIndex loads the index id from backend and returns it. +func LoadIndex(ctx context.Context, repo restic.Repository, id restic.ID) (*Index, error) { + idx, err := LoadIndexWithDecoder(ctx, repo, id, DecodeIndex) + if err == nil { + return idx, nil + } + + if errors.Cause(err) == ErrOldIndexFormat { + fmt.Fprintf(os.Stderr, "index %v has old format\n", id.Str()) + return LoadIndexWithDecoder(ctx, repo, id, DecodeOldIndex) + } + + return nil, err +} + +// SearchKey finds a key with the supplied password, afterwards the config is +// read and parsed. It tries at most maxKeys key files in the repo. +func (r *Repository) SearchKey(ctx context.Context, password string, maxKeys int, keyHint string) error { + key, err := SearchKey(ctx, r, password, maxKeys, keyHint) + if err != nil { + return err + } + + r.key = key.master + r.dataPM.key = key.master + r.treePM.key = key.master + r.keyName = key.Name() + r.cfg, err = restic.LoadConfig(ctx, r) + if err != nil { + return errors.Fatalf("config cannot be loaded: %v", err) + } + return nil +} + +// Init creates a new master key with the supplied password, initializes and +// saves the repository config. +func (r *Repository) Init(ctx context.Context, password string) error { + has, err := r.be.Test(ctx, restic.Handle{Type: restic.ConfigFile}) + if err != nil { + return err + } + if has { + return errors.New("repository master key and config already initialized") + } + + cfg, err := restic.CreateConfig() + if err != nil { + return err + } + + return r.init(ctx, password, cfg) +} + +// init creates a new master key with the supplied password and uses it to save +// the config into the repo. +func (r *Repository) init(ctx context.Context, password string, cfg restic.Config) error { + key, err := createMasterKey(r, password) + if err != nil { + return err + } + + r.key = key.master + r.dataPM.key = key.master + r.treePM.key = key.master + r.keyName = key.Name() + r.cfg = cfg + _, err = r.SaveJSONUnpacked(ctx, restic.ConfigFile, cfg) + return err +} + +// Key returns the current master key. +func (r *Repository) Key() *crypto.Key { + return r.key +} + +// KeyName returns the name of the current key in the backend. 
+func (r *Repository) KeyName() string { + return r.keyName +} + +// List runs fn for all files of type t in the repo. +func (r *Repository) List(ctx context.Context, t restic.FileType, fn func(restic.ID, int64) error) error { + return r.be.List(ctx, t, func(fi restic.FileInfo) error { + id, err := restic.ParseID(fi.Name) + if err != nil { + debug.Log("unable to parse %v as an ID", fi.Name) + return nil + } + return fn(id, fi.Size) + }) +} + +// ListPack returns the list of blobs saved in the pack id and the length of +// the file as stored in the backend. +func (r *Repository) ListPack(ctx context.Context, id restic.ID, size int64) ([]restic.Blob, int64, error) { + h := restic.Handle{Type: restic.DataFile, Name: id.String()} + + blobs, err := pack.List(r.Key(), restic.ReaderAt(r.Backend(), h), size) + if err != nil { + return nil, 0, err + } + + return blobs, size, nil +} + +// Delete calls backend.Delete() if implemented, and returns an error +// otherwise. +func (r *Repository) Delete(ctx context.Context) error { + return r.be.Delete(ctx) +} + +// Close closes the repository by closing the backend. +func (r *Repository) Close() error { + return r.be.Close() +} + +// LoadBlob loads a blob of type t from the repository to the buffer. buf must +// be large enough to hold the encrypted blob, since it is used as scratch +// space. +func (r *Repository) LoadBlob(ctx context.Context, t restic.BlobType, id restic.ID, buf []byte) (int, error) { + debug.Log("load blob %v into buf (len %v, cap %v)", id, len(buf), cap(buf)) + size, found := r.idx.LookupSize(id, t) + if !found { + return 0, errors.Errorf("id %v not found in repository", id) + } + + if cap(buf) < restic.CiphertextLength(int(size)) { + return 0, errors.Errorf("buffer is too small for data blob (%d < %d)", cap(buf), restic.CiphertextLength(int(size))) + } + + n, err := r.loadBlob(ctx, id, t, buf) + if err != nil { + return 0, err + } + buf = buf[:n] + + debug.Log("loaded %d bytes into buf %p", len(buf), buf) + + return len(buf), err +} + +// SaveBlob saves a blob of type t into the repository. If id is the null id, it +// will be computed and returned. +func (r *Repository) SaveBlob(ctx context.Context, t restic.BlobType, buf []byte, id restic.ID) (restic.ID, error) { + var i *restic.ID + if !id.IsNull() { + i = &id + } + return r.SaveAndEncrypt(ctx, t, buf, i) +} + +// LoadTree loads a tree from the repository. +func (r *Repository) LoadTree(ctx context.Context, id restic.ID) (*restic.Tree, error) { + debug.Log("load tree %v", id) + + size, found := r.idx.LookupSize(id, restic.TreeBlob) + if !found { + return nil, errors.Errorf("tree %v not found in repository", id) + } + + debug.Log("size is %d, create buffer", size) + buf := restic.NewBlobBuffer(int(size)) + + n, err := r.loadBlob(ctx, id, restic.TreeBlob, buf) + if err != nil { + return nil, err + } + buf = buf[:n] + + t := &restic.Tree{} + err = json.Unmarshal(buf, t) + if err != nil { + return nil, err + } + + return t, nil +} + +// SaveTree stores a tree into the repository and returns the ID. The ID is +// checked against the index. The tree is only stored when the index does not +// contain the ID. 
+func (r *Repository) SaveTree(ctx context.Context, t *restic.Tree) (restic.ID, error) { + buf, err := json.Marshal(t) + if err != nil { + return restic.ID{}, errors.Wrap(err, "MarshalJSON") + } + + // append a newline so that the data is always consistent (json.Encoder + // adds a newline after each object) + buf = append(buf, '\n') + + id := restic.Hash(buf) + if r.idx.Has(id, restic.TreeBlob) { + return id, nil + } + + _, err = r.SaveBlob(ctx, restic.TreeBlob, buf, id) + return id, err +} + +// Loader allows loading data from a backend. +type Loader interface { + Load(ctx context.Context, h restic.Handle, length int, offset int64, fn func(rd io.Reader) error) error +} + +// DownloadAndHash is all-in-one helper to download content of the file at h to a temporary filesystem location +// and calculate ID of the contents. Returned (temporary) file is positioned at the beginning of the file; +// it is reponsibility of the caller to close and delete the file. +func DownloadAndHash(ctx context.Context, be Loader, h restic.Handle) (tmpfile *os.File, hash restic.ID, size int64, err error) { + tmpfile, err = fs.TempFile("", "restic-temp-") + if err != nil { + return nil, restic.ID{}, -1, errors.Wrap(err, "TempFile") + } + + err = be.Load(ctx, h, 0, 0, func(rd io.Reader) (ierr error) { + _, ierr = tmpfile.Seek(0, io.SeekStart) + if ierr == nil { + ierr = tmpfile.Truncate(0) + } + if ierr != nil { + return ierr + } + hrd := hashing.NewReader(rd, sha256.New()) + size, ierr = io.Copy(tmpfile, hrd) + hash = restic.IDFromHash(hrd.Sum(nil)) + return ierr + }) + if err != nil { + tmpfile.Close() + os.Remove(tmpfile.Name()) + return nil, restic.ID{}, -1, errors.Wrap(err, "Load") + } + + _, err = tmpfile.Seek(0, io.SeekStart) + if err != nil { + tmpfile.Close() + os.Remove(tmpfile.Name()) + return nil, restic.ID{}, -1, errors.Wrap(err, "Seek") + } + + return tmpfile, hash, size, err +} diff --git a/internal/repository/repository_test.go b/internal/repository/repository_test.go new file mode 100644 index 000000000..8ea203d59 --- /dev/null +++ b/internal/repository/repository_test.go @@ -0,0 +1,500 @@ +package repository_test + +import ( + "bytes" + "context" + "crypto/sha256" + "io" + "math/rand" + "path/filepath" + "testing" + "time" + + "github.com/restic/restic/internal/archiver" + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/fs" + "github.com/restic/restic/internal/repository" + "github.com/restic/restic/internal/restic" + rtest "github.com/restic/restic/internal/test" +) + +var testSizes = []int{5, 23, 2<<18 + 23, 1 << 20} + +var rnd = rand.New(rand.NewSource(time.Now().UnixNano())) + +func TestSave(t *testing.T) { + repo, cleanup := repository.TestRepository(t) + defer cleanup() + + for _, size := range testSizes { + data := make([]byte, size) + _, err := io.ReadFull(rnd, data) + rtest.OK(t, err) + + id := restic.Hash(data) + + // save + sid, err := repo.SaveBlob(context.TODO(), restic.DataBlob, data, restic.ID{}) + rtest.OK(t, err) + + rtest.Equals(t, id, sid) + + rtest.OK(t, repo.Flush(context.Background())) + // rtest.OK(t, repo.SaveIndex()) + + // read back + buf := restic.NewBlobBuffer(size) + n, err := repo.LoadBlob(context.TODO(), restic.DataBlob, id, buf) + rtest.OK(t, err) + rtest.Equals(t, len(buf), n) + + rtest.Assert(t, len(buf) == len(data), + "number of bytes read back does not match: expected %d, got %d", + len(data), len(buf)) + + rtest.Assert(t, bytes.Equal(buf, data), + "data does not match: expected %02x, got %02x", + data, buf) + } +} + +func 
TestSaveFrom(t *testing.T) { + repo, cleanup := repository.TestRepository(t) + defer cleanup() + + for _, size := range testSizes { + data := make([]byte, size) + _, err := io.ReadFull(rnd, data) + rtest.OK(t, err) + + id := restic.Hash(data) + + // save + id2, err := repo.SaveBlob(context.TODO(), restic.DataBlob, data, id) + rtest.OK(t, err) + rtest.Equals(t, id, id2) + + rtest.OK(t, repo.Flush(context.Background())) + + // read back + buf := restic.NewBlobBuffer(size) + n, err := repo.LoadBlob(context.TODO(), restic.DataBlob, id, buf) + rtest.OK(t, err) + rtest.Equals(t, len(buf), n) + + rtest.Assert(t, len(buf) == len(data), + "number of bytes read back does not match: expected %d, got %d", + len(data), len(buf)) + + rtest.Assert(t, bytes.Equal(buf, data), + "data does not match: expected %02x, got %02x", + data, buf) + } +} + +func BenchmarkSaveAndEncrypt(t *testing.B) { + repo, cleanup := repository.TestRepository(t) + defer cleanup() + + size := 4 << 20 // 4MiB + + data := make([]byte, size) + _, err := io.ReadFull(rnd, data) + rtest.OK(t, err) + + id := restic.ID(sha256.Sum256(data)) + + t.ResetTimer() + t.SetBytes(int64(size)) + + for i := 0; i < t.N; i++ { + // save + _, err = repo.SaveBlob(context.TODO(), restic.DataBlob, data, id) + rtest.OK(t, err) + } +} + +func TestLoadTree(t *testing.T) { + repo, cleanup := repository.TestRepository(t) + defer cleanup() + + if rtest.BenchArchiveDirectory == "" { + t.Skip("benchdir not set, skipping") + } + + // archive a few files + sn := archiver.TestSnapshot(t, repo, rtest.BenchArchiveDirectory, nil) + rtest.OK(t, repo.Flush(context.Background())) + + _, err := repo.LoadTree(context.TODO(), *sn.Tree) + rtest.OK(t, err) +} + +func BenchmarkLoadTree(t *testing.B) { + repo, cleanup := repository.TestRepository(t) + defer cleanup() + + if rtest.BenchArchiveDirectory == "" { + t.Skip("benchdir not set, skipping") + } + + // archive a few files + sn := archiver.TestSnapshot(t, repo, rtest.BenchArchiveDirectory, nil) + rtest.OK(t, repo.Flush(context.Background())) + + t.ResetTimer() + + for i := 0; i < t.N; i++ { + _, err := repo.LoadTree(context.TODO(), *sn.Tree) + rtest.OK(t, err) + } +} + +func TestLoadBlob(t *testing.T) { + repo, cleanup := repository.TestRepository(t) + defer cleanup() + + length := 1000000 + buf := restic.NewBlobBuffer(length) + _, err := io.ReadFull(rnd, buf) + rtest.OK(t, err) + + id, err := repo.SaveBlob(context.TODO(), restic.DataBlob, buf, restic.ID{}) + rtest.OK(t, err) + rtest.OK(t, repo.Flush(context.Background())) + + // first, test with buffers that are too small + for _, testlength := range []int{length - 20, length, restic.CiphertextLength(length) - 1} { + buf = make([]byte, 0, testlength) + n, err := repo.LoadBlob(context.TODO(), restic.DataBlob, id, buf) + if err == nil { + t.Errorf("LoadBlob() did not return an error for a buffer that is too small to hold the blob") + continue + } + + if n != 0 { + t.Errorf("LoadBlob() returned an error and n > 0") + continue + } + } + + // then use buffers that are large enough + base := restic.CiphertextLength(length) + for _, testlength := range []int{base, base + 7, base + 15, base + 1000} { + buf = make([]byte, 0, testlength) + n, err := repo.LoadBlob(context.TODO(), restic.DataBlob, id, buf) + if err != nil { + t.Errorf("LoadBlob() returned an error for buffer size %v: %v", testlength, err) + continue + } + + if n != length { + t.Errorf("LoadBlob() returned the wrong number of bytes: want %v, got %v", length, n) + continue + } + } +} + +func BenchmarkLoadBlob(b 
*testing.B) { + repo, cleanup := repository.TestRepository(b) + defer cleanup() + + length := 1000000 + buf := restic.NewBlobBuffer(length) + _, err := io.ReadFull(rnd, buf) + rtest.OK(b, err) + + id, err := repo.SaveBlob(context.TODO(), restic.DataBlob, buf, restic.ID{}) + rtest.OK(b, err) + rtest.OK(b, repo.Flush(context.Background())) + + b.ResetTimer() + b.SetBytes(int64(length)) + + for i := 0; i < b.N; i++ { + n, err := repo.LoadBlob(context.TODO(), restic.DataBlob, id, buf) + rtest.OK(b, err) + if n != length { + b.Errorf("wanted %d bytes, got %d", length, n) + } + + id2 := restic.Hash(buf[:n]) + if !id.Equal(id2) { + b.Errorf("wrong data returned, wanted %v, got %v", id.Str(), id2.Str()) + } + } +} + +func BenchmarkLoadAndDecrypt(b *testing.B) { + repo, cleanup := repository.TestRepository(b) + defer cleanup() + + length := 1000000 + buf := restic.NewBlobBuffer(length) + _, err := io.ReadFull(rnd, buf) + rtest.OK(b, err) + + dataID := restic.Hash(buf) + + storageID, err := repo.SaveUnpacked(context.TODO(), restic.DataFile, buf) + rtest.OK(b, err) + // rtest.OK(b, repo.Flush()) + + b.ResetTimer() + b.SetBytes(int64(length)) + + for i := 0; i < b.N; i++ { + data, err := repo.LoadAndDecrypt(context.TODO(), restic.DataFile, storageID) + rtest.OK(b, err) + if len(data) != length { + b.Errorf("wanted %d bytes, got %d", length, len(data)) + } + + id2 := restic.Hash(data) + if !dataID.Equal(id2) { + b.Errorf("wrong data returned, wanted %v, got %v", storageID.Str(), id2.Str()) + } + } +} + +func TestLoadJSONUnpacked(t *testing.T) { + repo, cleanup := repository.TestRepository(t) + defer cleanup() + + if rtest.BenchArchiveDirectory == "" { + t.Skip("benchdir not set, skipping") + } + + // archive a snapshot + sn := restic.Snapshot{} + sn.Hostname = "foobar" + sn.Username = "test!" + + id, err := repo.SaveJSONUnpacked(context.TODO(), restic.SnapshotFile, &sn) + rtest.OK(t, err) + + var sn2 restic.Snapshot + + // restore + err = repo.LoadJSONUnpacked(context.TODO(), restic.SnapshotFile, id, &sn2) + rtest.OK(t, err) + + rtest.Equals(t, sn.Hostname, sn2.Hostname) + rtest.Equals(t, sn.Username, sn2.Username) +} + +var repoFixture = filepath.Join("testdata", "test-repo.tar.gz") + +func TestRepositoryLoadIndex(t *testing.T) { + repodir, cleanup := rtest.Env(t, repoFixture) + defer cleanup() + + repo := repository.TestOpenLocal(t, repodir) + rtest.OK(t, repo.LoadIndex(context.TODO())) +} + +func BenchmarkLoadIndex(b *testing.B) { + repository.TestUseLowSecurityKDFParameters(b) + + repo, cleanup := repository.TestRepository(b) + defer cleanup() + + idx := repository.NewIndex() + + for i := 0; i < 5000; i++ { + idx.Store(restic.PackedBlob{ + Blob: restic.Blob{ + Type: restic.DataBlob, + Length: 1234, + ID: restic.NewRandomID(), + Offset: 1235, + }, + PackID: restic.NewRandomID(), + }) + } + + id, err := repository.SaveIndex(context.TODO(), repo, idx) + rtest.OK(b, err) + + b.Logf("index saved as %v (%v entries)", id.Str(), idx.Count(restic.DataBlob)) + fi, err := repo.Backend().Stat(context.TODO(), restic.Handle{Type: restic.IndexFile, Name: id.String()}) + rtest.OK(b, err) + b.Logf("filesize is %v", fi.Size) + + b.ResetTimer() + + for i := 0; i < b.N; i++ { + _, err := repository.LoadIndex(context.TODO(), repo, id) + rtest.OK(b, err) + } +} + +// saveRandomDataBlobs generates random data blobs and saves them to the repository. 
+func saveRandomDataBlobs(t testing.TB, repo restic.Repository, num int, sizeMax int) { + for i := 0; i < num; i++ { + size := rand.Int() % sizeMax + + buf := make([]byte, size) + _, err := io.ReadFull(rnd, buf) + rtest.OK(t, err) + + _, err = repo.SaveBlob(context.TODO(), restic.DataBlob, buf, restic.ID{}) + rtest.OK(t, err) + } +} + +func TestRepositoryIncrementalIndex(t *testing.T) { + repo, cleanup := repository.TestRepository(t) + defer cleanup() + + repository.IndexFull = func(*repository.Index) bool { return true } + + // add 15 packs + for j := 0; j < 5; j++ { + // add 3 packs, write intermediate index + for i := 0; i < 3; i++ { + saveRandomDataBlobs(t, repo, 5, 1<<15) + rtest.OK(t, repo.Flush(context.Background())) + } + + rtest.OK(t, repo.SaveFullIndex(context.TODO())) + } + + // add another 5 packs + for i := 0; i < 5; i++ { + saveRandomDataBlobs(t, repo, 5, 1<<15) + rtest.OK(t, repo.Flush(context.Background())) + } + + // save final index + rtest.OK(t, repo.SaveIndex(context.TODO())) + + packEntries := make(map[restic.ID]map[restic.ID]struct{}) + + err := repo.List(context.TODO(), restic.IndexFile, func(id restic.ID, size int64) error { + idx, err := repository.LoadIndex(context.TODO(), repo, id) + rtest.OK(t, err) + + for pb := range idx.Each(context.TODO()) { + if _, ok := packEntries[pb.PackID]; !ok { + packEntries[pb.PackID] = make(map[restic.ID]struct{}) + } + + packEntries[pb.PackID][id] = struct{}{} + } + return nil + }) + if err != nil { + t.Fatal(err) + } + + for packID, ids := range packEntries { + if len(ids) > 1 { + t.Errorf("pack %v listed in %d indexes\n", packID, len(ids)) + } + } +} + +type backend struct { + rd io.Reader +} + +func (be backend) Load(ctx context.Context, h restic.Handle, length int, offset int64, fn func(rd io.Reader) error) error { + return fn(be.rd) +} + +type retryBackend struct { + buf []byte +} + +func (be retryBackend) Load(ctx context.Context, h restic.Handle, length int, offset int64, fn func(rd io.Reader) error) error { + err := fn(bytes.NewReader(be.buf[:len(be.buf)/2])) + if err != nil { + return err + } + + return fn(bytes.NewReader(be.buf)) +} + +func TestDownloadAndHash(t *testing.T) { + buf := make([]byte, 5*1024*1024+881) + _, err := io.ReadFull(rnd, buf) + if err != nil { + t.Fatal(err) + } + + var tests = []struct { + be repository.Loader + want []byte + }{ + { + be: backend{rd: bytes.NewReader(buf)}, + want: buf, + }, + { + be: retryBackend{buf: buf}, + want: buf, + }, + } + + for _, test := range tests { + t.Run("", func(t *testing.T) { + f, id, size, err := repository.DownloadAndHash(context.TODO(), test.be, restic.Handle{}) + if err != nil { + t.Error(err) + } + + want := restic.Hash(test.want) + if !want.Equal(id) { + t.Errorf("wrong hash returned, want %v, got %v", want.Str(), id.Str()) + } + + if size != int64(len(test.want)) { + t.Errorf("wrong size returned, want %v, got %v", test.want, size) + } + + err = f.Close() + if err != nil { + t.Error(err) + } + + err = fs.RemoveIfExists(f.Name()) + if err != nil { + t.Fatal(err) + } + }) + } +} + +type errorReader struct { + err error +} + +func (er errorReader) Read(p []byte) (n int, err error) { + return 0, er.err +} + +func TestDownloadAndHashErrors(t *testing.T) { + var tests = []struct { + be repository.Loader + err string + }{ + { + be: backend{rd: errorReader{errors.New("test error 1")}}, + err: "test error 1", + }, + } + + for _, test := range tests { + t.Run("", func(t *testing.T) { + _, _, _, err := repository.DownloadAndHash(context.TODO(), test.be, 
restic.Handle{}) + if err == nil { + t.Fatalf("wanted error %q, got nil", test.err) + } + + if errors.Cause(err).Error() != test.err { + t.Fatalf("wanted error %q, got %q", test.err, err) + } + }) + } +} diff --git a/internal/repository/testdata/test-repo.tar.gz b/internal/repository/testdata/test-repo.tar.gz new file mode 100644 index 000000000..89065181a Binary files /dev/null and b/internal/repository/testdata/test-repo.tar.gz differ diff --git a/internal/repository/testing.go b/internal/repository/testing.go new file mode 100644 index 000000000..ad8c7a2a0 --- /dev/null +++ b/internal/repository/testing.go @@ -0,0 +1,108 @@ +package repository + +import ( + "context" + "os" + "testing" + + "github.com/restic/restic/internal/backend/local" + "github.com/restic/restic/internal/backend/mem" + "github.com/restic/restic/internal/crypto" + "github.com/restic/restic/internal/restic" + "github.com/restic/restic/internal/test" + + "github.com/restic/chunker" +) + +// testKDFParams are the parameters for the KDF to be used during testing. +var testKDFParams = crypto.Params{ + N: 128, + R: 1, + P: 1, +} + +type logger interface { + Logf(format string, args ...interface{}) +} + +// TestUseLowSecurityKDFParameters configures low-security KDF parameters for testing. +func TestUseLowSecurityKDFParameters(t logger) { + t.Logf("using low-security KDF parameters for test") + Params = &testKDFParams +} + +// TestBackend returns a fully configured in-memory backend. +func TestBackend(t testing.TB) (be restic.Backend, cleanup func()) { + return mem.New(), func() {} +} + +const testChunkerPol = chunker.Pol(0x3DA3358B4DC173) + +// TestRepositoryWithBackend returns a repository initialized with a test +// password. If be is nil, an in-memory backend is used. A constant polynomial +// is used for the chunker and low-security test parameters. +func TestRepositoryWithBackend(t testing.TB, be restic.Backend) (r restic.Repository, cleanup func()) { + test.Helper(t).Helper() + TestUseLowSecurityKDFParameters(t) + restic.TestDisableCheckPolynomial(t) + + var beCleanup func() + if be == nil { + be, beCleanup = TestBackend(t) + } + + repo := New(be) + + cfg := restic.TestCreateConfig(t, testChunkerPol) + err := repo.init(context.TODO(), test.TestPassword, cfg) + if err != nil { + t.Fatalf("TestRepository(): initialize repo failed: %v", err) + } + + return repo, func() { + if beCleanup != nil { + beCleanup() + } + } +} + +// TestRepository returns a repository initialized with a test password on an +// in-memory backend. When the environment variable RESTIC_TEST_REPO is set to +// a non-existing directory, a local backend is created there and this is used +// instead. The directory is not removed, but left there for inspection. +func TestRepository(t testing.TB) (r restic.Repository, cleanup func()) { + test.Helper(t).Helper() + dir := os.Getenv("RESTIC_TEST_REPO") + if dir != "" { + _, err := os.Stat(dir) + if err != nil { + be, err := local.Create(local.Config{Path: dir}) + if err != nil { + t.Fatalf("error creating local backend at %v: %v", dir, err) + } + return TestRepositoryWithBackend(t, be) + } + + if err == nil { + t.Logf("directory at %v already exists, using mem backend", dir) + } + } + + return TestRepositoryWithBackend(t, nil) +} + +// TestOpenLocal opens a local repository. 
+func TestOpenLocal(t testing.TB, dir string) (r restic.Repository) { + be, err := local.Open(local.Config{Path: dir}) + if err != nil { + t.Fatal(err) + } + + repo := New(be) + err = repo.SearchKey(context.TODO(), test.TestPassword, 10, "") + if err != nil { + t.Fatal(err) + } + + return repo +} diff --git a/internal/restic/backend.go b/internal/restic/backend.go new file mode 100644 index 000000000..b2fd46b97 --- /dev/null +++ b/internal/restic/backend.go @@ -0,0 +1,61 @@ +package restic + +import ( + "context" + "io" +) + +// Backend is used to store and access data. +type Backend interface { + // Location returns a string that describes the type and location of the + // repository. + Location() string + + // Test a boolean value whether a File with the name and type exists. + Test(ctx context.Context, h Handle) (bool, error) + + // Remove removes a File described by h. + Remove(ctx context.Context, h Handle) error + + // Close the backend + Close() error + + // Save stores the data from rd under the given handle. + Save(ctx context.Context, h Handle, rd RewindReader) error + + // Load runs fn with a reader that yields the contents of the file at h at the + // given offset. If length is larger than zero, only a portion of the file + // is read. + // + // The function fn may be called multiple times during the same Load invocation + // and therefore must be idempotent. + // + // Implementations are encouraged to use backend.DefaultLoad + Load(ctx context.Context, h Handle, length int, offset int64, fn func(rd io.Reader) error) error + + // Stat returns information about the File identified by h. + Stat(ctx context.Context, h Handle) (FileInfo, error) + + // List runs fn for each file in the backend which has the type t. When an + // error occurs (or fn returns an error), List stops and returns it. + // + // The function fn is called exactly once for each file during successful + // execution and at most once in case of an error. + // + // The function fn is called in the same Goroutine that List() is called + // from. + List(ctx context.Context, t FileType, fn func(FileInfo) error) error + + // IsNotExist returns true if the error was caused by a non-existing file + // in the backend. + IsNotExist(err error) bool + + // Delete removes all data in the backend. + Delete(ctx context.Context) error +} + +// FileInfo is contains information about a file in the backend. +type FileInfo struct { + Size int64 + Name string +} diff --git a/internal/restic/backend_find.go b/internal/restic/backend_find.go new file mode 100644 index 000000000..4d11739e8 --- /dev/null +++ b/internal/restic/backend_find.go @@ -0,0 +1,86 @@ +package restic + +import ( + "context" + + "github.com/restic/restic/internal/errors" +) + +// ErrNoIDPrefixFound is returned by Find() when no ID for the given prefix +// could be found. +var ErrNoIDPrefixFound = errors.New("no matching ID found") + +// ErrMultipleIDMatches is returned by Find() when multiple IDs with the given +// prefix are found. +var ErrMultipleIDMatches = errors.New("multiple IDs with prefix found") + +// Find loads the list of all files of type t and searches for names which +// start with prefix. If none is found, nil and ErrNoIDPrefixFound is returned. +// If more than one is found, nil and ErrMultipleIDMatches is returned. 
+func Find(be Lister, t FileType, prefix string) (string, error) { + match := "" + + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + + err := be.List(ctx, t, func(fi FileInfo) error { + if prefix == fi.Name[:len(prefix)] { + if match == "" { + match = fi.Name + } else { + return ErrMultipleIDMatches + } + } + + return nil + }) + + if err != nil { + return "", err + } + + if match != "" { + return match, nil + } + + return "", ErrNoIDPrefixFound +} + +const minPrefixLength = 8 + +// PrefixLength returns the number of bytes required so that all prefixes of +// all names of type t are unique. +func PrefixLength(be Lister, t FileType) (int, error) { + // load all IDs of the given type + list := make([]string, 0, 100) + + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + + err := be.List(ctx, t, func(fi FileInfo) error { + list = append(list, fi.Name) + return nil + }) + + if err != nil { + return 0, err + } + + // select prefixes of length l, test if the last one is the same as the current one + id := ID{} +outer: + for l := minPrefixLength; l < len(id); l++ { + var last string + + for _, name := range list { + if last == name[:l] { + continue outer + } + last = name[:l] + } + + return l, nil + } + + return len(id), nil +} diff --git a/internal/restic/backend_find_test.go b/internal/restic/backend_find_test.go new file mode 100644 index 000000000..2cec35b1f --- /dev/null +++ b/internal/restic/backend_find_test.go @@ -0,0 +1,66 @@ +package restic + +import ( + "context" + "testing" +) + +type mockBackend struct { + list func(context.Context, FileType, func(FileInfo) error) error +} + +func (m mockBackend) List(ctx context.Context, t FileType, fn func(FileInfo) error) error { + return m.list(ctx, t, fn) +} + +var samples = IDs{ + TestParseID("20bdc1402a6fc9b633aaffffffffffffffffffffffffffffffffffffffffffff"), + TestParseID("20bdc1402a6fc9b633ccd578c4a92d0f4ef1a457fa2e16c596bc73fb409d6cc0"), + TestParseID("20bdc1402a6fc9b633ffffffffffffffffffffffffffffffffffffffffffffff"), + TestParseID("20ff988befa5fc40350f00d531a767606efefe242c837aaccb80673f286be53d"), + TestParseID("326cb59dfe802304f96ee9b5b9af93bdee73a30f53981e5ec579aedb6f1d0f07"), + TestParseID("86b60b9594d1d429c4aa98fa9562082cabf53b98c7dc083abe5dae31074dd15a"), + TestParseID("96c8dbe225079e624b5ce509f5bd817d1453cd0a85d30d536d01b64a8669aeae"), + TestParseID("fa31d65b87affcd167b119e9d3d2a27b8236ca4836cb077ed3e96fcbe209b792"), +} + +func TestPrefixLength(t *testing.T) { + list := samples + + m := mockBackend{} + m.list = func(ctx context.Context, t FileType, fn func(FileInfo) error) error { + for _, id := range list { + err := fn(FileInfo{Name: id.String()}) + if err != nil { + return err + } + } + return nil + } + + l, err := PrefixLength(m, SnapshotFile) + if err != nil { + t.Error(err) + } + if l != 19 { + t.Errorf("wrong prefix length returned, want %d, got %d", 19, l) + } + + list = samples[:3] + l, err = PrefixLength(m, SnapshotFile) + if err != nil { + t.Error(err) + } + if l != 19 { + t.Errorf("wrong prefix length returned, want %d, got %d", 19, l) + } + + list = samples[3:] + l, err = PrefixLength(m, SnapshotFile) + if err != nil { + t.Error(err) + } + if l != 8 { + t.Errorf("wrong prefix length returned, want %d, got %d", 8, l) + } +} diff --git a/internal/restic/blob.go b/internal/restic/blob.go new file mode 100644 index 000000000..f8f0737e3 --- /dev/null +++ b/internal/restic/blob.go @@ -0,0 +1,120 @@ +package restic + +import ( + "fmt" + + "github.com/restic/restic/internal/errors" +) + +// 
Blob is one part of a file or a tree. +type Blob struct { + Type BlobType + Length uint + ID ID + Offset uint +} + +func (b Blob) String() string { + return fmt.Sprintf("", + b.Type, b.ID.Str(), b.Offset, b.Length) +} + +// PackedBlob is a blob stored within a file. +type PackedBlob struct { + Blob + PackID ID +} + +// BlobHandle identifies a blob of a given type. +type BlobHandle struct { + ID ID + Type BlobType +} + +func (h BlobHandle) String() string { + return fmt.Sprintf("<%s/%s>", h.Type, h.ID.Str()) +} + +// BlobType specifies what a blob stored in a pack is. +type BlobType uint8 + +// These are the blob types that can be stored in a pack. +const ( + InvalidBlob BlobType = iota + DataBlob + TreeBlob +) + +func (t BlobType) String() string { + switch t { + case DataBlob: + return "data" + case TreeBlob: + return "tree" + case InvalidBlob: + return "invalid" + } + + return fmt.Sprintf("", t) +} + +// MarshalJSON encodes the BlobType into JSON. +func (t BlobType) MarshalJSON() ([]byte, error) { + switch t { + case DataBlob: + return []byte(`"data"`), nil + case TreeBlob: + return []byte(`"tree"`), nil + } + + return nil, errors.New("unknown blob type") +} + +// UnmarshalJSON decodes the BlobType from JSON. +func (t *BlobType) UnmarshalJSON(buf []byte) error { + switch string(buf) { + case `"data"`: + *t = DataBlob + case `"tree"`: + *t = TreeBlob + default: + return errors.New("unknown blob type") + } + + return nil +} + +// BlobHandles is an ordered list of BlobHandles that implements sort.Interface. +type BlobHandles []BlobHandle + +func (h BlobHandles) Len() int { + return len(h) +} + +func (h BlobHandles) Less(i, j int) bool { + for k, b := range h[i].ID { + if b == h[j].ID[k] { + continue + } + + if b < h[j].ID[k] { + return true + } + + return false + } + + return h[i].Type < h[j].Type +} + +func (h BlobHandles) Swap(i, j int) { + h[i], h[j] = h[j], h[i] +} + +func (h BlobHandles) String() string { + elements := make([]string, 0, len(h)) + for _, e := range h { + elements = append(elements, e.String()) + } + return fmt.Sprintf("%v", elements) +} diff --git a/internal/restic/blob_set.go b/internal/restic/blob_set.go new file mode 100644 index 000000000..07e88fed0 --- /dev/null +++ b/internal/restic/blob_set.go @@ -0,0 +1,109 @@ +package restic + +import "sort" + +// BlobSet is a set of blobs. +type BlobSet map[BlobHandle]struct{} + +// NewBlobSet returns a new BlobSet, populated with ids. +func NewBlobSet(handles ...BlobHandle) BlobSet { + m := make(BlobSet) + for _, h := range handles { + m[h] = struct{}{} + } + + return m +} + +// Has returns true iff id is contained in the set. +func (s BlobSet) Has(h BlobHandle) bool { + _, ok := s[h] + return ok +} + +// Insert adds id to the set. +func (s BlobSet) Insert(h BlobHandle) { + s[h] = struct{}{} +} + +// Delete removes id from the set. +func (s BlobSet) Delete(h BlobHandle) { + delete(s, h) +} + +// Equals returns true iff s equals other. +func (s BlobSet) Equals(other BlobSet) bool { + if len(s) != len(other) { + return false + } + + for h := range s { + if _, ok := other[h]; !ok { + return false + } + } + + return true +} + +// Merge adds the blobs in other to the current set. +func (s BlobSet) Merge(other BlobSet) { + for h := range other { + s.Insert(h) + } +} + +// Intersect returns a new set containing the handles that are present in both sets. 
+func (s BlobSet) Intersect(other BlobSet) (result BlobSet) { + result = NewBlobSet() + + set1 := s + set2 := other + + // iterate over the smaller set + if len(set2) < len(set1) { + set1, set2 = set2, set1 + } + + for h := range set1 { + if set2.Has(h) { + result.Insert(h) + } + } + + return result +} + +// Sub returns a new set containing all handles that are present in s but not in +// other. +func (s BlobSet) Sub(other BlobSet) (result BlobSet) { + result = NewBlobSet() + for h := range s { + if !other.Has(h) { + result.Insert(h) + } + } + + return result +} + +// List returns a sorted slice of all BlobHandle in the set. +func (s BlobSet) List() BlobHandles { + list := make(BlobHandles, 0, len(s)) + for h := range s { + list = append(list, h) + } + + sort.Sort(list) + + return list +} + +func (s BlobSet) String() string { + str := s.List().String() + if len(str) < 2 { + return "{}" + } + + return "{" + str[1:len(str)-1] + "}" +} diff --git a/internal/restic/blob_test.go b/internal/restic/blob_test.go new file mode 100644 index 000000000..951872250 --- /dev/null +++ b/internal/restic/blob_test.go @@ -0,0 +1,41 @@ +package restic + +import ( + "encoding/json" + "testing" +) + +var blobTypeJSON = []struct { + t BlobType + res string +}{ + {DataBlob, `"data"`}, + {TreeBlob, `"tree"`}, +} + +func TestBlobTypeJSON(t *testing.T) { + for _, test := range blobTypeJSON { + // test serialize + buf, err := json.Marshal(test.t) + if err != nil { + t.Error(err) + continue + } + if test.res != string(buf) { + t.Errorf("want %q, got %q", test.res, string(buf)) + continue + } + + // test unserialize + var v BlobType + err = json.Unmarshal([]byte(test.res), &v) + if err != nil { + t.Error(err) + continue + } + if test.t != v { + t.Errorf("want %v, got %v", test.t, v) + continue + } + } +} diff --git a/internal/restic/buffer.go b/internal/restic/buffer.go new file mode 100644 index 000000000..899f4ea6f --- /dev/null +++ b/internal/restic/buffer.go @@ -0,0 +1,21 @@ +package restic + +import "github.com/restic/restic/internal/crypto" + +// NewBlobBuffer returns a buffer that is large enough to hold a blob of size +// plaintext bytes, including the crypto overhead. +func NewBlobBuffer(size int) []byte { + return make([]byte, size, size+crypto.Extension) +} + +// PlaintextLength returns the plaintext length of a blob with ciphertextSize +// bytes. +func PlaintextLength(ciphertextSize int) int { + return ciphertextSize - crypto.Extension +} + +// CiphertextLength returns the encrypted length of a blob with plaintextSize +// bytes. +func CiphertextLength(plaintextSize int) int { + return plaintextSize + crypto.Extension +} diff --git a/internal/restic/cache.go b/internal/restic/cache.go new file mode 100644 index 000000000..56ed060ac --- /dev/null +++ b/internal/restic/cache.go @@ -0,0 +1,37 @@ +package restic + +import "io" + +// Cache manages a local cache. +type Cache interface { + // BaseDir returns the base directory of the cache. + BaseDir() string + + // Wrap returns a backend with a cache. + Wrap(Backend) Backend + + // IsNotExist returns true if the error was caused by a non-existing file. + IsNotExist(err error) bool + + // Load returns a reader that yields the contents of the file with the + // given id if it is cached. rd must be closed after use. If an error is + // returned, the ReadCloser is nil. The files are still encrypted + Load(h Handle, length int, offset int64) (io.ReadCloser, error) + + // SaveIndex saves an index in the cache. 
+ Save(Handle, io.Reader) error + + // SaveWriter returns a writer for the to be cached object h. It must be + // closed after writing is finished. + SaveWriter(Handle) (io.WriteCloser, error) + + // Remove deletes a single file from the cache. If it isn't cached, this + // functions must return no error. + Remove(Handle) error + + // Clear removes all files of type t from the cache that are not contained in the set. + Clear(FileType, IDSet) error + + // Has returns true if the file is cached. + Has(Handle) bool +} diff --git a/internal/restic/config.go b/internal/restic/config.go new file mode 100644 index 000000000..4f3c6c4bc --- /dev/null +++ b/internal/restic/config.go @@ -0,0 +1,91 @@ +package restic + +import ( + "context" + "testing" + + "github.com/restic/restic/internal/errors" + + "github.com/restic/restic/internal/debug" + + "github.com/restic/chunker" +) + +// Config contains the configuration for a repository. +type Config struct { + Version uint `json:"version"` + ID string `json:"id"` + ChunkerPolynomial chunker.Pol `json:"chunker_polynomial"` +} + +// RepoVersion is the version that is written to the config when a repository +// is newly created with Init(). +const RepoVersion = 1 + +// JSONUnpackedLoader loads unpacked JSON. +type JSONUnpackedLoader interface { + LoadJSONUnpacked(context.Context, FileType, ID, interface{}) error +} + +// CreateConfig creates a config file with a randomly selected polynomial and +// ID. +func CreateConfig() (Config, error) { + var ( + err error + cfg Config + ) + + cfg.ChunkerPolynomial, err = chunker.RandomPolynomial() + if err != nil { + return Config{}, errors.Wrap(err, "chunker.RandomPolynomial") + } + + cfg.ID = NewRandomID().String() + cfg.Version = RepoVersion + + debug.Log("New config: %#v", cfg) + return cfg, nil +} + +// TestCreateConfig creates a config for use within tests. +func TestCreateConfig(t testing.TB, pol chunker.Pol) (cfg Config) { + cfg.ChunkerPolynomial = pol + + cfg.ID = NewRandomID().String() + cfg.Version = RepoVersion + + return cfg +} + +var checkPolynomial = true + +// TestDisableCheckPolynomial disables the check that the polynomial used for +// the chunker. +func TestDisableCheckPolynomial(t testing.TB) { + t.Logf("disabling check of the chunker polynomial") + checkPolynomial = false +} + +// LoadConfig returns loads, checks and returns the config for a repository. 
+func LoadConfig(ctx context.Context, r JSONUnpackedLoader) (Config, error) { + var ( + cfg Config + ) + + err := r.LoadJSONUnpacked(ctx, ConfigFile, ID{}, &cfg) + if err != nil { + return Config{}, err + } + + if cfg.Version != RepoVersion { + return Config{}, errors.New("unsupported repository version") + } + + if checkPolynomial { + if !cfg.ChunkerPolynomial.Irreducible() { + return Config{}, errors.New("invalid chunker polynomial") + } + } + + return cfg, nil +} diff --git a/internal/restic/config_test.go b/internal/restic/config_test.go new file mode 100644 index 000000000..dd3ddcc74 --- /dev/null +++ b/internal/restic/config_test.go @@ -0,0 +1,55 @@ +package restic_test + +import ( + "context" + "testing" + + "github.com/restic/restic/internal/restic" + rtest "github.com/restic/restic/internal/test" +) + +type saver func(restic.FileType, interface{}) (restic.ID, error) + +func (s saver) SaveJSONUnpacked(t restic.FileType, arg interface{}) (restic.ID, error) { + return s(t, arg) +} + +type loader func(context.Context, restic.FileType, restic.ID, interface{}) error + +func (l loader) LoadJSONUnpacked(ctx context.Context, t restic.FileType, id restic.ID, arg interface{}) error { + return l(ctx, t, id, arg) +} + +func TestConfig(t *testing.T) { + resultConfig := restic.Config{} + save := func(tpe restic.FileType, arg interface{}) (restic.ID, error) { + rtest.Assert(t, tpe == restic.ConfigFile, + "wrong backend type: got %v, wanted %v", + tpe, restic.ConfigFile) + + cfg := arg.(restic.Config) + resultConfig = cfg + return restic.ID{}, nil + } + + cfg1, err := restic.CreateConfig() + rtest.OK(t, err) + + _, err = saver(save).SaveJSONUnpacked(restic.ConfigFile, cfg1) + + load := func(ctx context.Context, tpe restic.FileType, id restic.ID, arg interface{}) error { + rtest.Assert(t, tpe == restic.ConfigFile, + "wrong backend type: got %v, wanted %v", + tpe, restic.ConfigFile) + + cfg := arg.(*restic.Config) + *cfg = resultConfig + return nil + } + + cfg2, err := restic.LoadConfig(context.TODO(), loader(load)) + rtest.OK(t, err) + + rtest.Assert(t, cfg1 == cfg2, + "configs aren't equal: %v != %v", cfg1, cfg2) +} diff --git a/internal/restic/doc.go b/internal/restic/doc.go new file mode 100644 index 000000000..2e53b2524 --- /dev/null +++ b/internal/restic/doc.go @@ -0,0 +1,5 @@ +// Package restic is the top level package for the restic backup program, +// please see https://github.com/restic/restic for more information. +// +// This package exposes the main objects that are handled in restic. +package restic diff --git a/internal/restic/duration.go b/internal/restic/duration.go new file mode 100644 index 000000000..ad56bcc81 --- /dev/null +++ b/internal/restic/duration.go @@ -0,0 +1,137 @@ +package restic + +import ( + "fmt" + "strconv" + "strings" + "unicode" + + "github.com/restic/restic/internal/errors" +) + +// Duration is similar to time.Duration, except it only supports larger ranges +// like hours, days, months, and years. 
+type Duration struct { + Hours, Days, Months, Years int +} + +func (d Duration) String() string { + var s string + if d.Years != 0 { + s += fmt.Sprintf("%dy", d.Years) + } + + if d.Months != 0 { + s += fmt.Sprintf("%dm", d.Months) + } + + if d.Days != 0 { + s += fmt.Sprintf("%dd", d.Days) + } + + if d.Hours != 0 { + s += fmt.Sprintf("%dh", d.Hours) + } + + return s +} + +func nextNumber(input string) (num int, rest string, err error) { + if len(input) == 0 { + return 0, "", nil + } + + var ( + n string + negative bool + ) + + if input[0] == '-' { + negative = true + input = input[1:] + } + + for i, s := range input { + if !unicode.IsNumber(s) { + rest = input[i:] + break + } + + n += string(s) + } + + if len(n) == 0 { + return 0, input, errors.New("no number found") + } + + num, err = strconv.Atoi(n) + if err != nil { + panic(err) + } + + if negative { + num = -num + } + + return num, rest, nil +} + +// ParseDuration parses a duration from a string. The format is: +// 6y5m234d37h +func ParseDuration(s string) (Duration, error) { + var ( + d Duration + num int + err error + ) + + s = strings.TrimSpace(s) + + for s != "" { + num, s, err = nextNumber(s) + if err != nil { + return Duration{}, err + } + + if len(s) == 0 { + return Duration{}, errors.Errorf("no unit found after number %d", num) + } + + switch s[0] { + case 'y': + d.Years = num + case 'm': + d.Months = num + case 'd': + d.Days = num + case 'h': + d.Hours = num + } + + s = s[1:] + } + + return d, nil +} + +// Set calls ParseDuration and updates d. +func (d *Duration) Set(s string) error { + v, err := ParseDuration(s) + if err != nil { + return err + } + + *d = v + return nil +} + +// Type returns the type of Duration, usable within github.com/spf13/pflag and +// in help texts. +func (d Duration) Type() string { + return "duration" +} + +// Zero returns true if the duration is empty (all values are set to zero). 
+func (d Duration) Zero() bool { + return d.Years == 0 && d.Months == 0 && d.Days == 0 && d.Hours == 0 +} diff --git a/internal/restic/duration_test.go b/internal/restic/duration_test.go new file mode 100644 index 000000000..716c00cc9 --- /dev/null +++ b/internal/restic/duration_test.go @@ -0,0 +1,96 @@ +package restic + +import ( + "testing" + + "github.com/google/go-cmp/cmp" +) + +func TestNextNumber(t *testing.T) { + var tests = []struct { + input string + num int + rest string + err bool + }{ + { + input: "12h", num: 12, rest: "h", + }, + { + input: "3d", num: 3, rest: "d", + }, + { + input: "4d9h", num: 4, rest: "d9h", + }, + { + input: "7m5d", num: 7, rest: "m5d", + }, + { + input: "-23y7m5d", num: -23, rest: "y7m5d", + }, + { + input: "-13y5m11d12h", num: -13, rest: "y5m11d12h", + }, + { + input: " 5d", num: 0, rest: " 5d", err: true, + }, + { + input: "5d ", num: 5, rest: "d ", + }, + } + + for _, test := range tests { + t.Run("", func(t *testing.T) { + num, rest, err := nextNumber(test.input) + + if err != nil && !test.err { + t.Fatal(err) + } + + if num != test.num { + t.Errorf("wrong num, want %d, got %d", test.num, num) + } + + if rest != test.rest { + t.Errorf("wrong rest, want %q, got %q", test.rest, rest) + } + }) + } +} + +func TestParseDuration(t *testing.T) { + var tests = []struct { + input string + d Duration + output string + }{ + {"9h", Duration{Hours: 9}, "9h"}, + {"3d", Duration{Days: 3}, "3d"}, + {"4d2h", Duration{Days: 4, Hours: 2}, "4d2h"}, + {"7m5d", Duration{Months: 7, Days: 5}, "7m5d"}, + {"6m4d8h", Duration{Months: 6, Days: 4, Hours: 8}, "6m4d8h"}, + {"5d7m", Duration{Months: 7, Days: 5}, "7m5d"}, + {"4h3d9m", Duration{Months: 9, Days: 3, Hours: 4}, "9m3d4h"}, + {"-7m5d", Duration{Months: -7, Days: 5}, "-7m5d"}, + {"1y4m-5d-3h", Duration{Years: 1, Months: 4, Days: -5, Hours: -3}, "1y4m-5d-3h"}, + {"2y7m-5d", Duration{Years: 2, Months: 7, Days: -5}, "2y7m-5d"}, + } + + for _, test := range tests { + t.Run("", func(t *testing.T) { + d, err := ParseDuration(test.input) + if err != nil { + t.Fatal(err) + } + + if !cmp.Equal(d, test.d) { + t.Error(cmp.Diff(test.d, d)) + } + + s := d.String() + if s != test.output { + t.Errorf("unexpected return of String(), want %q, got %q", test.output, s) + } + }) + } +} diff --git a/internal/restic/file.go b/internal/restic/file.go new file mode 100644 index 000000000..71b56d657 --- /dev/null +++ b/internal/restic/file.go @@ -0,0 +1,62 @@ +package restic + +import ( + "fmt" + + "github.com/restic/restic/internal/errors" +) + +// FileType is the type of a file in the backend. +type FileType string + +// These are the different data types a backend can store. +const ( + DataFile FileType = "data" + KeyFile = "key" + LockFile = "lock" + SnapshotFile = "snapshot" + IndexFile = "index" + ConfigFile = "config" +) + +// Handle is used to store and access data in a backend. +type Handle struct { + Type FileType + Name string +} + +func (h Handle) String() string { + name := h.Name + if len(name) > 10 { + name = name[:10] + } + return fmt.Sprintf("<%s/%s>", h.Type, name) +} + +// Valid returns an error if h is not valid. 
+func (h Handle) Valid() error { + if h.Type == "" { + return errors.New("type is empty") + } + + switch h.Type { + case DataFile: + case KeyFile: + case LockFile: + case SnapshotFile: + case IndexFile: + case ConfigFile: + default: + return errors.Errorf("invalid Type %q", h.Type) + } + + if h.Type == ConfigFile { + return nil + } + + if h.Name == "" { + return errors.New("invalid Name") + } + + return nil +} diff --git a/internal/restic/file_test.go b/internal/restic/file_test.go new file mode 100644 index 000000000..2f8f395c2 --- /dev/null +++ b/internal/restic/file_test.go @@ -0,0 +1,28 @@ +package restic + +import "testing" + +var handleTests = []struct { + h Handle + valid bool +}{ + {Handle{Name: "foo"}, false}, + {Handle{Type: "foobar"}, false}, + {Handle{Type: ConfigFile, Name: ""}, true}, + {Handle{Type: DataFile, Name: ""}, false}, + {Handle{Type: "", Name: "x"}, false}, + {Handle{Type: LockFile, Name: "010203040506"}, true}, +} + +func TestHandleValid(t *testing.T) { + for i, test := range handleTests { + err := test.h.Valid() + if err != nil && test.valid { + t.Errorf("test %v failed: error returned for valid handle: %v", i, err) + } + + if !test.valid && err == nil { + t.Errorf("test %v failed: expected error for invalid handle not found", i) + } + } +} diff --git a/internal/restic/find.go b/internal/restic/find.go new file mode 100644 index 000000000..4b118abb0 --- /dev/null +++ b/internal/restic/find.go @@ -0,0 +1,39 @@ +package restic + +import "context" + +// FindUsedBlobs traverses the tree ID and adds all seen blobs (trees and data +// blobs) to the set blobs. The tree blobs in the `seen` BlobSet will not be visited +// again. +func FindUsedBlobs(ctx context.Context, repo Repository, treeID ID, blobs BlobSet, seen BlobSet) error { + blobs.Insert(BlobHandle{ID: treeID, Type: TreeBlob}) + + tree, err := repo.LoadTree(ctx, treeID) + if err != nil { + return err + } + + for _, node := range tree.Nodes { + switch node.Type { + case "file": + for _, blob := range node.Content { + blobs.Insert(BlobHandle{ID: blob, Type: DataBlob}) + } + case "dir": + subtreeID := *node.Subtree + h := BlobHandle{ID: subtreeID, Type: TreeBlob} + if seen.Has(h) { + continue + } + + seen.Insert(h) + + err := FindUsedBlobs(ctx, repo, subtreeID, blobs, seen) + if err != nil { + return err + } + } + } + + return nil +} diff --git a/internal/restic/find_test.go b/internal/restic/find_test.go new file mode 100644 index 000000000..d3620b472 --- /dev/null +++ b/internal/restic/find_test.go @@ -0,0 +1,139 @@ +package restic_test + +import ( + "bufio" + "context" + "encoding/json" + "flag" + "fmt" + "os" + "path/filepath" + "sort" + "testing" + "time" + + "github.com/restic/restic/internal/repository" + "github.com/restic/restic/internal/restic" +) + +func loadIDSet(t testing.TB, filename string) restic.BlobSet { + f, err := os.Open(filename) + if err != nil { + t.Logf("unable to open golden file %v: %v", filename, err) + return restic.NewBlobSet() + } + + sc := bufio.NewScanner(f) + + blobs := restic.NewBlobSet() + for sc.Scan() { + var h restic.BlobHandle + err := json.Unmarshal([]byte(sc.Text()), &h) + if err != nil { + t.Errorf("file %v contained invalid blob: %#v", filename, err) + continue + } + + blobs.Insert(h) + } + + if err = f.Close(); err != nil { + t.Errorf("closing file %v failed with error %v", filename, err) + } + + return blobs +} + +func saveIDSet(t testing.TB, filename string, s restic.BlobSet) { + f, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) + if err != nil 
{ + t.Fatalf("unable to update golden file %v: %v", filename, err) + return + } + + var hs restic.BlobHandles + for h := range s { + hs = append(hs, h) + } + + sort.Sort(hs) + + enc := json.NewEncoder(f) + for _, h := range hs { + err = enc.Encode(h) + if err != nil { + t.Fatalf("Encode() returned error: %v", err) + } + } + + if err = f.Close(); err != nil { + t.Fatalf("close file %v returned error: %v", filename, err) + } +} + +var updateGoldenFiles = flag.Bool("update", false, "update golden files in testdata/") + +const ( + findTestSnapshots = 3 + findTestDepth = 2 +) + +var findTestTime = time.Unix(1469960361, 23) + +func TestFindUsedBlobs(t *testing.T) { + repo, cleanup := repository.TestRepository(t) + defer cleanup() + + var snapshots []*restic.Snapshot + for i := 0; i < findTestSnapshots; i++ { + sn := restic.TestCreateSnapshot(t, repo, findTestTime.Add(time.Duration(i)*time.Second), findTestDepth, 0) + t.Logf("snapshot %v saved, tree %v", sn.ID().Str(), sn.Tree.Str()) + snapshots = append(snapshots, sn) + } + + for i, sn := range snapshots { + usedBlobs := restic.NewBlobSet() + err := restic.FindUsedBlobs(context.TODO(), repo, *sn.Tree, usedBlobs, restic.NewBlobSet()) + if err != nil { + t.Errorf("FindUsedBlobs returned error: %v", err) + continue + } + + if len(usedBlobs) == 0 { + t.Errorf("FindUsedBlobs returned an empty set") + continue + } + + goldenFilename := filepath.Join("testdata", fmt.Sprintf("used_blobs_snapshot%d", i)) + want := loadIDSet(t, goldenFilename) + + if !want.Equals(usedBlobs) { + t.Errorf("snapshot %d: wrong list of blobs returned:\n missing blobs: %v\n extra blobs: %v", + i, want.Sub(usedBlobs), usedBlobs.Sub(want)) + } + + if *updateGoldenFiles { + saveIDSet(t, goldenFilename, usedBlobs) + } + } +} + +func BenchmarkFindUsedBlobs(b *testing.B) { + repo, cleanup := repository.TestRepository(b) + defer cleanup() + + sn := restic.TestCreateSnapshot(b, repo, findTestTime, findTestDepth, 0) + + b.ResetTimer() + + for i := 0; i < b.N; i++ { + seen := restic.NewBlobSet() + blobs := restic.NewBlobSet() + err := restic.FindUsedBlobs(context.TODO(), repo, *sn.Tree, blobs, seen) + if err != nil { + b.Error(err) + } + + b.Logf("found %v blobs", len(blobs)) + } +} diff --git a/internal/restic/hardlinks_index.go b/internal/restic/hardlinks_index.go new file mode 100644 index 000000000..0874f32a4 --- /dev/null +++ b/internal/restic/hardlinks_index.go @@ -0,0 +1,57 @@ +package restic + +import ( + "sync" +) + +// HardlinkKey is a composed key for finding inodes on a specific device. +type HardlinkKey struct { + Inode, Device uint64 +} + +// HardlinkIndex contains a list of inodes, devices these inodes are one, and associated file names. +type HardlinkIndex struct { + m sync.Mutex + Index map[HardlinkKey]string +} + +// NewHardlinkIndex create a new index for hard links +func NewHardlinkIndex() *HardlinkIndex { + return &HardlinkIndex{ + Index: make(map[HardlinkKey]string), + } +} + +// Has checks wether the link already exist in the index. +func (idx *HardlinkIndex) Has(inode uint64, device uint64) bool { + idx.m.Lock() + defer idx.m.Unlock() + _, ok := idx.Index[HardlinkKey{inode, device}] + + return ok +} + +// Add adds a link to the index. +func (idx *HardlinkIndex) Add(inode uint64, device uint64, name string) { + idx.m.Lock() + defer idx.m.Unlock() + _, ok := idx.Index[HardlinkKey{inode, device}] + + if !ok { + idx.Index[HardlinkKey{inode, device}] = name + } +} + +// GetFilename obtains the filename from the index. 
+func (idx *HardlinkIndex) GetFilename(inode uint64, device uint64) string { + idx.m.Lock() + defer idx.m.Unlock() + return idx.Index[HardlinkKey{inode, device}] +} + +// Remove removes a link from the index. +func (idx *HardlinkIndex) Remove(inode uint64, device uint64) { + idx.m.Lock() + defer idx.m.Unlock() + delete(idx.Index, HardlinkKey{inode, device}) +} diff --git a/internal/restic/hardlinks_index_test.go b/internal/restic/hardlinks_index_test.go new file mode 100644 index 000000000..7e4bcd51a --- /dev/null +++ b/internal/restic/hardlinks_index_test.go @@ -0,0 +1,35 @@ +package restic_test + +import ( + "testing" + + "github.com/restic/restic/internal/restic" + rtest "github.com/restic/restic/internal/test" +) + +// TestHardLinks contains various tests for HardlinkIndex. +func TestHardLinks(t *testing.T) { + + idx := restic.NewHardlinkIndex() + + idx.Add(1, 2, "inode1-file1-on-device2") + idx.Add(2, 3, "inode2-file2-on-device3") + + var sresult string + sresult = idx.GetFilename(1, 2) + rtest.Equals(t, sresult, "inode1-file1-on-device2") + + sresult = idx.GetFilename(2, 3) + rtest.Equals(t, sresult, "inode2-file2-on-device3") + + var bresult bool + bresult = idx.Has(1, 2) + rtest.Equals(t, bresult, true) + + bresult = idx.Has(1, 3) + rtest.Equals(t, bresult, false) + + idx.Remove(1, 2) + bresult = idx.Has(1, 2) + rtest.Equals(t, bresult, false) +} diff --git a/internal/restic/id.go b/internal/restic/id.go new file mode 100644 index 000000000..ffe818a83 --- /dev/null +++ b/internal/restic/id.go @@ -0,0 +1,126 @@ +package restic + +import ( + "crypto/rand" + "crypto/sha256" + "encoding/hex" + "encoding/json" + "io" + + "github.com/restic/restic/internal/errors" +) + +// Hash returns the ID for data. +func Hash(data []byte) ID { + return sha256.Sum256(data) +} + +// idSize contains the size of an ID, in bytes. +const idSize = sha256.Size + +// ID references content within a repository. +type ID [idSize]byte + +// ParseID converts the given string to an ID. +func ParseID(s string) (ID, error) { + b, err := hex.DecodeString(s) + + if err != nil { + return ID{}, errors.Wrap(err, "hex.DecodeString") + } + + if len(b) != idSize { + return ID{}, errors.New("invalid length for hash") + } + + id := ID{} + copy(id[:], b) + + return id, nil +} + +func (id ID) String() string { + return hex.EncodeToString(id[:]) +} + +// NewRandomID returns a randomly generated ID. When reading from rand fails, +// the function panics. +func NewRandomID() ID { + id := ID{} + _, err := io.ReadFull(rand.Reader, id[:]) + if err != nil { + panic(err) + } + return id +} + +const shortStr = 4 + +// Str returns the shortened string version of id. +func (id *ID) Str() string { + if id == nil { + return "[nil]" + } + + if id.IsNull() { + return "[null]" + } + + return hex.EncodeToString(id[:shortStr]) +} + +// IsNull returns true iff id only consists of null bytes. +func (id ID) IsNull() bool { + var nullID ID + + return id == nullID +} + +// Equal compares an ID to another other. +func (id ID) Equal(other ID) bool { + return id == other +} + +// EqualString compares this ID to another one, given as a string. +func (id ID) EqualString(other string) (bool, error) { + s, err := hex.DecodeString(other) + if err != nil { + return false, errors.Wrap(err, "hex.DecodeString") + } + + id2 := ID{} + copy(id2[:], s) + + return id == id2, nil +} + +// MarshalJSON returns the JSON encoding of id. 
+func (id ID) MarshalJSON() ([]byte, error) { + return json.Marshal(id.String()) +} + +// UnmarshalJSON parses the JSON-encoded data and stores the result in id. +func (id *ID) UnmarshalJSON(b []byte) error { + var s string + err := json.Unmarshal(b, &s) + if err != nil { + return errors.Wrap(err, "Unmarshal") + } + + _, err = hex.Decode(id[:], []byte(s)) + if err != nil { + return errors.Wrap(err, "hex.Decode") + } + + return nil +} + +// IDFromHash returns the ID for the hash. +func IDFromHash(hash []byte) (id ID) { + if len(hash) != idSize { + panic("invalid hash type, not enough/too many bytes") + } + + copy(id[:], hash) + return id +} diff --git a/internal/restic/id_int_test.go b/internal/restic/id_int_test.go new file mode 100644 index 000000000..a60a11b89 --- /dev/null +++ b/internal/restic/id_int_test.go @@ -0,0 +1,16 @@ +package restic + +import "testing" + +func TestIDMethods(t *testing.T) { + var id ID + + if id.Str() != "[null]" { + t.Errorf("ID.Str() returned wrong value, want %v, got %v", "[null]", id.Str()) + } + + var pid *ID + if pid.Str() != "[nil]" { + t.Errorf("ID.Str() returned wrong value, want %v, got %v", "[nil]", pid.Str()) + } +} diff --git a/internal/restic/id_test.go b/internal/restic/id_test.go new file mode 100644 index 000000000..2e9634a19 --- /dev/null +++ b/internal/restic/id_test.go @@ -0,0 +1,60 @@ +package restic + +import ( + "reflect" + "testing" +) + +var TestStrings = []struct { + id string + data string +}{ + {"c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2", "foobar"}, + {"248d6a61d20638b8e5c026930c3e6039a33ce45964ff2167f6ecedd419db06c1", "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq"}, + {"cc5d46bdb4991c6eae3eb739c9c8a7a46fe9654fab79c47b4fe48383b5b25e1c", "foo/bar"}, + {"4e54d2c721cbdb730f01b10b62dec622962b36966ec685880effa63d71c808f2", "foo/../../baz"}, +} + +func TestID(t *testing.T) { + for _, test := range TestStrings { + id, err := ParseID(test.id) + if err != nil { + t.Error(err) + } + + id2, err := ParseID(test.id) + if err != nil { + t.Error(err) + } + if !id.Equal(id2) { + t.Errorf("ID.Equal() does not work as expected") + } + + ret, err := id.EqualString(test.id) + if err != nil { + t.Error(err) + } + if !ret { + t.Error("ID.EqualString() returned wrong value") + } + + // test json marshalling + buf, err := id.MarshalJSON() + if err != nil { + t.Error(err) + } + want := `"` + test.id + `"` + if string(buf) != want { + t.Errorf("string comparison failed, wanted %q, got %q", want, string(buf)) + } + + var id3 ID + err = id3.UnmarshalJSON(buf) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(id, id3) { + t.Error("ids are not equal") + } + } +} diff --git a/internal/restic/ids.go b/internal/restic/ids.go new file mode 100644 index 000000000..cc5ad18da --- /dev/null +++ b/internal/restic/ids.go @@ -0,0 +1,69 @@ +package restic + +import ( + "encoding/hex" + "fmt" +) + +// IDs is an ordered list of IDs that implements sort.Interface. +type IDs []ID + +func (ids IDs) Len() int { + return len(ids) +} + +func (ids IDs) Less(i, j int) bool { + if len(ids[i]) < len(ids[j]) { + return true + } + + for k, b := range ids[i] { + if b == ids[j][k] { + continue + } + + if b < ids[j][k] { + return true + } + + return false + } + + return false +} + +func (ids IDs) Swap(i, j int) { + ids[i], ids[j] = ids[j], ids[i] +} + +// Uniq returns list without duplicate IDs. The returned list retains the order +// of the original list so that the order of the first occurrence of each ID +// stays the same. 
+func (ids IDs) Uniq() (list IDs) { + seen := NewIDSet() + + for _, id := range ids { + if seen.Has(id) { + continue + } + + list = append(list, id) + seen.Insert(id) + } + + return list +} + +type shortID ID + +func (id shortID) String() string { + return hex.EncodeToString(id[:shortStr]) +} + +func (ids IDs) String() string { + elements := make([]shortID, 0, len(ids)) + for _, id := range ids { + elements = append(elements, shortID(id)) + } + return fmt.Sprintf("%v", elements) +} diff --git a/internal/restic/ids_test.go b/internal/restic/ids_test.go new file mode 100644 index 000000000..9ce02607b --- /dev/null +++ b/internal/restic/ids_test.go @@ -0,0 +1,55 @@ +package restic + +import ( + "reflect" + "testing" +) + +var uniqTests = []struct { + before, after IDs +}{ + { + IDs{ + TestParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), + TestParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"), + TestParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), + }, + IDs{ + TestParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), + TestParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"), + }, + }, + { + IDs{ + TestParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"), + TestParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), + TestParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), + }, + IDs{ + TestParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"), + TestParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), + }, + }, + { + IDs{ + TestParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"), + TestParseID("f658198b405d7e80db5ace1980d125c8da62f636b586c46bf81dfb856a49d0c8"), + TestParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), + TestParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), + }, + IDs{ + TestParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"), + TestParseID("f658198b405d7e80db5ace1980d125c8da62f636b586c46bf81dfb856a49d0c8"), + TestParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), + }, + }, +} + +func TestUniqIDs(t *testing.T) { + for i, test := range uniqTests { + uniq := test.before.Uniq() + if !reflect.DeepEqual(uniq, test.after) { + t.Errorf("uniqIDs() test %v failed\n wanted: %v\n got: %v", i, test.after, uniq) + } + } +} diff --git a/internal/restic/idset.go b/internal/restic/idset.go new file mode 100644 index 000000000..c31ca7747 --- /dev/null +++ b/internal/restic/idset.go @@ -0,0 +1,111 @@ +package restic + +import "sort" + +// IDSet is a set of IDs. +type IDSet map[ID]struct{} + +// NewIDSet returns a new IDSet, populated with ids. +func NewIDSet(ids ...ID) IDSet { + m := make(IDSet) + for _, id := range ids { + m[id] = struct{}{} + } + + return m +} + +// Has returns true iff id is contained in the set. +func (s IDSet) Has(id ID) bool { + _, ok := s[id] + return ok +} + +// Insert adds id to the set. +func (s IDSet) Insert(id ID) { + s[id] = struct{}{} +} + +// Delete removes id from the set. +func (s IDSet) Delete(id ID) { + delete(s, id) +} + +// List returns a slice of all IDs in the set. +func (s IDSet) List() IDs { + list := make(IDs, 0, len(s)) + for id := range s { + list = append(list, id) + } + + sort.Sort(list) + + return list +} + +// Equals returns true iff s equals other. 
+func (s IDSet) Equals(other IDSet) bool { + if len(s) != len(other) { + return false + } + + for id := range s { + if _, ok := other[id]; !ok { + return false + } + } + + // length + one-way comparison is sufficient implication of equality + + return true +} + +// Merge adds the blobs in other to the current set. +func (s IDSet) Merge(other IDSet) { + for id := range other { + s.Insert(id) + } +} + +// Intersect returns a new set containing the IDs that are present in both sets. +func (s IDSet) Intersect(other IDSet) (result IDSet) { + result = NewIDSet() + + set1 := s + set2 := other + + // iterate over the smaller set + if len(set2) < len(set1) { + set1, set2 = set2, set1 + } + + for id := range set1 { + if set2.Has(id) { + result.Insert(id) + } + } + + return result +} + +// Sub returns a new set containing all IDs that are present in s but not in +// other. +func (s IDSet) Sub(other IDSet) (result IDSet) { + result = NewIDSet() + for id := range s { + if !other.Has(id) { + result.Insert(id) + } + } + + return result +} + +func (s IDSet) String() string { + str := s.List().String() + if len(str) < 2 { + return "{}" + } + + return "{" + str[1:len(str)-1] + "}" +} diff --git a/internal/restic/idset_test.go b/internal/restic/idset_test.go new file mode 100644 index 000000000..5525eab79 --- /dev/null +++ b/internal/restic/idset_test.go @@ -0,0 +1,32 @@ +package restic + +import ( + "testing" +) + +var idsetTests = []struct { + id ID + seen bool +}{ + {TestParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), false}, + {TestParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"), false}, + {TestParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), true}, + {TestParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), true}, + {TestParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"), true}, + {TestParseID("f658198b405d7e80db5ace1980d125c8da62f636b586c46bf81dfb856a49d0c8"), false}, + {TestParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), true}, + {TestParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"), true}, + {TestParseID("f658198b405d7e80db5ace1980d125c8da62f636b586c46bf81dfb856a49d0c8"), true}, + {TestParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), true}, +} + +func TestIDSet(t *testing.T) { + set := NewIDSet() + for i, test := range idsetTests { + seen := set.Has(test.id) + if seen != test.seen { + t.Errorf("IDSet test %v failed: wanted %v, got %v", i, test.seen, seen) + } + set.Insert(test.id) + } +} diff --git a/internal/restic/lock.go b/internal/restic/lock.go new file mode 100644 index 000000000..3e3a27a6b --- /dev/null +++ b/internal/restic/lock.go @@ -0,0 +1,298 @@ +package restic + +import ( + "context" + "fmt" + "os" + "os/signal" + "os/user" + "sync" + "syscall" + "testing" + "time" + + "github.com/restic/restic/internal/errors" + + "github.com/restic/restic/internal/debug" +) + +// Lock represents a process locking the repository for an operation. +// +// There are two types of locks: exclusive and non-exclusive. There may be many +// different non-exclusive locks, but at most one exclusive lock, which can +// only be acquired while no non-exclusive lock is held. +// +// A lock must be refreshed regularly to not be considered stale, this must be +// triggered by regularly calling Refresh. 
+type Lock struct { + Time time.Time `json:"time"` + Exclusive bool `json:"exclusive"` + Hostname string `json:"hostname"` + Username string `json:"username"` + PID int `json:"pid"` + UID uint32 `json:"uid,omitempty"` + GID uint32 `json:"gid,omitempty"` + + repo Repository + lockID *ID +} + +// ErrAlreadyLocked is returned when NewLock or NewExclusiveLock are unable to +// acquire the desired lock. +type ErrAlreadyLocked struct { + otherLock *Lock +} + +func (e ErrAlreadyLocked) Error() string { + s := "" + if e.otherLock.Exclusive { + s = "exclusively " + } + return fmt.Sprintf("repository is already locked %sby %v", s, e.otherLock) +} + +// IsAlreadyLocked returns true iff err is an instance of ErrAlreadyLocked. +func IsAlreadyLocked(err error) bool { + if _, ok := errors.Cause(err).(ErrAlreadyLocked); ok { + return true + } + + return false +} + +// NewLock returns a new, non-exclusive lock for the repository. If an +// exclusive lock is already held by another process, ErrAlreadyLocked is +// returned. +func NewLock(ctx context.Context, repo Repository) (*Lock, error) { + return newLock(ctx, repo, false) +} + +// NewExclusiveLock returns a new, exclusive lock for the repository. If +// another lock (normal and exclusive) is already held by another process, +// ErrAlreadyLocked is returned. +func NewExclusiveLock(ctx context.Context, repo Repository) (*Lock, error) { + return newLock(ctx, repo, true) +} + +var waitBeforeLockCheck = 200 * time.Millisecond + +// TestSetLockTimeout can be used to reduce the lock wait timeout for tests. +func TestSetLockTimeout(t testing.TB, d time.Duration) { + t.Logf("setting lock timeout to %v", d) + waitBeforeLockCheck = d +} + +func newLock(ctx context.Context, repo Repository, excl bool) (*Lock, error) { + lock := &Lock{ + Time: time.Now(), + PID: os.Getpid(), + Exclusive: excl, + repo: repo, + } + + hn, err := os.Hostname() + if err == nil { + lock.Hostname = hn + } + + if err = lock.fillUserInfo(); err != nil { + return nil, err + } + + if err = lock.checkForOtherLocks(ctx); err != nil { + return nil, err + } + + lockID, err := lock.createLock(ctx) + if err != nil { + return nil, err + } + + lock.lockID = &lockID + + time.Sleep(waitBeforeLockCheck) + + if err = lock.checkForOtherLocks(ctx); err != nil { + _ = lock.Unlock() + return nil, err + } + + return lock, nil +} + +func (l *Lock) fillUserInfo() error { + usr, err := user.Current() + if err != nil { + return nil + } + l.Username = usr.Username + + l.UID, l.GID, err = uidGidInt(*usr) + return err +} + +// checkForOtherLocks looks for other locks that currently exist in the repository. +// +// If an exclusive lock is to be created, checkForOtherLocks returns an error +// if there are any other locks, regardless if exclusive or not. If a +// non-exclusive lock is to be created, an error is only returned when an +// exclusive lock is found. +func (l *Lock) checkForOtherLocks(ctx context.Context) error { + return l.repo.List(ctx, LockFile, func(id ID, size int64) error { + if l.lockID != nil && id.Equal(*l.lockID) { + return nil + } + + lock, err := LoadLock(ctx, l.repo, id) + if err != nil { + // ignore locks that cannot be loaded + debug.Log("ignore lock %v: %v", id, err) + return nil + } + + if l.Exclusive { + return ErrAlreadyLocked{otherLock: lock} + } + + if !l.Exclusive && lock.Exclusive { + return ErrAlreadyLocked{otherLock: lock} + } + + return nil + }) +} + +// createLock acquires the lock by creating a file in the repository. 
+func (l *Lock) createLock(ctx context.Context) (ID, error) { + id, err := l.repo.SaveJSONUnpacked(ctx, LockFile, l) + if err != nil { + return ID{}, err + } + + return id, nil +} + +// Unlock removes the lock from the repository. +func (l *Lock) Unlock() error { + if l == nil || l.lockID == nil { + return nil + } + + return l.repo.Backend().Remove(context.TODO(), Handle{Type: LockFile, Name: l.lockID.String()}) +} + +var staleTimeout = 30 * time.Minute + +// Stale returns true if the lock is stale. A lock is stale if the timestamp is +// older than 30 minutes or if it was created on the current machine and the +// process isn't alive any more. +func (l *Lock) Stale() bool { + debug.Log("testing if lock %v for process %d is stale", l, l.PID) + if time.Since(l.Time) > staleTimeout { + debug.Log("lock is stale, timestamp is too old: %v\n", l.Time) + return true + } + + hn, err := os.Hostname() + if err != nil { + debug.Log("unable to find current hostname: %v", err) + // since we cannot find the current hostname, assume that the lock is + // not stale. + return false + } + + if hn != l.Hostname { + // lock was created on a different host, assume the lock is not stale. + return false + } + + // check if we can reach the process retaining the lock + exists := l.processExists() + if !exists { + debug.Log("could not reach process, %d, lock is probably stale\n", l.PID) + return true + } + + debug.Log("lock not stale\n") + return false +} + +// Refresh refreshes the lock by creating a new file in the backend with a new +// timestamp. Afterwards the old lock is removed. +func (l *Lock) Refresh(ctx context.Context) error { + debug.Log("refreshing lock %v", l.lockID) + id, err := l.createLock(ctx) + if err != nil { + return err + } + + err = l.repo.Backend().Remove(context.TODO(), Handle{Type: LockFile, Name: l.lockID.String()}) + if err != nil { + return err + } + + debug.Log("new lock ID %v", id) + l.lockID = &id + + return nil +} + +func (l Lock) String() string { + text := fmt.Sprintf("PID %d on %s by %s (UID %d, GID %d)\nlock was created at %s (%s ago)\nstorage ID %v", + l.PID, l.Hostname, l.Username, l.UID, l.GID, + l.Time.Format("2006-01-02 15:04:05"), time.Since(l.Time), + l.lockID.Str()) + + return text +} + +// listen for incoming SIGHUP and ignore +var ignoreSIGHUP sync.Once + +func init() { + ignoreSIGHUP.Do(func() { + go func() { + c := make(chan os.Signal) + signal.Notify(c, syscall.SIGHUP) + for s := range c { + debug.Log("Signal received: %v\n", s) + } + }() + }) +} + +// LoadLock loads and unserializes a lock from a repository. +func LoadLock(ctx context.Context, repo Repository, id ID) (*Lock, error) { + lock := &Lock{} + if err := repo.LoadJSONUnpacked(ctx, LockFile, id, lock); err != nil { + return nil, err + } + lock.lockID = &id + + return lock, nil +} + +// RemoveStaleLocks deletes all locks detected as stale from the repository. +func RemoveStaleLocks(ctx context.Context, repo Repository) error { + return repo.List(ctx, LockFile, func(id ID, size int64) error { + lock, err := LoadLock(ctx, repo, id) + if err != nil { + // ignore locks that cannot be loaded + debug.Log("ignore lock %v: %v", id, err) + return nil + } + + if lock.Stale() { + return repo.Backend().Remove(context.TODO(), Handle{Type: LockFile, Name: id.String()}) + } + + return nil + }) +} + +// RemoveAllLocks removes all locks forcefully. 
+func RemoveAllLocks(ctx context.Context, repo Repository) error { + return repo.List(ctx, LockFile, func(id ID, size int64) error { + return repo.Backend().Remove(context.TODO(), Handle{Type: LockFile, Name: id.String()}) + }) +} diff --git a/internal/restic/lock_test.go b/internal/restic/lock_test.go new file mode 100644 index 000000000..daadd479f --- /dev/null +++ b/internal/restic/lock_test.go @@ -0,0 +1,258 @@ +package restic_test + +import ( + "context" + "os" + "testing" + "time" + + "github.com/restic/restic/internal/repository" + "github.com/restic/restic/internal/restic" + rtest "github.com/restic/restic/internal/test" +) + +func TestLock(t *testing.T) { + repo, cleanup := repository.TestRepository(t) + defer cleanup() + + lock, err := restic.NewLock(context.TODO(), repo) + rtest.OK(t, err) + + rtest.OK(t, lock.Unlock()) +} + +func TestDoubleUnlock(t *testing.T) { + repo, cleanup := repository.TestRepository(t) + defer cleanup() + + lock, err := restic.NewLock(context.TODO(), repo) + rtest.OK(t, err) + + rtest.OK(t, lock.Unlock()) + + err = lock.Unlock() + rtest.Assert(t, err != nil, + "double unlock didn't return an error, got %v", err) +} + +func TestMultipleLock(t *testing.T) { + repo, cleanup := repository.TestRepository(t) + defer cleanup() + + lock1, err := restic.NewLock(context.TODO(), repo) + rtest.OK(t, err) + + lock2, err := restic.NewLock(context.TODO(), repo) + rtest.OK(t, err) + + rtest.OK(t, lock1.Unlock()) + rtest.OK(t, lock2.Unlock()) +} + +func TestLockExclusive(t *testing.T) { + repo, cleanup := repository.TestRepository(t) + defer cleanup() + + elock, err := restic.NewExclusiveLock(context.TODO(), repo) + rtest.OK(t, err) + rtest.OK(t, elock.Unlock()) +} + +func TestLockOnExclusiveLockedRepo(t *testing.T) { + repo, cleanup := repository.TestRepository(t) + defer cleanup() + + elock, err := restic.NewExclusiveLock(context.TODO(), repo) + rtest.OK(t, err) + + lock, err := restic.NewLock(context.TODO(), repo) + rtest.Assert(t, err != nil, + "create normal lock with exclusively locked repo didn't return an error") + rtest.Assert(t, restic.IsAlreadyLocked(err), + "create normal lock with exclusively locked repo didn't return the correct error") + + rtest.OK(t, lock.Unlock()) + rtest.OK(t, elock.Unlock()) +} + +func TestExclusiveLockOnLockedRepo(t *testing.T) { + repo, cleanup := repository.TestRepository(t) + defer cleanup() + + elock, err := restic.NewLock(context.TODO(), repo) + rtest.OK(t, err) + + lock, err := restic.NewExclusiveLock(context.TODO(), repo) + rtest.Assert(t, err != nil, + "create normal lock with exclusively locked repo didn't return an error") + rtest.Assert(t, restic.IsAlreadyLocked(err), + "create normal lock with exclusively locked repo didn't return the correct error") + + rtest.OK(t, lock.Unlock()) + rtest.OK(t, elock.Unlock()) +} + +func createFakeLock(repo restic.Repository, t time.Time, pid int) (restic.ID, error) { + hostname, err := os.Hostname() + if err != nil { + return restic.ID{}, err + } + + newLock := &restic.Lock{Time: t, PID: pid, Hostname: hostname} + return repo.SaveJSONUnpacked(context.TODO(), restic.LockFile, &newLock) +} + +func removeLock(repo restic.Repository, id restic.ID) error { + h := restic.Handle{Type: restic.LockFile, Name: id.String()} + return repo.Backend().Remove(context.TODO(), h) +} + +var staleLockTests = []struct { + timestamp time.Time + stale bool + staleOnOtherHost bool + pid int +}{ + { + timestamp: time.Now(), + stale: false, + staleOnOtherHost: false, + pid: os.Getpid(), + }, + { + timestamp: 
time.Now().Add(-time.Hour), + stale: true, + staleOnOtherHost: true, + pid: os.Getpid(), + }, + { + timestamp: time.Now().Add(3 * time.Minute), + stale: false, + staleOnOtherHost: false, + pid: os.Getpid(), + }, + { + timestamp: time.Now(), + stale: true, + staleOnOtherHost: false, + pid: os.Getpid() + 500000, + }, +} + +func TestLockStale(t *testing.T) { + hostname, err := os.Hostname() + rtest.OK(t, err) + + otherHostname := "other-" + hostname + + for i, test := range staleLockTests { + lock := restic.Lock{ + Time: test.timestamp, + PID: test.pid, + Hostname: hostname, + } + + rtest.Assert(t, lock.Stale() == test.stale, + "TestStaleLock: test %d failed: expected stale: %v, got %v", + i, test.stale, !test.stale) + + lock.Hostname = otherHostname + rtest.Assert(t, lock.Stale() == test.staleOnOtherHost, + "TestStaleLock: test %d failed: expected staleOnOtherHost: %v, got %v", + i, test.staleOnOtherHost, !test.staleOnOtherHost) + } +} + +func lockExists(repo restic.Repository, t testing.TB, id restic.ID) bool { + h := restic.Handle{Type: restic.LockFile, Name: id.String()} + exists, err := repo.Backend().Test(context.TODO(), h) + rtest.OK(t, err) + + return exists +} + +func TestLockWithStaleLock(t *testing.T) { + repo, cleanup := repository.TestRepository(t) + defer cleanup() + + id1, err := createFakeLock(repo, time.Now().Add(-time.Hour), os.Getpid()) + rtest.OK(t, err) + + id2, err := createFakeLock(repo, time.Now().Add(-time.Minute), os.Getpid()) + rtest.OK(t, err) + + id3, err := createFakeLock(repo, time.Now().Add(-time.Minute), os.Getpid()+500000) + rtest.OK(t, err) + + rtest.OK(t, restic.RemoveStaleLocks(context.TODO(), repo)) + + rtest.Assert(t, lockExists(repo, t, id1) == false, + "stale lock still exists after RemoveStaleLocks was called") + rtest.Assert(t, lockExists(repo, t, id2) == true, + "non-stale lock was removed by RemoveStaleLocks") + rtest.Assert(t, lockExists(repo, t, id3) == false, + "stale lock still exists after RemoveStaleLocks was called") + + rtest.OK(t, removeLock(repo, id2)) +} + +func TestRemoveAllLocks(t *testing.T) { + repo, cleanup := repository.TestRepository(t) + defer cleanup() + + id1, err := createFakeLock(repo, time.Now().Add(-time.Hour), os.Getpid()) + rtest.OK(t, err) + + id2, err := createFakeLock(repo, time.Now().Add(-time.Minute), os.Getpid()) + rtest.OK(t, err) + + id3, err := createFakeLock(repo, time.Now().Add(-time.Minute), os.Getpid()+500000) + rtest.OK(t, err) + + rtest.OK(t, restic.RemoveAllLocks(context.TODO(), repo)) + + rtest.Assert(t, lockExists(repo, t, id1) == false, + "lock still exists after RemoveAllLocks was called") + rtest.Assert(t, lockExists(repo, t, id2) == false, + "lock still exists after RemoveAllLocks was called") + rtest.Assert(t, lockExists(repo, t, id3) == false, + "lock still exists after RemoveAllLocks was called") +} + +func TestLockRefresh(t *testing.T) { + repo, cleanup := repository.TestRepository(t) + defer cleanup() + + lock, err := restic.NewLock(context.TODO(), repo) + rtest.OK(t, err) + + var lockID *restic.ID + err = repo.List(context.TODO(), restic.LockFile, func(id restic.ID, size int64) error { + if lockID != nil { + t.Error("more than one lock found") + } + lockID = &id + return nil + }) + if err != nil { + t.Fatal(err) + } + + rtest.OK(t, lock.Refresh(context.TODO())) + + var lockID2 *restic.ID + err = repo.List(context.TODO(), restic.LockFile, func(id restic.ID, size int64) error { + if lockID2 != nil { + t.Error("more than one lock found") + } + lockID2 = &id + return nil + }) + if err != nil { + 
t.Fatal(err) + } + + rtest.Assert(t, !lockID.Equal(*lockID2), + "expected a new ID after lock refresh, got the same") + rtest.OK(t, lock.Unlock()) +} diff --git a/internal/restic/lock_unix.go b/internal/restic/lock_unix.go new file mode 100644 index 000000000..019cbbfa5 --- /dev/null +++ b/internal/restic/lock_unix.go @@ -0,0 +1,50 @@ +// +build !windows + +package restic + +import ( + "os" + "os/user" + "strconv" + "syscall" + + "github.com/restic/restic/internal/errors" + + "github.com/restic/restic/internal/debug" +) + +// uidGidInt returns uid, gid of the user as a number. +func uidGidInt(u user.User) (uid, gid uint32, err error) { + var ui, gi int64 + ui, err = strconv.ParseInt(u.Uid, 10, 32) + if err != nil { + return uid, gid, errors.Wrap(err, "ParseInt") + } + gi, err = strconv.ParseInt(u.Gid, 10, 32) + if err != nil { + return uid, gid, errors.Wrap(err, "ParseInt") + } + uid = uint32(ui) + gid = uint32(gi) + return +} + +// checkProcess will check if the process retaining the lock +// exists and responds to SIGHUP signal. +// Returns true if the process exists and responds. +func (l Lock) processExists() bool { + proc, err := os.FindProcess(l.PID) + if err != nil { + debug.Log("error searching for process %d: %v\n", l.PID, err) + return false + } + defer proc.Release() + + debug.Log("sending SIGHUP to process %d\n", l.PID) + err = proc.Signal(syscall.SIGHUP) + if err != nil { + debug.Log("signal error: %v, lock is probably stale\n", err) + return false + } + return true +} diff --git a/internal/restic/lock_windows.go b/internal/restic/lock_windows.go new file mode 100644 index 000000000..5697b6efb --- /dev/null +++ b/internal/restic/lock_windows.go @@ -0,0 +1,25 @@ +package restic + +import ( + "os" + "os/user" + + "github.com/restic/restic/internal/debug" +) + +// uidGidInt always returns 0 on Windows, since uid isn't numbers +func uidGidInt(u user.User) (uid, gid uint32, err error) { + return 0, 0, nil +} + +// checkProcess will check if the process retaining the lock exists. +// Returns true if the process exists. +func (l Lock) processExists() bool { + proc, err := os.FindProcess(l.PID) + if err != nil { + debug.Log("error searching for process %d: %v\n", l.PID, err) + return false + } + proc.Release() + return true +} diff --git a/internal/restic/node.go b/internal/restic/node.go new file mode 100644 index 000000000..638306eac --- /dev/null +++ b/internal/restic/node.go @@ -0,0 +1,700 @@ +package restic + +import ( + "context" + "encoding/json" + "fmt" + "os" + "os/user" + "strconv" + "sync" + "syscall" + "time" + + "github.com/restic/restic/internal/errors" + + "bytes" + "runtime" + + "github.com/restic/restic/internal/debug" + "github.com/restic/restic/internal/fs" +) + +// ExtendedAttribute is a tuple storing the xattr name and value. +type ExtendedAttribute struct { + Name string `json:"name"` + Value []byte `json:"value"` +} + +// Node is a file, directory or other item in a backup. 
+type Node struct { + Name string `json:"name"` + Type string `json:"type"` + Mode os.FileMode `json:"mode,omitempty"` + ModTime time.Time `json:"mtime,omitempty"` + AccessTime time.Time `json:"atime,omitempty"` + ChangeTime time.Time `json:"ctime,omitempty"` + UID uint32 `json:"uid"` + GID uint32 `json:"gid"` + User string `json:"user,omitempty"` + Group string `json:"group,omitempty"` + Inode uint64 `json:"inode,omitempty"` + DeviceID uint64 `json:"device_id,omitempty"` // device id of the file, stat.st_dev + Size uint64 `json:"size,omitempty"` + Links uint64 `json:"links,omitempty"` + LinkTarget string `json:"linktarget,omitempty"` + ExtendedAttributes []ExtendedAttribute `json:"extended_attributes,omitempty"` + Device uint64 `json:"device,omitempty"` // in case of Type == "dev", stat.st_rdev + Content IDs `json:"content"` + Subtree *ID `json:"subtree,omitempty"` + + Error string `json:"error,omitempty"` + + Path string `json:"-"` +} + +// Nodes is a slice of nodes that can be sorted. +type Nodes []*Node + +func (n Nodes) Len() int { return len(n) } +func (n Nodes) Less(i, j int) bool { return n[i].Name < n[j].Name } +func (n Nodes) Swap(i, j int) { n[i], n[j] = n[j], n[i] } + +func (node Node) String() string { + var mode os.FileMode + switch node.Type { + case "file": + mode = 0 + case "dir": + mode = os.ModeDir + case "symlink": + mode = os.ModeSymlink + case "dev": + mode = os.ModeDevice + case "chardev": + mode = os.ModeDevice | os.ModeCharDevice + case "fifo": + mode = os.ModeNamedPipe + case "socket": + mode = os.ModeSocket + } + + return fmt.Sprintf("%s %5d %5d %6d %s %s", + mode|node.Mode, node.UID, node.GID, node.Size, node.ModTime, node.Name) +} + +// NodeFromFileInfo returns a new node from the given path and FileInfo. It +// returns the first error that is encountered, together with a node. +func NodeFromFileInfo(path string, fi os.FileInfo) (*Node, error) { + mask := os.ModePerm | os.ModeType | os.ModeSetuid | os.ModeSetgid | os.ModeSticky + node := &Node{ + Path: path, + Name: fi.Name(), + Mode: fi.Mode() & mask, + ModTime: fi.ModTime(), + } + + node.Type = nodeTypeFromFileInfo(fi) + if node.Type == "file" { + node.Size = uint64(fi.Size()) + } + + err := node.fillExtra(path, fi) + return node, err +} + +func nodeTypeFromFileInfo(fi os.FileInfo) string { + switch fi.Mode() & (os.ModeType | os.ModeCharDevice) { + case 0: + return "file" + case os.ModeDir: + return "dir" + case os.ModeSymlink: + return "symlink" + case os.ModeDevice | os.ModeCharDevice: + return "chardev" + case os.ModeDevice: + return "dev" + case os.ModeNamedPipe: + return "fifo" + case os.ModeSocket: + return "socket" + } + + return "" +} + +// GetExtendedAttribute gets the extended attribute. +func (node Node) GetExtendedAttribute(a string) []byte { + for _, attr := range node.ExtendedAttributes { + if attr.Name == a { + return attr.Value + } + } + return nil +} + +// CreateAt creates the node at the given path but does NOT restore node meta data. 
+func (node *Node) CreateAt(ctx context.Context, path string, repo Repository) error { + debug.Log("create node %v at %v", node.Name, path) + + switch node.Type { + case "dir": + if err := node.createDirAt(path); err != nil { + return err + } + case "file": + if err := node.createFileAt(ctx, path, repo); err != nil { + return err + } + case "symlink": + if err := node.createSymlinkAt(path); err != nil { + return err + } + case "dev": + if err := node.createDevAt(path); err != nil { + return err + } + case "chardev": + if err := node.createCharDevAt(path); err != nil { + return err + } + case "fifo": + if err := node.createFifoAt(path); err != nil { + return err + } + case "socket": + return nil + default: + return errors.Errorf("filetype %q not implemented!\n", node.Type) + } + + return nil +} + +// RestoreMetadata restores node metadata +func (node Node) RestoreMetadata(path string) error { + err := node.restoreMetadata(path) + if err != nil { + debug.Log("restoreMetadata(%s) error %v", path, err) + } + + return err +} + +func (node Node) restoreMetadata(path string) error { + var firsterr error + + if err := lchown(path, int(node.UID), int(node.GID)); err != nil { + // Like "cp -a" and "rsync -a" do, we only report lchown permission errors + // if we run as root. + // On Windows, Geteuid always returns -1, and we always report lchown + // permission errors. + if os.Geteuid() > 0 && os.IsPermission(err) { + debug.Log("not running as root, ignoring lchown permission error for %v: %v", + path, err) + } else { + firsterr = errors.Wrap(err, "Lchown") + } + } + + if node.Type != "symlink" { + if err := fs.Chmod(path, node.Mode); err != nil { + if firsterr != nil { + firsterr = errors.Wrap(err, "Chmod") + } + } + } + + if err := node.RestoreTimestamps(path); err != nil { + debug.Log("error restoring timestamps for dir %v: %v", path, err) + if firsterr != nil { + firsterr = err + } + } + + if err := node.restoreExtendedAttributes(path); err != nil { + debug.Log("error restoring extended attributes for %v: %v", path, err) + if firsterr != nil { + firsterr = err + } + } + + return firsterr +} + +func (node Node) restoreExtendedAttributes(path string) error { + for _, attr := range node.ExtendedAttributes { + err := Setxattr(path, attr.Name, attr.Value) + if err != nil { + return err + } + } + return nil +} + +func (node Node) RestoreTimestamps(path string) error { + var utimes = [...]syscall.Timespec{ + syscall.NsecToTimespec(node.AccessTime.UnixNano()), + syscall.NsecToTimespec(node.ModTime.UnixNano()), + } + + if node.Type == "symlink" { + return node.restoreSymlinkTimestamps(path, utimes) + } + + if err := syscall.UtimesNano(path, utimes[:]); err != nil { + return errors.Wrap(err, "UtimesNano") + } + + return nil +} + +func (node Node) createDirAt(path string) error { + err := fs.Mkdir(path, node.Mode) + if err != nil && !os.IsExist(err) { + return errors.Wrap(err, "Mkdir") + } + + return nil +} + +func (node Node) createFileAt(ctx context.Context, path string, repo Repository) error { + f, err := fs.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0600) + if err != nil { + return errors.Wrap(err, "OpenFile") + } + + err = node.writeNodeContent(ctx, repo, f) + closeErr := f.Close() + + if err != nil { + return err + } + + if closeErr != nil { + return errors.Wrap(closeErr, "Close") + } + + return nil +} + +func (node Node) writeNodeContent(ctx context.Context, repo Repository, f *os.File) error { + var buf []byte + for _, id := range node.Content { + size, found := repo.LookupBlobSize(id, 
DataBlob) + if !found { + return errors.Errorf("id %v not found in repository", id) + } + + buf = buf[:cap(buf)] + if len(buf) < CiphertextLength(int(size)) { + buf = NewBlobBuffer(int(size)) + } + + n, err := repo.LoadBlob(ctx, DataBlob, id, buf) + if err != nil { + return err + } + buf = buf[:n] + + _, err = f.Write(buf) + if err != nil { + return errors.Wrap(err, "Write") + } + } + + return nil +} + +func (node Node) createSymlinkAt(path string) error { + // Windows does not allow non-admins to create soft links. + if runtime.GOOS == "windows" { + return nil + } + err := fs.Symlink(node.LinkTarget, path) + if err != nil { + return errors.Wrap(err, "Symlink") + } + + return nil +} + +func (node *Node) createDevAt(path string) error { + return mknod(path, syscall.S_IFBLK|0600, int(node.Device)) +} + +func (node *Node) createCharDevAt(path string) error { + return mknod(path, syscall.S_IFCHR|0600, int(node.Device)) +} + +func (node *Node) createFifoAt(path string) error { + return mkfifo(path, 0600) +} + +func (node Node) MarshalJSON() ([]byte, error) { + if node.ModTime.Year() < 0 || node.ModTime.Year() > 9999 { + err := errors.Errorf("node %v has invalid ModTime year %d: %v", + node.Path, node.ModTime.Year(), node.ModTime) + return nil, err + } + + if node.ChangeTime.Year() < 0 || node.ChangeTime.Year() > 9999 { + err := errors.Errorf("node %v has invalid ChangeTime year %d: %v", + node.Path, node.ChangeTime.Year(), node.ChangeTime) + return nil, err + } + + if node.AccessTime.Year() < 0 || node.AccessTime.Year() > 9999 { + err := errors.Errorf("node %v has invalid AccessTime year %d: %v", + node.Path, node.AccessTime.Year(), node.AccessTime) + return nil, err + } + + type nodeJSON Node + nj := nodeJSON(node) + name := strconv.Quote(node.Name) + nj.Name = name[1 : len(name)-1] + + return json.Marshal(nj) +} + +func (node *Node) UnmarshalJSON(data []byte) error { + type nodeJSON Node + nj := (*nodeJSON)(node) + + err := json.Unmarshal(data, nj) + if err != nil { + return errors.Wrap(err, "Unmarshal") + } + + nj.Name, err = strconv.Unquote(`"` + nj.Name + `"`) + return errors.Wrap(err, "Unquote") +} + +func (node Node) Equals(other Node) bool { + if node.Name != other.Name { + return false + } + if node.Type != other.Type { + return false + } + if node.Mode != other.Mode { + return false + } + if !node.ModTime.Equal(other.ModTime) { + return false + } + if !node.AccessTime.Equal(other.AccessTime) { + return false + } + if !node.ChangeTime.Equal(other.ChangeTime) { + return false + } + if node.UID != other.UID { + return false + } + if node.GID != other.GID { + return false + } + if node.User != other.User { + return false + } + if node.Group != other.Group { + return false + } + if node.Inode != other.Inode { + return false + } + if node.DeviceID != other.DeviceID { + return false + } + if node.Size != other.Size { + return false + } + if node.Links != other.Links { + return false + } + if node.LinkTarget != other.LinkTarget { + return false + } + if node.Device != other.Device { + return false + } + if !node.sameContent(other) { + return false + } + if !node.sameExtendedAttributes(other) { + return false + } + if node.Subtree != nil { + if other.Subtree == nil { + return false + } + + if !node.Subtree.Equal(*other.Subtree) { + return false + } + } else { + if other.Subtree != nil { + return false + } + } + if node.Error != other.Error { + return false + } + + return true +} + +func (node Node) sameContent(other Node) bool { + if node.Content == nil { + return other.Content == nil + } + + 
if other.Content == nil { + return false + } + + if len(node.Content) != len(other.Content) { + return false + } + + for i := 0; i < len(node.Content); i++ { + if !node.Content[i].Equal(other.Content[i]) { + return false + } + } + return true +} + +func (node Node) sameExtendedAttributes(other Node) bool { + if len(node.ExtendedAttributes) != len(other.ExtendedAttributes) { + return false + } + + // build a set of all attributes that node has + type mapvalue struct { + value []byte + present bool + } + attributes := make(map[string]mapvalue) + for _, attr := range node.ExtendedAttributes { + attributes[attr.Name] = mapvalue{value: attr.Value} + } + + for _, attr := range other.ExtendedAttributes { + v, ok := attributes[attr.Name] + if !ok { + // extended attribute is not set for node + debug.Log("other node has attribute %v, which is not present in node", attr.Name) + return false + + } + + if !bytes.Equal(v.value, attr.Value) { + // attribute has different value + debug.Log("attribute %v has different value", attr.Name) + return false + } + + // remember that this attribute is present in other. + v.present = true + attributes[attr.Name] = v + } + + // check for attributes that are not present in other + for name, v := range attributes { + if !v.present { + debug.Log("attribute %v not present in other node", name) + return false + } + } + + return true +} + +func (node *Node) fillUser(stat statT) error { + node.UID = stat.uid() + node.GID = stat.gid() + + username, err := lookupUsername(strconv.Itoa(int(stat.uid()))) + if err != nil { + return err + } + + group, err := lookupGroup(strconv.Itoa(int(stat.gid()))) + if err != nil { + return err + } + + node.User = username + node.Group = group + + return nil +} + +var ( + uidLookupCache = make(map[string]string) + uidLookupCacheMutex = sync.RWMutex{} +) + +func lookupUsername(uid string) (string, error) { + uidLookupCacheMutex.RLock() + value, ok := uidLookupCache[uid] + uidLookupCacheMutex.RUnlock() + + if ok { + return value, nil + } + + username := "" + + u, err := user.LookupId(uid) + if err == nil { + username = u.Username + } + + uidLookupCacheMutex.Lock() + uidLookupCache[uid] = username + uidLookupCacheMutex.Unlock() + + return username, nil +} + +var ( + gidLookupCache = make(map[string]string) + gidLookupCacheMutex = sync.RWMutex{} +) + +func lookupGroup(gid string) (string, error) { + gidLookupCacheMutex.RLock() + value, ok := gidLookupCache[gid] + gidLookupCacheMutex.RUnlock() + + if ok { + return value, nil + } + + group := "" + + g, err := user.LookupGroupId(gid) + if err == nil { + group = g.Name + } + + gidLookupCacheMutex.Lock() + gidLookupCache[gid] = group + gidLookupCacheMutex.Unlock() + + return group, nil +} + +func (node *Node) fillExtra(path string, fi os.FileInfo) error { + stat, ok := toStatT(fi.Sys()) + if !ok { + // fill minimal info with current values for uid, gid + node.UID = uint32(os.Getuid()) + node.GID = uint32(os.Getgid()) + node.ChangeTime = node.ModTime + return nil + } + + node.Inode = uint64(stat.ino()) + node.DeviceID = uint64(stat.dev()) + + node.fillTimes(stat) + + var err error + + if err = node.fillUser(stat); err != nil { + return err + } + + switch node.Type { + case "file": + node.Size = uint64(stat.size()) + node.Links = uint64(stat.nlink()) + case "dir": + case "symlink": + node.LinkTarget, err = fs.Readlink(path) + node.Links = uint64(stat.nlink()) + if err != nil { + return errors.Wrap(err, "Readlink") + } + case "dev": + node.Device = uint64(stat.rdev()) + node.Links = uint64(stat.nlink()) 
+ case "chardev": + node.Device = uint64(stat.rdev()) + node.Links = uint64(stat.nlink()) + case "fifo": + case "socket": + default: + return errors.Errorf("invalid node type %q", node.Type) + } + + if err = node.fillExtendedAttributes(path); err != nil { + return err + } + + return nil +} + +func (node *Node) fillExtendedAttributes(path string) error { + if node.Type == "symlink" { + return nil + } + + xattrs, err := Listxattr(path) + debug.Log("fillExtendedAttributes(%v) %v %v", path, xattrs, err) + if err != nil { + return err + } + + node.ExtendedAttributes = make([]ExtendedAttribute, 0, len(xattrs)) + for _, attr := range xattrs { + attrVal, err := Getxattr(path, attr) + if err != nil { + fmt.Fprintf(os.Stderr, "can not obtain extended attribute %v for %v:\n", attr, path) + continue + } + attr := ExtendedAttribute{ + Name: attr, + Value: attrVal, + } + + node.ExtendedAttributes = append(node.ExtendedAttributes, attr) + } + + return nil +} + +type statT interface { + dev() uint64 + ino() uint64 + nlink() uint64 + uid() uint32 + gid() uint32 + rdev() uint64 + size() int64 + atim() syscall.Timespec + mtim() syscall.Timespec + ctim() syscall.Timespec +} + +func mkfifo(path string, mode uint32) (err error) { + return mknod(path, mode|syscall.S_IFIFO, 0) +} + +func (node *Node) fillTimes(stat statT) { + ctim := stat.ctim() + atim := stat.atim() + node.ChangeTime = time.Unix(ctim.Unix()) + node.AccessTime = time.Unix(atim.Unix()) +} + +func changeTime(stat statT) time.Time { + ctim := stat.ctim() + return time.Unix(ctim.Unix()) +} diff --git a/internal/restic/node_darwin.go b/internal/restic/node_darwin.go new file mode 100644 index 000000000..a3f97096d --- /dev/null +++ b/internal/restic/node_darwin.go @@ -0,0 +1,11 @@ +package restic + +import "syscall" + +func (node Node) restoreSymlinkTimestamps(path string, utimes [2]syscall.Timespec) error { + return nil +} + +func (s statUnix) atim() syscall.Timespec { return s.Atimespec } +func (s statUnix) mtim() syscall.Timespec { return s.Mtimespec } +func (s statUnix) ctim() syscall.Timespec { return s.Ctimespec } diff --git a/internal/restic/node_freebsd.go b/internal/restic/node_freebsd.go new file mode 100644 index 000000000..a3f97096d --- /dev/null +++ b/internal/restic/node_freebsd.go @@ -0,0 +1,11 @@ +package restic + +import "syscall" + +func (node Node) restoreSymlinkTimestamps(path string, utimes [2]syscall.Timespec) error { + return nil +} + +func (s statUnix) atim() syscall.Timespec { return s.Atimespec } +func (s statUnix) mtim() syscall.Timespec { return s.Mtimespec } +func (s statUnix) ctim() syscall.Timespec { return s.Ctimespec } diff --git a/internal/restic/node_linux.go b/internal/restic/node_linux.go new file mode 100644 index 000000000..d33944672 --- /dev/null +++ b/internal/restic/node_linux.go @@ -0,0 +1,37 @@ +package restic + +import ( + "path/filepath" + "syscall" + + "golang.org/x/sys/unix" + + "github.com/restic/restic/internal/errors" + + "github.com/restic/restic/internal/fs" +) + +func (node Node) restoreSymlinkTimestamps(path string, utimes [2]syscall.Timespec) error { + dir, err := fs.Open(filepath.Dir(path)) + defer dir.Close() + if err != nil { + return errors.Wrap(err, "Open") + } + + times := []unix.Timespec{ + {Sec: utimes[0].Sec, Nsec: utimes[0].Nsec}, + {Sec: utimes[1].Sec, Nsec: utimes[1].Nsec}, + } + + err = unix.UtimesNanoAt(int(dir.Fd()), filepath.Base(path), times, unix.AT_SYMLINK_NOFOLLOW) + + if err != nil { + return errors.Wrap(err, "UtimesNanoAt") + } + + return nil +} + +func (s statUnix) atim() 
syscall.Timespec { return s.Atim } +func (s statUnix) mtim() syscall.Timespec { return s.Mtim } +func (s statUnix) ctim() syscall.Timespec { return s.Ctim } diff --git a/internal/restic/node_netbsd.go b/internal/restic/node_netbsd.go new file mode 100644 index 000000000..b588a2b68 --- /dev/null +++ b/internal/restic/node_netbsd.go @@ -0,0 +1,27 @@ +package restic + +import "syscall" + +func (node Node) restoreSymlinkTimestamps(path string, utimes [2]syscall.Timespec) error { + return nil +} + +func (s statUnix) atim() syscall.Timespec { return s.Atimespec } +func (s statUnix) mtim() syscall.Timespec { return s.Mtimespec } +func (s statUnix) ctim() syscall.Timespec { return s.Ctimespec } + +// Getxattr retrieves extended attribute data associated with path. +func Getxattr(path, name string) ([]byte, error) { + return nil, nil +} + +// Listxattr retrieves a list of names of extended attributes associated with the +// given path in the file system. +func Listxattr(path string) ([]string, error) { + return nil, nil +} + +// Setxattr associates name and data together as an attribute of path. +func Setxattr(path, name string, data []byte) error { + return nil +} diff --git a/internal/restic/node_openbsd.go b/internal/restic/node_openbsd.go new file mode 100644 index 000000000..8ca4f95b8 --- /dev/null +++ b/internal/restic/node_openbsd.go @@ -0,0 +1,27 @@ +package restic + +import "syscall" + +func (node Node) restoreSymlinkTimestamps(path string, utimes [2]syscall.Timespec) error { + return nil +} + +func (s statUnix) atim() syscall.Timespec { return s.Atim } +func (s statUnix) mtim() syscall.Timespec { return s.Mtim } +func (s statUnix) ctim() syscall.Timespec { return s.Ctim } + +// Getxattr retrieves extended attribute data associated with path. +func Getxattr(path, name string) ([]byte, error) { + return nil, nil +} + +// Listxattr retrieves a list of names of extended attributes associated with the +// given path in the file system. +func Listxattr(path string) ([]string, error) { + return nil, nil +} + +// Setxattr associates name and data together as an attribute of path. +func Setxattr(path, name string, data []byte) error { + return nil +} diff --git a/internal/restic/node_solaris.go b/internal/restic/node_solaris.go new file mode 100644 index 000000000..8ca4f95b8 --- /dev/null +++ b/internal/restic/node_solaris.go @@ -0,0 +1,27 @@ +package restic + +import "syscall" + +func (node Node) restoreSymlinkTimestamps(path string, utimes [2]syscall.Timespec) error { + return nil +} + +func (s statUnix) atim() syscall.Timespec { return s.Atim } +func (s statUnix) mtim() syscall.Timespec { return s.Mtim } +func (s statUnix) ctim() syscall.Timespec { return s.Ctim } + +// Getxattr retrieves extended attribute data associated with path. +func Getxattr(path, name string) ([]byte, error) { + return nil, nil +} + +// Listxattr retrieves a list of names of extended attributes associated with the +// given path in the file system. +func Listxattr(path string) ([]string, error) { + return nil, nil +} + +// Setxattr associates name and data together as an attribute of path. 
+func Setxattr(path, name string, data []byte) error { + return nil +} diff --git a/internal/restic/node_test.go b/internal/restic/node_test.go new file mode 100644 index 000000000..f12353a0a --- /dev/null +++ b/internal/restic/node_test.go @@ -0,0 +1,246 @@ +package restic_test + +import ( + "context" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "testing" + "time" + + "github.com/restic/restic/internal/restic" + rtest "github.com/restic/restic/internal/test" +) + +func BenchmarkNodeFillUser(t *testing.B) { + tempfile, err := ioutil.TempFile("", "restic-test-temp-") + if err != nil { + t.Fatal(err) + } + + fi, err := tempfile.Stat() + if err != nil { + t.Fatal(err) + } + + path := tempfile.Name() + + t.ResetTimer() + + for i := 0; i < t.N; i++ { + restic.NodeFromFileInfo(path, fi) + } + + rtest.OK(t, tempfile.Close()) + rtest.RemoveAll(t, tempfile.Name()) +} + +func BenchmarkNodeFromFileInfo(t *testing.B) { + tempfile, err := ioutil.TempFile("", "restic-test-temp-") + if err != nil { + t.Fatal(err) + } + + fi, err := tempfile.Stat() + if err != nil { + t.Fatal(err) + } + + path := tempfile.Name() + + t.ResetTimer() + + for i := 0; i < t.N; i++ { + _, err := restic.NodeFromFileInfo(path, fi) + if err != nil { + t.Fatal(err) + } + } + + rtest.OK(t, tempfile.Close()) + rtest.RemoveAll(t, tempfile.Name()) +} + +func parseTime(s string) time.Time { + t, err := time.Parse("2006-01-02 15:04:05.999", s) + if err != nil { + panic(err) + } + + return t.Local() +} + +var nodeTests = []restic.Node{ + { + Name: "testFile", + Type: "file", + Content: restic.IDs{}, + UID: uint32(os.Getuid()), + GID: uint32(os.Getgid()), + Mode: 0604, + ModTime: parseTime("2015-05-14 21:07:23.111"), + AccessTime: parseTime("2015-05-14 21:07:24.222"), + ChangeTime: parseTime("2015-05-14 21:07:25.333"), + }, + { + Name: "testSuidFile", + Type: "file", + Content: restic.IDs{}, + UID: uint32(os.Getuid()), + GID: uint32(os.Getgid()), + Mode: 0755 | os.ModeSetuid, + ModTime: parseTime("2015-05-14 21:07:23.111"), + AccessTime: parseTime("2015-05-14 21:07:24.222"), + ChangeTime: parseTime("2015-05-14 21:07:25.333"), + }, + { + Name: "testSuidFile2", + Type: "file", + Content: restic.IDs{}, + UID: uint32(os.Getuid()), + GID: uint32(os.Getgid()), + Mode: 0755 | os.ModeSetgid, + ModTime: parseTime("2015-05-14 21:07:23.111"), + AccessTime: parseTime("2015-05-14 21:07:24.222"), + ChangeTime: parseTime("2015-05-14 21:07:25.333"), + }, + { + Name: "testSticky", + Type: "file", + Content: restic.IDs{}, + UID: uint32(os.Getuid()), + GID: uint32(os.Getgid()), + Mode: 0755 | os.ModeSticky, + ModTime: parseTime("2015-05-14 21:07:23.111"), + AccessTime: parseTime("2015-05-14 21:07:24.222"), + ChangeTime: parseTime("2015-05-14 21:07:25.333"), + }, + { + Name: "testDir", + Type: "dir", + Subtree: nil, + UID: uint32(os.Getuid()), + GID: uint32(os.Getgid()), + Mode: 0750 | os.ModeDir, + ModTime: parseTime("2015-05-14 21:07:23.111"), + AccessTime: parseTime("2015-05-14 21:07:24.222"), + ChangeTime: parseTime("2015-05-14 21:07:25.333"), + }, + { + Name: "testSymlink", + Type: "symlink", + LinkTarget: "invalid", + UID: uint32(os.Getuid()), + GID: uint32(os.Getgid()), + Mode: 0777 | os.ModeSymlink, + ModTime: parseTime("2015-05-14 21:07:23.111"), + AccessTime: parseTime("2015-05-14 21:07:24.222"), + ChangeTime: parseTime("2015-05-14 21:07:25.333"), + }, + + // include "testFile" and "testDir" again with slightly different + // metadata, so we can test if CreateAt works with pre-existing files. 
+ { + Name: "testFile", + Type: "file", + Content: restic.IDs{}, + UID: uint32(os.Getuid()), + GID: uint32(os.Getgid()), + Mode: 0604, + ModTime: parseTime("2005-05-14 21:07:03.111"), + AccessTime: parseTime("2005-05-14 21:07:04.222"), + ChangeTime: parseTime("2005-05-14 21:07:05.333"), + }, + { + Name: "testDir", + Type: "dir", + Subtree: nil, + UID: uint32(os.Getuid()), + GID: uint32(os.Getgid()), + Mode: 0750 | os.ModeDir, + ModTime: parseTime("2005-05-14 21:07:03.111"), + AccessTime: parseTime("2005-05-14 21:07:04.222"), + ChangeTime: parseTime("2005-05-14 21:07:05.333"), + }, +} + +func TestNodeRestoreAt(t *testing.T) { + tempdir, err := ioutil.TempDir(rtest.TestTempDir, "restic-test-") + rtest.OK(t, err) + + defer func() { + if rtest.TestCleanupTempDirs { + rtest.RemoveAll(t, tempdir) + } else { + t.Logf("leaving tempdir at %v", tempdir) + } + }() + + for _, test := range nodeTests { + nodePath := filepath.Join(tempdir, test.Name) + rtest.OK(t, test.CreateAt(context.TODO(), nodePath, nil)) + rtest.OK(t, test.RestoreMetadata(nodePath)) + + if test.Type == "symlink" && runtime.GOOS == "windows" { + continue + } + if test.Type == "dir" { + rtest.OK(t, test.RestoreTimestamps(nodePath)) + } + + fi, err := os.Lstat(nodePath) + rtest.OK(t, err) + + n2, err := restic.NodeFromFileInfo(nodePath, fi) + rtest.OK(t, err) + + rtest.Assert(t, test.Name == n2.Name, + "%v: name doesn't match (%v != %v)", test.Type, test.Name, n2.Name) + rtest.Assert(t, test.Type == n2.Type, + "%v: type doesn't match (%v != %v)", test.Type, test.Type, n2.Type) + rtest.Assert(t, test.Size == n2.Size, + "%v: size doesn't match (%v != %v)", test.Size, test.Size, n2.Size) + + if runtime.GOOS != "windows" { + rtest.Assert(t, test.UID == n2.UID, + "%v: UID doesn't match (%v != %v)", test.Type, test.UID, n2.UID) + rtest.Assert(t, test.GID == n2.GID, + "%v: GID doesn't match (%v != %v)", test.Type, test.GID, n2.GID) + if test.Type != "symlink" { + // On OpenBSD only root can set sticky bit (see sticky(8)). 
+ if runtime.GOOS != "openbsd" && runtime.GOOS != "netbsd" && test.Name == "testSticky" { + rtest.Assert(t, test.Mode == n2.Mode, + "%v: mode doesn't match (0%o != 0%o)", test.Type, test.Mode, n2.Mode) + } + } + } + + AssertFsTimeEqual(t, "AccessTime", test.Type, test.AccessTime, n2.AccessTime) + AssertFsTimeEqual(t, "ModTime", test.Type, test.ModTime, n2.ModTime) + } +} + +func AssertFsTimeEqual(t *testing.T, label string, nodeType string, t1 time.Time, t2 time.Time) { + var equal bool + + // Go currently doesn't support setting timestamps of symbolic links on darwin and bsd + if nodeType == "symlink" { + switch runtime.GOOS { + case "darwin", "freebsd", "openbsd", "netbsd": + return + } + } + + switch runtime.GOOS { + case "darwin": + // HFS+ timestamps don't support sub-second precision, + // see https://en.wikipedia.org/wiki/Comparison_of_file_systems + diff := int(t1.Sub(t2).Seconds()) + equal = diff == 0 + default: + equal = t1.Equal(t2) + } + + rtest.Assert(t, equal, "%s: %s doesn't match (%v != %v)", label, nodeType, t1, t2) +} diff --git a/internal/restic/node_unix.go b/internal/restic/node_unix.go new file mode 100644 index 000000000..eec07fc5a --- /dev/null +++ b/internal/restic/node_unix.go @@ -0,0 +1,32 @@ +// +build dragonfly linux netbsd openbsd freebsd solaris darwin + +package restic + +import ( + "os" + "syscall" +) + +var mknod = syscall.Mknod +var lchown = os.Lchown + +type statUnix syscall.Stat_t + +func toStatT(i interface{}) (statT, bool) { + if i == nil { + return nil, false + } + s, ok := i.(*syscall.Stat_t) + if ok && s != nil { + return statUnix(*s), true + } + return nil, false +} + +func (s statUnix) dev() uint64 { return uint64(s.Dev) } +func (s statUnix) ino() uint64 { return uint64(s.Ino) } +func (s statUnix) nlink() uint64 { return uint64(s.Nlink) } +func (s statUnix) uid() uint32 { return uint32(s.Uid) } +func (s statUnix) gid() uint32 { return uint32(s.Gid) } +func (s statUnix) rdev() uint64 { return uint64(s.Rdev) } +func (s statUnix) size() int64 { return int64(s.Size) } diff --git a/internal/restic/node_unix_test.go b/internal/restic/node_unix_test.go new file mode 100644 index 000000000..0908d378e --- /dev/null +++ b/internal/restic/node_unix_test.go @@ -0,0 +1,135 @@ +// +build !windows + +package restic + +import ( + "os" + "runtime" + "syscall" + "testing" + "time" +) + +func stat(t testing.TB, filename string) (fi os.FileInfo, ok bool) { + fi, err := os.Lstat(filename) + if err != nil && os.IsNotExist(err) { + return fi, false + } + + if err != nil { + t.Fatal(err) + } + + return fi, true +} + +func checkFile(t testing.TB, stat *syscall.Stat_t, node *Node) { + if uint32(node.Mode.Perm()) != uint32(stat.Mode&0777) { + t.Errorf("Mode does not match, want %v, got %v", stat.Mode&0777, node.Mode) + } + + if node.Inode != uint64(stat.Ino) { + t.Errorf("Inode does not match, want %v, got %v", stat.Ino, node.Inode) + } + + if node.DeviceID != uint64(stat.Dev) { + t.Errorf("Dev does not match, want %v, got %v", stat.Dev, node.DeviceID) + } + + if node.Size != uint64(stat.Size) { + t.Errorf("Size does not match, want %v, got %v", stat.Size, node.Size) + } + + if node.Links != uint64(stat.Nlink) { + t.Errorf("Links does not match, want %v, got %v", stat.Nlink, node.Links) + } + + if node.UID != stat.Uid { + t.Errorf("UID does not match, want %v, got %v", stat.Uid, node.UID) + } + + if node.GID != stat.Gid { + t.Errorf("UID does not match, want %v, got %v", stat.Gid, node.GID) + } + + // use the os dependent function to compare the timestamps + s, ok := 
toStatT(stat) + if !ok { + return + } + + mtime := s.mtim() + if node.ModTime != time.Unix(mtime.Unix()) { + t.Errorf("ModTime does not match, want %v, got %v", time.Unix(mtime.Unix()), node.ModTime) + } + + ctime := s.ctim() + if node.ChangeTime != time.Unix(ctime.Unix()) { + t.Errorf("ChangeTime does not match, want %v, got %v", time.Unix(ctime.Unix()), node.ChangeTime) + } + + atime := s.atim() + if node.AccessTime != time.Unix(atime.Unix()) { + t.Errorf("AccessTime does not match, want %v, got %v", time.Unix(atime.Unix()), node.AccessTime) + } + +} + +func checkDevice(t testing.TB, stat *syscall.Stat_t, node *Node) { + if node.Device != uint64(stat.Rdev) { + t.Errorf("Rdev does not match, want %v, got %v", stat.Rdev, node.Device) + } +} + +func TestNodeFromFileInfo(t *testing.T) { + type Test struct { + filename string + canSkip bool + } + var tests = []Test{ + {"node_test.go", false}, + {"/dev/sda", true}, + } + + // on darwin, users are not permitted to list the extended attributes of + // /dev/null, therefore skip it. + if runtime.GOOS != "darwin" { + tests = append(tests, Test{"/dev/null", true}) + } + + for _, test := range tests { + t.Run("", func(t *testing.T) { + fi, found := stat(t, test.filename) + if !found && test.canSkip { + t.Skipf("%v not found in filesystem", test.filename) + return + } + + if fi.Sys() == nil { + t.Skip("fi.Sys() is nil") + return + } + + s, ok := fi.Sys().(*syscall.Stat_t) + if !ok { + t.Skipf("fi type is %T, not stat_t", fi.Sys()) + return + } + + node, err := NodeFromFileInfo(test.filename, fi) + if err != nil { + t.Fatal(err) + } + + switch node.Type { + case "file": + checkFile(t, s, node) + case "dev", "chardev": + checkFile(t, s, node) + checkDevice(t, s, node) + default: + t.Fatalf("invalid node type %q", node.Type) + } + }) + } +} diff --git a/internal/restic/node_windows.go b/internal/restic/node_windows.go new file mode 100644 index 000000000..c376cdef6 --- /dev/null +++ b/internal/restic/node_windows.go @@ -0,0 +1,76 @@ +package restic + +import ( + "syscall" + + "github.com/restic/restic/internal/errors" +) + +// mknod() creates a filesystem node (file, device +// special file, or named pipe) named pathname, with attributes +// specified by mode and dev. +var mknod = func(path string, mode uint32, dev int) (err error) { + return errors.New("device nodes cannot be created on windows") +} + +// Windows doesn't need lchown +var lchown = func(path string, uid int, gid int) (err error) { + return nil +} + +func (node Node) restoreSymlinkTimestamps(path string, utimes [2]syscall.Timespec) error { + return nil +} + +// Getxattr retrieves extended attribute data associated with path. +func Getxattr(path, name string) ([]byte, error) { + return nil, nil +} + +// Listxattr retrieves a list of names of extended attributes associated with the +// given path in the file system. +func Listxattr(path string) ([]string, error) { + return nil, nil +} + +// Setxattr associates name and data together as an attribute of path. +func Setxattr(path, name string, data []byte) error { + return nil +} + +type statWin syscall.Win32FileAttributeData + +//ToStatT call the Windows system call Win32FileAttributeData. 
+func toStatT(i interface{}) (statT, bool) { + if i == nil { + return nil, false + } + s, ok := i.(*syscall.Win32FileAttributeData) + if ok && s != nil { + return statWin(*s), true + } + return nil, false +} + +func (s statWin) dev() uint64 { return 0 } +func (s statWin) ino() uint64 { return 0 } +func (s statWin) nlink() uint64 { return 0 } +func (s statWin) uid() uint32 { return 0 } +func (s statWin) gid() uint32 { return 0 } +func (s statWin) rdev() uint64 { return 0 } + +func (s statWin) size() int64 { + return int64(s.FileSizeLow) | (int64(s.FileSizeHigh) << 32) +} + +func (s statWin) atim() syscall.Timespec { + return syscall.NsecToTimespec(s.LastAccessTime.Nanoseconds()) +} + +func (s statWin) mtim() syscall.Timespec { + return syscall.NsecToTimespec(s.LastWriteTime.Nanoseconds()) +} + +func (s statWin) ctim() syscall.Timespec { + return syscall.NsecToTimespec(s.CreationTime.Nanoseconds()) +} diff --git a/internal/restic/node_xattr.go b/internal/restic/node_xattr.go new file mode 100644 index 000000000..ea5ed8d3a --- /dev/null +++ b/internal/restic/node_xattr.go @@ -0,0 +1,42 @@ +// +build !netbsd +// +build !openbsd +// +build !solaris +// +build !windows + +package restic + +import ( + "syscall" + + "github.com/restic/restic/internal/errors" + + "github.com/pkg/xattr" +) + +// Getxattr retrieves extended attribute data associated with path. +func Getxattr(path, name string) ([]byte, error) { + b, e := xattr.Get(path, name) + if err, ok := e.(*xattr.Error); ok && err.Err == syscall.ENOTSUP { + return nil, nil + } + return b, errors.Wrap(e, "Getxattr") +} + +// Listxattr retrieves a list of names of extended attributes associated with the +// given path in the file system. +func Listxattr(path string) ([]string, error) { + s, e := xattr.List(path) + if err, ok := e.(*xattr.Error); ok && err.Err == syscall.ENOTSUP { + return nil, nil + } + return s, errors.Wrap(e, "Listxattr") +} + +// Setxattr associates name and data together as an attribute of path. +func Setxattr(path, name string, data []byte) error { + e := xattr.Set(path, name, data) + if err, ok := e.(*xattr.Error); ok && err.Err == syscall.ENOTSUP { + return nil + } + return errors.Wrap(e, "Setxattr") +} diff --git a/internal/restic/progress.go b/internal/restic/progress.go new file mode 100644 index 000000000..12d2b8710 --- /dev/null +++ b/internal/restic/progress.go @@ -0,0 +1,233 @@ +package restic + +import ( + "fmt" + "os" + "strconv" + "sync" + "time" + + "golang.org/x/crypto/ssh/terminal" +) + +// minTickerTime limits how often the progress ticker is updated. It can be +// overridden using the RESTIC_PROGRESS_FPS (frames per second) environment +// variable. +var minTickerTime = time.Second / 60 + +var isTerminal = terminal.IsTerminal(int(os.Stdout.Fd())) +var forceUpdateProgress = make(chan bool) + +func init() { + fps, err := strconv.ParseInt(os.Getenv("RESTIC_PROGRESS_FPS"), 10, 64) + if err == nil && fps >= 1 { + if fps > 60 { + fps = 60 + } + minTickerTime = time.Second / time.Duration(fps) + } +} + +// Progress reports progress on an operation. +type Progress struct { + OnStart func() + OnUpdate ProgressFunc + OnDone ProgressFunc + fnM sync.Mutex + + cur Stat + curM sync.Mutex + start time.Time + c *time.Ticker + cancel chan struct{} + o *sync.Once + d time.Duration + lastUpdate time.Time + + running bool +} + +// Stat captures newly done parts of the operation. 
+type Stat struct { + Files uint64 + Dirs uint64 + Bytes uint64 + Trees uint64 + Blobs uint64 + Errors uint64 +} + +// ProgressFunc is used to report progress back to the user. +type ProgressFunc func(s Stat, runtime time.Duration, ticker bool) + +// NewProgress returns a new progress reporter. When Start() is called, the +// function OnStart is executed once. Afterwards the function OnUpdate is +// called when new data arrives or at least every d interval. The function +// OnDone is called when Done() is called. Both functions are called +// synchronously and can use shared state. +func NewProgress() *Progress { + var d time.Duration + if isTerminal { + d = time.Second + } + return &Progress{d: d} +} + +// Start resets and runs the progress reporter. +func (p *Progress) Start() { + if p == nil || p.running { + return + } + + p.o = &sync.Once{} + p.cancel = make(chan struct{}) + p.running = true + p.Reset() + p.start = time.Now() + p.c = nil + if p.d != 0 { + p.c = time.NewTicker(p.d) + } + + if p.OnStart != nil { + p.OnStart() + } + + go p.reporter() +} + +// Reset resets all statistic counters to zero. +func (p *Progress) Reset() { + if p == nil { + return + } + + if !p.running { + panic("resetting a non-running Progress") + } + + p.curM.Lock() + p.cur = Stat{} + p.curM.Unlock() +} + +// Report adds the statistics from s to the current state and tries to report +// the accumulated statistics via the feedback channel. +func (p *Progress) Report(s Stat) { + if p == nil { + return + } + + if !p.running { + panic("reporting in a non-running Progress") + } + + p.curM.Lock() + p.cur.Add(s) + cur := p.cur + needUpdate := false + if isTerminal && time.Since(p.lastUpdate) > minTickerTime { + p.lastUpdate = time.Now() + needUpdate = true + } + p.curM.Unlock() + + if needUpdate { + p.updateProgress(cur, false) + } + +} + +func (p *Progress) updateProgress(cur Stat, ticker bool) { + if p.OnUpdate == nil { + return + } + + p.fnM.Lock() + p.OnUpdate(cur, time.Since(p.start), ticker) + p.fnM.Unlock() +} + +func (p *Progress) reporter() { + if p == nil { + return + } + + updateProgress := func() { + p.curM.Lock() + cur := p.cur + p.curM.Unlock() + p.updateProgress(cur, true) + } + + var ticker <-chan time.Time + if p.c != nil { + ticker = p.c.C + } + + for { + select { + case <-ticker: + updateProgress() + case <-forceUpdateProgress: + updateProgress() + case <-p.cancel: + if p.c != nil { + p.c.Stop() + } + return + } + } +} + +// Done closes the progress report. +func (p *Progress) Done() { + if p == nil || !p.running { + return + } + + p.running = false + p.o.Do(func() { + close(p.cancel) + }) + + cur := p.cur + + if p.OnDone != nil { + p.fnM.Lock() + p.OnUpdate(cur, time.Since(p.start), false) + p.OnDone(cur, time.Since(p.start), false) + p.fnM.Unlock() + } +} + +// Add accumulates other into s. 
+func (s *Stat) Add(other Stat) { + s.Bytes += other.Bytes + s.Dirs += other.Dirs + s.Files += other.Files + s.Trees += other.Trees + s.Blobs += other.Blobs + s.Errors += other.Errors +} + +func (s Stat) String() string { + b := float64(s.Bytes) + var str string + + switch { + case s.Bytes > 1<<40: + str = fmt.Sprintf("%.3f TiB", b/(1<<40)) + case s.Bytes > 1<<30: + str = fmt.Sprintf("%.3f GiB", b/(1<<30)) + case s.Bytes > 1<<20: + str = fmt.Sprintf("%.3f MiB", b/(1<<20)) + case s.Bytes > 1<<10: + str = fmt.Sprintf("%.3f KiB", b/(1<<10)) + default: + str = fmt.Sprintf("%dB", s.Bytes) + } + + return fmt.Sprintf("Stat(%d files, %d dirs, %v trees, %v blobs, %d errors, %v)", + s.Files, s.Dirs, s.Trees, s.Blobs, s.Errors, str) +} diff --git a/internal/restic/progress_unix.go b/internal/restic/progress_unix.go new file mode 100644 index 000000000..30954ed31 --- /dev/null +++ b/internal/restic/progress_unix.go @@ -0,0 +1,22 @@ +// +build !windows,!darwin,!freebsd,!netbsd,!openbsd,!dragonfly,!solaris + +package restic + +import ( + "os" + "os/signal" + "syscall" + + "github.com/restic/restic/internal/debug" +) + +func init() { + c := make(chan os.Signal) + signal.Notify(c, syscall.SIGUSR1) + go func() { + for s := range c { + debug.Log("Signal received: %v\n", s) + forceUpdateProgress <- true + } + }() +} diff --git a/internal/restic/progress_unix_with_siginfo.go b/internal/restic/progress_unix_with_siginfo.go new file mode 100644 index 000000000..b27193a89 --- /dev/null +++ b/internal/restic/progress_unix_with_siginfo.go @@ -0,0 +1,23 @@ +// +build darwin freebsd netbsd openbsd dragonfly + +package restic + +import ( + "os" + "os/signal" + "syscall" + + "github.com/restic/restic/internal/debug" +) + +func init() { + c := make(chan os.Signal) + signal.Notify(c, syscall.SIGUSR1) + signal.Notify(c, syscall.SIGINFO) + go func() { + for s := range c { + debug.Log("Signal received: %v\n", s) + forceUpdateProgress <- true + } + }() +} diff --git a/internal/restic/rand_reader.go b/internal/restic/rand_reader.go new file mode 100644 index 000000000..b5bc83839 --- /dev/null +++ b/internal/restic/rand_reader.go @@ -0,0 +1,81 @@ +package restic + +import ( + "io" + "math/rand" + + "github.com/restic/restic/internal/errors" +) + +// RandReader allows reading from a rand.Rand. +type RandReader struct { + rnd *rand.Rand + buf []byte +} + +// NewRandReader creates a new Reader from a random source. 
+func NewRandReader(rnd *rand.Rand) io.Reader { + return &RandReader{rnd: rnd, buf: make([]byte, 0, 7)} +} + +func (rd *RandReader) read(p []byte) (n int, err error) { + if len(p)%7 != 0 { + panic("invalid buffer length, not multiple of 7") + } + + rnd := rd.rnd + for i := 0; i < len(p); i += 7 { + val := rnd.Int63() + + p[i+0] = byte(val >> 0) + p[i+1] = byte(val >> 8) + p[i+2] = byte(val >> 16) + p[i+3] = byte(val >> 24) + p[i+4] = byte(val >> 32) + p[i+5] = byte(val >> 40) + p[i+6] = byte(val >> 48) + } + + return len(p), nil +} + +func (rd *RandReader) Read(p []byte) (int, error) { + // first, copy buffer to p + pos := copy(p, rd.buf) + copy(rd.buf, rd.buf[pos:]) + + // shorten buf and p accordingly + rd.buf = rd.buf[:len(rd.buf)-pos] + p = p[pos:] + + // if this is enough to fill p, return + if len(p) == 0 { + return pos, nil + } + + // load multiple of 7 byte + l := (len(p) / 7) * 7 + n, err := rd.read(p[:l]) + pos += n + if err != nil { + return pos, errors.Wrap(err, "Read") + } + p = p[n:] + + // load 7 byte to temp buffer + rd.buf = rd.buf[:7] + n, err = rd.read(rd.buf) + if err != nil { + return pos, errors.Wrap(err, "Read") + } + + // copy the remaining bytes from the buffer to p + n = copy(p, rd.buf) + pos += n + + // save the remaining bytes in rd.buf + n = copy(rd.buf, rd.buf[n:]) + rd.buf = rd.buf[:n] + + return pos, nil +} diff --git a/internal/restic/readerat.go b/internal/restic/readerat.go new file mode 100644 index 000000000..6e945b43a --- /dev/null +++ b/internal/restic/readerat.go @@ -0,0 +1,41 @@ +package restic + +import ( + "context" + "io" + + "github.com/restic/restic/internal/debug" + "github.com/restic/restic/internal/errors" +) + +type backendReaderAt struct { + be Backend + h Handle +} + +func (brd backendReaderAt) ReadAt(p []byte, offset int64) (n int, err error) { + return ReadAt(context.TODO(), brd.be, brd.h, offset, p) +} + +// ReaderAt returns an io.ReaderAt for a file in the backend. +func ReaderAt(be Backend, h Handle) io.ReaderAt { + return backendReaderAt{be: be, h: h} +} + +// ReadAt reads from the backend handle h at the given position. +func ReadAt(ctx context.Context, be Backend, h Handle, offset int64, p []byte) (n int, err error) { + debug.Log("ReadAt(%v) at %v, len %v", h, offset, len(p)) + + err = be.Load(ctx, h, len(p), offset, func(rd io.Reader) (ierr error) { + n, ierr = io.ReadFull(rd, p) + + return ierr + }) + if err != nil { + return 0, err + } + + debug.Log("ReadAt(%v) ReadFull returned %v bytes", h, n) + + return n, errors.Wrapf(err, "ReadFull(%v)", h) +} diff --git a/internal/restic/repository.go b/internal/restic/repository.go new file mode 100644 index 000000000..ff8f38034 --- /dev/null +++ b/internal/restic/repository.go @@ -0,0 +1,67 @@ +package restic + +import ( + "context" + + "github.com/restic/restic/internal/crypto" +) + +// Repository stores data in a backend. It provides high-level functions and +// transparently encrypts/decrypts data. +type Repository interface { + + // Backend returns the backend used by the repository + Backend() Backend + + Key() *crypto.Key + + SetIndex(Index) error + + Index() Index + SaveFullIndex(context.Context) error + SaveIndex(context.Context) error + LoadIndex(context.Context) error + + Config() Config + + LookupBlobSize(ID, BlobType) (uint, bool) + + // List calls the function fn for each file of type t in the repository. + // When an error is returned by fn, processing stops and List() returns the + // error. + // + // The function fn is called in the same Goroutine List() was called from. 
+ List(ctx context.Context, t FileType, fn func(ID, int64) error) error + ListPack(context.Context, ID, int64) ([]Blob, int64, error) + + Flush(context.Context) error + + SaveUnpacked(context.Context, FileType, []byte) (ID, error) + SaveJSONUnpacked(context.Context, FileType, interface{}) (ID, error) + + LoadJSONUnpacked(context.Context, FileType, ID, interface{}) error + LoadAndDecrypt(context.Context, FileType, ID) ([]byte, error) + + LoadBlob(context.Context, BlobType, ID, []byte) (int, error) + SaveBlob(context.Context, BlobType, []byte, ID) (ID, error) + + LoadTree(context.Context, ID) (*Tree, error) + SaveTree(context.Context, *Tree) (ID, error) +} + +// Lister allows listing files in a backend. +type Lister interface { + List(context.Context, FileType, func(FileInfo) error) error +} + +// Index keeps track of the blobs are stored within files. +type Index interface { + Has(ID, BlobType) bool + Lookup(ID, BlobType) ([]PackedBlob, bool) + Count(BlobType) uint + + // Each returns a channel that yields all blobs known to the index. When + // the context is cancelled, the background goroutine terminates. This + // blocks any modification of the index. + Each(ctx context.Context) <-chan PackedBlob +} diff --git a/internal/restic/rewind_reader.go b/internal/restic/rewind_reader.go new file mode 100644 index 000000000..acbb29678 --- /dev/null +++ b/internal/restic/rewind_reader.go @@ -0,0 +1,90 @@ +package restic + +import ( + "bytes" + "io" + + "github.com/restic/restic/internal/errors" +) + +// RewindReader allows resetting the Reader to the beginning of the data. +type RewindReader interface { + io.Reader + + // Rewind rewinds the reader so the same data can be read again from the + // start. + Rewind() error + + // Length returns the number of bytes that can be read from the Reader + // after calling Rewind. + Length() int64 +} + +// ByteReader implements a RewindReader for a byte slice. +type ByteReader struct { + *bytes.Reader + Len int64 +} + +// Rewind restarts the reader from the beginning of the data. +func (b *ByteReader) Rewind() error { + _, err := b.Reader.Seek(0, io.SeekStart) + return err +} + +// Length returns the number of bytes read from the reader after Rewind is +// called. +func (b *ByteReader) Length() int64 { + return b.Len +} + +// statically ensure that *ByteReader implements RewindReader. +var _ RewindReader = &ByteReader{} + +// NewByteReader prepares a ByteReader that can then be used to read buf. +func NewByteReader(buf []byte) *ByteReader { + return &ByteReader{ + Reader: bytes.NewReader(buf), + Len: int64(len(buf)), + } +} + +// statically ensure that *FileReader implements RewindReader. +var _ RewindReader = &FileReader{} + +// FileReader implements a RewindReader for an open file. +type FileReader struct { + io.ReadSeeker + Len int64 +} + +// Rewind seeks to the beginning of the file. +func (f *FileReader) Rewind() error { + _, err := f.ReadSeeker.Seek(0, io.SeekStart) + return errors.Wrap(err, "Seek") +} + +// Length returns the length of the file. +func (f *FileReader) Length() int64 { + return f.Len +} + +// NewFileReader wraps f in a *FileReader. 
+func NewFileReader(f io.ReadSeeker) (*FileReader, error) { + pos, err := f.Seek(0, io.SeekEnd) + if err != nil { + return nil, errors.Wrap(err, "Seek") + } + + fr := &FileReader{ + ReadSeeker: f, + Len: pos, + } + + err = fr.Rewind() + if err != nil { + return nil, err + } + + return fr, nil +} diff --git a/internal/restic/rewind_reader_test.go b/internal/restic/rewind_reader_test.go new file mode 100644 index 000000000..53f0a4424 --- /dev/null +++ b/internal/restic/rewind_reader_test.go @@ -0,0 +1,154 @@ +package restic + +import ( + "bytes" + "io" + "io/ioutil" + "math/rand" + "os" + "path/filepath" + "testing" + "time" + + "github.com/restic/restic/internal/test" +) + +func TestByteReader(t *testing.T) { + buf := []byte("foobar") + fn := func() RewindReader { + return NewByteReader(buf) + } + testRewindReader(t, fn, buf) +} + +func TestFileReader(t *testing.T) { + buf := []byte("foobar") + + d, cleanup := test.TempDir(t) + defer cleanup() + + filename := filepath.Join(d, "file-reader-test") + err := ioutil.WriteFile(filename, []byte("foobar"), 0600) + if err != nil { + t.Fatal(err) + } + + f, err := os.Open(filename) + if err != nil { + t.Fatal(err) + } + + defer func() { + err := f.Close() + if err != nil { + t.Fatal(err) + } + }() + + fn := func() RewindReader { + rd, err := NewFileReader(f) + if err != nil { + t.Fatal(err) + } + return rd + } + + testRewindReader(t, fn, buf) +} + +func testRewindReader(t *testing.T, fn func() RewindReader, data []byte) { + seed := time.Now().UnixNano() + t.Logf("seed is %d", seed) + rnd := rand.New(rand.NewSource(seed)) + + type ReaderTestFunc func(t testing.TB, r RewindReader, data []byte) + var tests = []ReaderTestFunc{ + func(t testing.TB, rd RewindReader, data []byte) { + if rd.Length() != int64(len(data)) { + t.Fatalf("wrong length returned, want %d, got %d", int64(len(data)), rd.Length()) + } + + buf := make([]byte, len(data)) + _, err := io.ReadFull(rd, buf) + if err != nil { + t.Fatal(err) + } + + if !bytes.Equal(buf, data) { + t.Fatalf("wrong data returned") + } + + if rd.Length() != int64(len(data)) { + t.Fatalf("wrong length returned, want %d, got %d", int64(len(data)), rd.Length()) + } + + err = rd.Rewind() + if err != nil { + t.Fatal(err) + } + + if rd.Length() != int64(len(data)) { + t.Fatalf("wrong length returned, want %d, got %d", int64(len(data)), rd.Length()) + } + + buf2 := make([]byte, int64(len(data))) + _, err = io.ReadFull(rd, buf2) + if err != nil { + t.Fatal(err) + } + + if !bytes.Equal(buf2, data) { + t.Fatalf("wrong data returned") + } + + if rd.Length() != int64(len(data)) { + t.Fatalf("wrong length returned, want %d, got %d", int64(len(data)), rd.Length()) + } + }, + func(t testing.TB, rd RewindReader, data []byte) { + // read first bytes + buf := make([]byte, rnd.Intn(len(data))) + _, err := io.ReadFull(rd, buf) + if err != nil { + t.Fatal(err) + } + + if !bytes.Equal(buf, data[:len(buf)]) { + t.Fatalf("wrong data returned") + } + + err = rd.Rewind() + if err != nil { + t.Fatal(err) + } + + buf2 := make([]byte, rnd.Intn(len(data))) + _, err = io.ReadFull(rd, buf2) + if err != nil { + t.Fatal(err) + } + + if !bytes.Equal(buf2, data[:len(buf2)]) { + t.Fatalf("wrong data returned") + } + + // read remainder + buf3 := make([]byte, len(data)-len(buf2)) + _, err = io.ReadFull(rd, buf3) + if err != nil { + t.Fatal(err) + } + + if !bytes.Equal(buf3, data[len(buf2):]) { + t.Fatalf("wrong data returned") + } + }, + } + + for _, test := range tests { + t.Run("", func(t *testing.T) { + rd := fn() + test(t, rd, data) + }) + } +} 
diff --git a/internal/restic/snapshot.go b/internal/restic/snapshot.go new file mode 100644 index 000000000..61467013a --- /dev/null +++ b/internal/restic/snapshot.go @@ -0,0 +1,220 @@ +package restic + +import ( + "context" + "fmt" + "os/user" + "path/filepath" + "time" + + "github.com/restic/restic/internal/debug" +) + +// Snapshot is the state of a resource at one point in time. +type Snapshot struct { + Time time.Time `json:"time"` + Parent *ID `json:"parent,omitempty"` + Tree *ID `json:"tree"` + Paths []string `json:"paths"` + Hostname string `json:"hostname,omitempty"` + Username string `json:"username,omitempty"` + UID uint32 `json:"uid,omitempty"` + GID uint32 `json:"gid,omitempty"` + Excludes []string `json:"excludes,omitempty"` + Tags []string `json:"tags,omitempty"` + Original *ID `json:"original,omitempty"` + + id *ID // plaintext ID, used during restore +} + +// NewSnapshot returns an initialized snapshot struct for the current user and +// time. +func NewSnapshot(paths []string, tags []string, hostname string, time time.Time) (*Snapshot, error) { + absPaths := make([]string, 0, len(paths)) + for _, path := range paths { + p, err := filepath.Abs(path) + if err == nil { + absPaths = append(absPaths, p) + } else { + absPaths = append(absPaths, path) + } + } + + sn := &Snapshot{ + Paths: absPaths, + Time: time, + Tags: tags, + Hostname: hostname, + } + + err := sn.fillUserInfo() + if err != nil { + return nil, err + } + + return sn, nil +} + +// LoadSnapshot loads the snapshot with the id and returns it. +func LoadSnapshot(ctx context.Context, repo Repository, id ID) (*Snapshot, error) { + sn := &Snapshot{id: &id} + err := repo.LoadJSONUnpacked(ctx, SnapshotFile, id, sn) + if err != nil { + return nil, err + } + + return sn, nil +} + +// LoadAllSnapshots returns a list of all snapshots in the repo. +func LoadAllSnapshots(ctx context.Context, repo Repository) (snapshots []*Snapshot, err error) { + err = repo.List(ctx, SnapshotFile, func(id ID, size int64) error { + sn, err := LoadSnapshot(ctx, repo, id) + if err != nil { + return err + } + + snapshots = append(snapshots, sn) + return nil + }) + + if err != nil { + return nil, err + } + + return snapshots, nil +} + +func (sn Snapshot) String() string { + return fmt.Sprintf("", + sn.id.Str(), sn.Paths, sn.Time, sn.Username, sn.Hostname) +} + +// ID returns the snapshot's ID. +func (sn Snapshot) ID() *ID { + return sn.id +} + +func (sn *Snapshot) fillUserInfo() error { + usr, err := user.Current() + if err != nil { + return nil + } + sn.Username = usr.Username + + // set userid and groupid + sn.UID, sn.GID, err = uidGidInt(*usr) + return err +} + +// AddTags adds the given tags to the snapshots tags, preventing duplicates. +// It returns true if any changes were made. +func (sn *Snapshot) AddTags(addTags []string) (changed bool) { +nextTag: + for _, add := range addTags { + for _, tag := range sn.Tags { + if tag == add { + continue nextTag + } + } + sn.Tags = append(sn.Tags, add) + changed = true + } + return +} + +// RemoveTags removes the given tags from the snapshots tags and +// returns true if any changes were made. 
+func (sn *Snapshot) RemoveTags(removeTags []string) (changed bool) { + for _, remove := range removeTags { + for i, tag := range sn.Tags { + if tag == remove { + // https://github.com/golang/go/wiki/SliceTricks + sn.Tags[i] = sn.Tags[len(sn.Tags)-1] + sn.Tags[len(sn.Tags)-1] = "" + sn.Tags = sn.Tags[:len(sn.Tags)-1] + + changed = true + break + } + } + } + return +} + +func (sn *Snapshot) hasTag(tag string) bool { + for _, snTag := range sn.Tags { + if tag == snTag { + return true + } + } + return false +} + +// HasTags returns true if the snapshot has all the tags in l. +func (sn *Snapshot) HasTags(l []string) bool { + for _, tag := range l { + if !sn.hasTag(tag) { + return false + } + } + + return true +} + +// HasTagList returns true if the snapshot satisfies at least one TagList, +// so there is a TagList in l for which all tags are included in sn. +func (sn *Snapshot) HasTagList(l []TagList) bool { + debug.Log("testing snapshot with tags %v against list: %v", sn.Tags, l) + + if len(l) == 0 { + return true + } + + for _, tags := range l { + if sn.HasTags(tags) { + debug.Log(" snapshot satisfies %v %v", tags, l) + return true + } + } + + return false +} + +func (sn *Snapshot) hasPath(path string) bool { + for _, snPath := range sn.Paths { + if path == snPath { + return true + } + } + return false +} + +// HasPaths returns true if the snapshot has all of the paths. +func (sn *Snapshot) HasPaths(paths []string) bool { + for _, path := range paths { + if !sn.hasPath(path) { + return false + } + } + + return true +} + +// Snapshots is a list of snapshots. +type Snapshots []*Snapshot + +// Len returns the number of snapshots in sn. +func (sn Snapshots) Len() int { + return len(sn) +} + +// Less returns true iff the ith snapshot has been made after the jth. +func (sn Snapshots) Less(i, j int) bool { + return sn[i].Time.After(sn[j].Time) +} + +// Swap exchanges the two snapshots. +func (sn Snapshots) Swap(i, j int) { + sn[i], sn[j] = sn[j], sn[i] +} diff --git a/internal/restic/snapshot_find.go b/internal/restic/snapshot_find.go new file mode 100644 index 000000000..e7747c561 --- /dev/null +++ b/internal/restic/snapshot_find.go @@ -0,0 +1,108 @@ +package restic + +import ( + "context" + "fmt" + "os" + "path/filepath" + "time" + + "github.com/restic/restic/internal/errors" +) + +// ErrNoSnapshotFound is returned when no snapshot for the given criteria could be found. +var ErrNoSnapshotFound = errors.New("no snapshot found") + +// FindLatestSnapshot finds latest snapshot with optional target/directory, tags and hostname filters. 
+func FindLatestSnapshot(ctx context.Context, repo Repository, targets []string, tagLists []TagList, hostname string) (ID, error) { + var err error + absTargets := make([]string, 0, len(targets)) + for _, target := range targets { + if !filepath.IsAbs(target) { + target, err = filepath.Abs(target) + if err != nil { + return ID{}, errors.Wrap(err, "Abs") + } + } + absTargets = append(absTargets, filepath.Clean(target)) + } + + var ( + latest time.Time + latestID ID + found bool + ) + + err = repo.List(ctx, SnapshotFile, func(snapshotID ID, size int64) error { + snapshot, err := LoadSnapshot(ctx, repo, snapshotID) + if err != nil { + return errors.Errorf("Error loading snapshot %v: %v", snapshotID.Str(), err) + } + if snapshot.Time.Before(latest) || (hostname != "" && hostname != snapshot.Hostname) { + return nil + } + + if !snapshot.HasTagList(tagLists) { + return nil + } + + if !snapshot.HasPaths(absTargets) { + return nil + } + + latest = snapshot.Time + latestID = snapshotID + found = true + return nil + }) + + if err != nil { + return ID{}, err + } + + if !found { + return ID{}, ErrNoSnapshotFound + } + + return latestID, nil +} + +// FindSnapshot takes a string and tries to find a snapshot whose ID matches +// the string as closely as possible. +func FindSnapshot(repo Repository, s string) (ID, error) { + + // find snapshot id with prefix + name, err := Find(repo.Backend(), SnapshotFile, s) + if err != nil { + return ID{}, err + } + + return ParseID(name) +} + +// FindFilteredSnapshots yields Snapshots filtered from the list of all +// snapshots. +func FindFilteredSnapshots(ctx context.Context, repo Repository, host string, tags []TagList, paths []string) (Snapshots, error) { + results := make(Snapshots, 0, 20) + + err := repo.List(ctx, SnapshotFile, func(id ID, size int64) error { + sn, err := LoadSnapshot(ctx, repo, id) + if err != nil { + fmt.Fprintf(os.Stderr, "could not load snapshot %v: %v\n", id.Str(), err) + return nil + } + + if (host != "" && host != sn.Hostname) || !sn.HasTagList(tags) || !sn.HasPaths(paths) { + return nil + } + + results = append(results, sn) + return nil + }) + + if err != nil { + return nil, err + } + + return results, nil +} diff --git a/internal/restic/snapshot_policy.go b/internal/restic/snapshot_policy.go new file mode 100644 index 000000000..08ed843c8 --- /dev/null +++ b/internal/restic/snapshot_policy.go @@ -0,0 +1,239 @@ +package restic + +import ( + "fmt" + "reflect" + "sort" + "strings" + "time" + + "github.com/restic/restic/internal/debug" +) + +// ExpirePolicy configures which snapshots should be automatically removed. +type ExpirePolicy struct { + Last int // keep the last n snapshots + Hourly int // keep the last n hourly snapshots + Daily int // keep the last n daily snapshots + Weekly int // keep the last n weekly snapshots + Monthly int // keep the last n monthly snapshots + Yearly int // keep the last n yearly snapshots + Within Duration // keep snapshots made within this duration + Tags []TagList // keep all snapshots that include at least one of the tag lists. 
+} + +func (e ExpirePolicy) String() (s string) { + var keeps []string + if e.Last > 0 { + keeps = append(keeps, fmt.Sprintf("%d snapshots", e.Last)) + } + if e.Hourly > 0 { + keeps = append(keeps, fmt.Sprintf("%d hourly", e.Hourly)) + } + if e.Daily > 0 { + keeps = append(keeps, fmt.Sprintf("%d daily", e.Daily)) + } + if e.Weekly > 0 { + keeps = append(keeps, fmt.Sprintf("%d weekly", e.Weekly)) + } + if e.Monthly > 0 { + keeps = append(keeps, fmt.Sprintf("%d monthly", e.Monthly)) + } + if e.Yearly > 0 { + keeps = append(keeps, fmt.Sprintf("%d yearly", e.Yearly)) + } + + if len(keeps) > 0 { + s = fmt.Sprintf("keep the last %s snapshots", strings.Join(keeps, ", ")) + } + + if len(e.Tags) > 0 { + if s != "" { + s += " and " + } + s += fmt.Sprintf("all snapshots with tags %s", e.Tags) + } + + if !e.Within.Zero() { + if s != "" { + s += " and " + } + s += fmt.Sprintf("all snapshots within %s of the newest", e.Within) + } + + return s +} + +// Sum returns the maximum number of snapshots to be kept according to this +// policy. +func (e ExpirePolicy) Sum() int { + return e.Last + e.Hourly + e.Daily + e.Weekly + e.Monthly + e.Yearly +} + +// Empty returns true iff no policy has been configured (all values zero). +func (e ExpirePolicy) Empty() bool { + if len(e.Tags) != 0 { + return false + } + + empty := ExpirePolicy{Tags: e.Tags} + return reflect.DeepEqual(e, empty) +} + +// ymdh returns an integer in the form YYYYMMDDHH. +func ymdh(d time.Time, _ int) int { + return d.Year()*1000000 + int(d.Month())*10000 + d.Day()*100 + d.Hour() +} + +// ymd returns an integer in the form YYYYMMDD. +func ymd(d time.Time, _ int) int { + return d.Year()*10000 + int(d.Month())*100 + d.Day() +} + +// yw returns an integer in the form YYYYWW, where WW is the week number. +func yw(d time.Time, _ int) int { + year, week := d.ISOWeek() + return year*100 + week +} + +// ym returns an integer in the form YYYYMM. +func ym(d time.Time, _ int) int { + return d.Year()*100 + int(d.Month()) +} + +// y returns the year of d. +func y(d time.Time, _ int) int { + return d.Year() +} + +// always returns a unique number for d. +func always(d time.Time, nr int) int { + return nr +} + +// findLatestTimestamp returns the time stamp for the newest snapshot. +func findLatestTimestamp(list Snapshots) time.Time { + if len(list) == 0 { + panic("list of snapshots is empty") + } + + var latest time.Time + for _, sn := range list { + if sn.Time.After(latest) { + latest = sn.Time + } + } + + return latest +} + +// KeepReason specifies why a particular snapshot was kept, and the counters at +// that point in the policy evaluation. +type KeepReason struct { + Snapshot *Snapshot `json:"snapshot"` + + // description text which criteria match, e.g. "daily", "monthly" + Matches []string `json:"matches"` + + // the counters after evaluating the current snapshot + Counters struct { + Last int `json:"last,omitempty"` + Hourly int `json:"hourly,omitempty"` + Daily int `json:"daily,omitempty"` + Weekly int `json:"weekly,omitempty"` + Monthly int `json:"monthly,omitempty"` + Yearly int `json:"yearly,omitempty"` + } `json:"counters"` +} + +// ApplyPolicy returns the snapshots from list that are to be kept and removed +// according to the policy p. list is sorted in the process. reasons contains +// the reasons to keep each snapshot, it is in the same order as keep. 
+func ApplyPolicy(list Snapshots, p ExpirePolicy) (keep, remove Snapshots, reasons []KeepReason) { + sort.Sort(list) + + if p.Empty() { + for _, sn := range list { + reasons = append(reasons, KeepReason{ + Snapshot: sn, + Matches: []string{"policy is empty"}, + }) + } + return list, remove, reasons + } + + if len(list) == 0 { + return list, nil, nil + } + + var buckets = [6]struct { + Count int + bucker func(d time.Time, nr int) int + Last int + reason string + }{ + {p.Last, always, -1, "last snapshot"}, + {p.Hourly, ymdh, -1, "hourly snapshot"}, + {p.Daily, ymd, -1, "daily snapshot"}, + {p.Weekly, yw, -1, "weekly snapshot"}, + {p.Monthly, ym, -1, "monthly snapshot"}, + {p.Yearly, y, -1, "yearly snapshot"}, + } + + latest := findLatestTimestamp(list) + + for nr, cur := range list { + var keepSnap bool + var keepSnapReasons []string + + // Tags are handled specially as they are not counted. + for _, l := range p.Tags { + if cur.HasTags(l) { + keepSnap = true + keepSnapReasons = append(keepSnapReasons, fmt.Sprintf("has tags %v", l)) + } + } + + // If the timestamp of the snapshot is within the range, then keep it. + if !p.Within.Zero() { + t := latest.AddDate(-p.Within.Years, -p.Within.Months, -p.Within.Days).Add(time.Hour * time.Duration(-p.Within.Hours)) + if cur.Time.After(t) { + keepSnap = true + keepSnapReasons = append(keepSnapReasons, fmt.Sprintf("within %v", p.Within)) + } + } + + // Now update the other buckets and see if they have some counts left. + for i, b := range buckets { + if b.Count > 0 { + val := b.bucker(cur.Time, nr) + if val != b.Last { + debug.Log("keep %v %v, bucker %v, val %v\n", cur.Time, cur.id.Str(), i, val) + keepSnap = true + buckets[i].Last = val + buckets[i].Count-- + keepSnapReasons = append(keepSnapReasons, b.reason) + } + } + } + + if keepSnap { + keep = append(keep, cur) + kr := KeepReason{ + Snapshot: cur, + Matches: keepSnapReasons, + } + kr.Counters.Last = buckets[0].Count + kr.Counters.Hourly = buckets[1].Count + kr.Counters.Daily = buckets[2].Count + kr.Counters.Weekly = buckets[3].Count + kr.Counters.Monthly = buckets[4].Count + kr.Counters.Yearly = buckets[5].Count + reasons = append(reasons, kr) + } else { + remove = append(remove, cur) + } + } + + return keep, remove, reasons +} diff --git a/internal/restic/snapshot_policy_test.go b/internal/restic/snapshot_policy_test.go new file mode 100644 index 000000000..7c9be67e7 --- /dev/null +++ b/internal/restic/snapshot_policy_test.go @@ -0,0 +1,271 @@ +package restic_test + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "path/filepath" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/restic/restic/internal/restic" +) + +func parseTimeUTC(s string) time.Time { + t, err := time.Parse("2006-01-02 15:04:05", s) + if err != nil { + panic(err) + } + + return t.UTC() +} + +func parseDuration(s string) restic.Duration { + d, err := restic.ParseDuration(s) + if err != nil { + panic(err) + } + + return d +} + +func TestExpireSnapshotOps(t *testing.T) { + data := []struct { + expectEmpty bool + expectSum int + p *restic.ExpirePolicy + }{ + {true, 0, &restic.ExpirePolicy{}}, + {true, 0, &restic.ExpirePolicy{Tags: []restic.TagList{}}}, + {false, 22, &restic.ExpirePolicy{Daily: 7, Weekly: 2, Monthly: 3, Yearly: 10}}, + } + for i, d := range data { + isEmpty := d.p.Empty() + if isEmpty != d.expectEmpty { + t.Errorf("empty test %v: wrong result, want:\n %#v\ngot:\n %#v", i, d.expectEmpty, isEmpty) + } + hasSum := d.p.Sum() + if hasSum != d.expectSum { + 
t.Errorf("sum test %v: wrong result, want:\n %#v\ngot:\n %#v", i, d.expectSum, hasSum) + } + } +} + +// ApplyPolicyResult is used to marshal/unmarshal the golden files for +// TestApplyPolicy. +type ApplyPolicyResult struct { + Keep restic.Snapshots `json:"keep"` + Reasons []restic.KeepReason `json:"reasons,omitempty"` +} + +func loadGoldenFile(t testing.TB, filename string) (res ApplyPolicyResult) { + buf, err := ioutil.ReadFile(filename) + if err != nil { + t.Fatalf("error loading golden file %v: %v", filename, err) + } + + err = json.Unmarshal(buf, &res) + if err != nil { + t.Fatalf("error unmarshalling golden file %v: %v", filename, err) + } + + return res +} + +func saveGoldenFile(t testing.TB, filename string, keep restic.Snapshots, reasons []restic.KeepReason) { + res := ApplyPolicyResult{ + Keep: keep, + Reasons: reasons, + } + + buf, err := json.MarshalIndent(res, "", " ") + if err != nil { + t.Fatalf("error marshaling result: %v", err) + } + + if err = ioutil.WriteFile(filename, buf, 0644); err != nil { + t.Fatalf("unable to update golden file: %v", err) + } +} + +func TestApplyPolicy(t *testing.T) { + var testExpireSnapshots = restic.Snapshots{ + {Time: parseTimeUTC("2014-09-01 10:20:30")}, + {Time: parseTimeUTC("2014-09-02 10:20:30")}, + {Time: parseTimeUTC("2014-09-05 10:20:30")}, + {Time: parseTimeUTC("2014-09-06 10:20:30")}, + {Time: parseTimeUTC("2014-09-08 10:20:30")}, + {Time: parseTimeUTC("2014-09-09 10:20:30")}, + {Time: parseTimeUTC("2014-09-10 10:20:30")}, + {Time: parseTimeUTC("2014-09-11 10:20:30")}, + {Time: parseTimeUTC("2014-09-20 10:20:30")}, + {Time: parseTimeUTC("2014-09-22 10:20:30")}, + {Time: parseTimeUTC("2014-08-08 10:20:30")}, + {Time: parseTimeUTC("2014-08-10 10:20:30")}, + {Time: parseTimeUTC("2014-08-12 10:20:30")}, + {Time: parseTimeUTC("2014-08-13 10:20:30")}, + {Time: parseTimeUTC("2014-08-13 10:20:30.1")}, + {Time: parseTimeUTC("2014-08-15 10:20:30")}, + {Time: parseTimeUTC("2014-08-18 10:20:30")}, + {Time: parseTimeUTC("2014-08-20 10:20:30")}, + {Time: parseTimeUTC("2014-08-21 10:20:30")}, + {Time: parseTimeUTC("2014-08-22 10:20:30")}, + {Time: parseTimeUTC("2014-10-01 10:20:30"), Tags: []string{"foo"}}, + {Time: parseTimeUTC("2014-10-02 10:20:30"), Tags: []string{"foo"}}, + {Time: parseTimeUTC("2014-10-05 10:20:30"), Tags: []string{"foo"}}, + {Time: parseTimeUTC("2014-10-06 10:20:30"), Tags: []string{"foo"}}, + {Time: parseTimeUTC("2014-10-08 10:20:30"), Tags: []string{"foo"}}, + {Time: parseTimeUTC("2014-10-09 10:20:30"), Tags: []string{"foo"}}, + {Time: parseTimeUTC("2014-10-10 10:20:30"), Tags: []string{"foo"}}, + {Time: parseTimeUTC("2014-10-11 10:20:30"), Tags: []string{"foo"}}, + {Time: parseTimeUTC("2014-10-20 10:20:30"), Tags: []string{"foo"}}, + {Time: parseTimeUTC("2014-10-22 10:20:30"), Tags: []string{"foo"}}, + {Time: parseTimeUTC("2014-11-08 10:20:30"), Tags: []string{"foo"}}, + {Time: parseTimeUTC("2014-11-10 10:20:30"), Tags: []string{"foo"}}, + {Time: parseTimeUTC("2014-11-12 10:20:30"), Tags: []string{"foo"}}, + {Time: parseTimeUTC("2014-11-13 10:20:30"), Tags: []string{"foo"}}, + {Time: parseTimeUTC("2014-11-13 10:20:30.1"), Tags: []string{"bar"}}, + {Time: parseTimeUTC("2014-11-15 10:20:30"), Tags: []string{"foo", "bar"}}, + {Time: parseTimeUTC("2014-11-18 10:20:30")}, + {Time: parseTimeUTC("2014-11-20 10:20:30")}, + {Time: parseTimeUTC("2014-11-21 10:20:30")}, + {Time: parseTimeUTC("2014-11-22 10:20:30")}, + {Time: parseTimeUTC("2015-09-01 10:20:30")}, + {Time: parseTimeUTC("2015-09-02 10:20:30")}, + {Time: 
parseTimeUTC("2015-09-05 10:20:30")}, + {Time: parseTimeUTC("2015-09-06 10:20:30")}, + {Time: parseTimeUTC("2015-09-08 10:20:30")}, + {Time: parseTimeUTC("2015-09-09 10:20:30")}, + {Time: parseTimeUTC("2015-09-10 10:20:30")}, + {Time: parseTimeUTC("2015-09-11 10:20:30")}, + {Time: parseTimeUTC("2015-09-20 10:20:30")}, + {Time: parseTimeUTC("2015-09-22 10:20:30")}, + {Time: parseTimeUTC("2015-08-08 10:20:30")}, + {Time: parseTimeUTC("2015-08-10 10:20:30")}, + {Time: parseTimeUTC("2015-08-12 10:20:30")}, + {Time: parseTimeUTC("2015-08-13 10:20:30")}, + {Time: parseTimeUTC("2015-08-13 10:20:30.1")}, + {Time: parseTimeUTC("2015-08-15 10:20:30")}, + {Time: parseTimeUTC("2015-08-18 10:20:30")}, + {Time: parseTimeUTC("2015-08-20 10:20:30")}, + {Time: parseTimeUTC("2015-08-21 10:20:30")}, + {Time: parseTimeUTC("2015-08-22 10:20:30")}, + {Time: parseTimeUTC("2015-10-01 10:20:30")}, + {Time: parseTimeUTC("2015-10-02 10:20:30")}, + {Time: parseTimeUTC("2015-10-05 10:20:30")}, + {Time: parseTimeUTC("2015-10-06 10:20:30")}, + {Time: parseTimeUTC("2015-10-08 10:20:30")}, + {Time: parseTimeUTC("2015-10-09 10:20:30")}, + {Time: parseTimeUTC("2015-10-10 10:20:30")}, + {Time: parseTimeUTC("2015-10-11 10:20:30")}, + {Time: parseTimeUTC("2015-10-20 10:20:30")}, + {Time: parseTimeUTC("2015-10-22 10:20:30")}, + {Time: parseTimeUTC("2015-10-22 10:20:30")}, + {Time: parseTimeUTC("2015-10-22 10:20:30"), Tags: []string{"foo", "bar"}}, + {Time: parseTimeUTC("2015-10-22 10:20:30"), Tags: []string{"foo", "bar"}}, + {Time: parseTimeUTC("2015-10-22 10:20:30"), Tags: []string{"foo", "bar"}, Paths: []string{"path1", "path2"}}, + {Time: parseTimeUTC("2015-11-08 10:20:30")}, + {Time: parseTimeUTC("2015-11-10 10:20:30")}, + {Time: parseTimeUTC("2015-11-12 10:20:30")}, + {Time: parseTimeUTC("2015-11-13 10:20:30")}, + {Time: parseTimeUTC("2015-11-13 10:20:30.1")}, + {Time: parseTimeUTC("2015-11-15 10:20:30")}, + {Time: parseTimeUTC("2015-11-18 10:20:30")}, + {Time: parseTimeUTC("2015-11-20 10:20:30")}, + {Time: parseTimeUTC("2015-11-21 10:20:30")}, + {Time: parseTimeUTC("2015-11-22 10:20:30")}, + {Time: parseTimeUTC("2016-01-01 01:02:03")}, + {Time: parseTimeUTC("2016-01-01 01:03:03")}, + {Time: parseTimeUTC("2016-01-01 07:08:03")}, + {Time: parseTimeUTC("2016-01-03 07:02:03")}, + {Time: parseTimeUTC("2016-01-04 10:23:03")}, + {Time: parseTimeUTC("2016-01-04 11:23:03")}, + {Time: parseTimeUTC("2016-01-04 12:23:03")}, + {Time: parseTimeUTC("2016-01-04 12:24:03")}, + {Time: parseTimeUTC("2016-01-04 12:28:03")}, + {Time: parseTimeUTC("2016-01-04 12:30:03")}, + {Time: parseTimeUTC("2016-01-04 16:23:03")}, + {Time: parseTimeUTC("2016-01-05 09:02:03")}, + {Time: parseTimeUTC("2016-01-06 08:02:03")}, + {Time: parseTimeUTC("2016-01-07 10:02:03")}, + {Time: parseTimeUTC("2016-01-08 20:02:03")}, + {Time: parseTimeUTC("2016-01-09 21:02:03")}, + {Time: parseTimeUTC("2016-01-12 21:02:03")}, + {Time: parseTimeUTC("2016-01-12 21:08:03")}, + {Time: parseTimeUTC("2016-01-18 12:02:03")}, + } + + var tests = []restic.ExpirePolicy{ + {}, + {Last: 10}, + {Last: 15}, + {Last: 99}, + {Last: 200}, + {Hourly: 20}, + {Daily: 3}, + {Daily: 10}, + {Daily: 30}, + {Last: 5, Daily: 5}, + {Last: 2, Daily: 10}, + {Weekly: 2}, + {Weekly: 4}, + {Daily: 3, Weekly: 4}, + {Monthly: 6}, + {Daily: 2, Weekly: 2, Monthly: 6}, + {Yearly: 10}, + {Daily: 7, Weekly: 2, Monthly: 3, Yearly: 10}, + {Tags: []restic.TagList{{"foo"}}}, + {Tags: []restic.TagList{{"foo", "bar"}}}, + {Tags: []restic.TagList{{"foo"}, {"bar"}}}, + {Within: parseDuration("1d")}, + {Within: 
parseDuration("2d")}, + {Within: parseDuration("7d")}, + {Within: parseDuration("1m")}, + {Within: parseDuration("1m14d")}, + {Within: parseDuration("1y1d1m")}, + {Within: parseDuration("13d23h")}, + {Within: parseDuration("2m2h")}, + {Within: parseDuration("1y2m3d3h")}, + } + + for i, p := range tests { + t.Run("", func(t *testing.T) { + + keep, remove, reasons := restic.ApplyPolicy(testExpireSnapshots, p) + + if len(keep)+len(remove) != len(testExpireSnapshots) { + t.Errorf("len(keep)+len(remove) = %d != len(testExpireSnapshots) = %d", + len(keep)+len(remove), len(testExpireSnapshots)) + } + + if p.Sum() > 0 && len(keep) > p.Sum() { + t.Errorf("not enough snapshots removed: policy allows %v snapshots to remain, but ended up with %v", + p.Sum(), len(keep)) + } + + if len(keep) != len(reasons) { + t.Errorf("got %d keep reasons for %d snapshots to keep, these must be equal", len(reasons), len(keep)) + } + + goldenFilename := filepath.Join("testdata", fmt.Sprintf("policy_keep_snapshots_%d", i)) + + if *updateGoldenFiles { + saveGoldenFile(t, goldenFilename, keep, reasons) + } + + want := loadGoldenFile(t, goldenFilename) + + cmpOpts := cmpopts.IgnoreUnexported(restic.Snapshot{}) + + if !cmp.Equal(want.Keep, keep, cmpOpts) { + t.Error(cmp.Diff(want.Keep, keep, cmpOpts)) + } + + if !cmp.Equal(want.Reasons, reasons, cmpOpts) { + t.Error(cmp.Diff(want.Reasons, reasons, cmpOpts)) + } + }) + } +} diff --git a/internal/restic/snapshot_test.go b/internal/restic/snapshot_test.go new file mode 100644 index 000000000..5e1bf8822 --- /dev/null +++ b/internal/restic/snapshot_test.go @@ -0,0 +1,16 @@ +package restic_test + +import ( + "testing" + "time" + + "github.com/restic/restic/internal/restic" + rtest "github.com/restic/restic/internal/test" +) + +func TestNewSnapshot(t *testing.T) { + paths := []string{"/home/foobar"} + + _, err := restic.NewSnapshot(paths, nil, "foo", time.Now()) + rtest.OK(t, err) +} diff --git a/internal/restic/tag_list.go b/internal/restic/tag_list.go new file mode 100644 index 000000000..0f8232e5b --- /dev/null +++ b/internal/restic/tag_list.go @@ -0,0 +1,62 @@ +package restic + +import ( + "fmt" + "strings" +) + +// TagList is a list of tags. +type TagList []string + +// splitTagList splits a string into a list of tags. The tags in the string +// need to be separated by commas. Whitespace is stripped around the individual +// tags. +func splitTagList(s string) (l TagList) { + for _, t := range strings.Split(s, ",") { + l = append(l, strings.TrimSpace(t)) + } + return l +} + +func (l TagList) String() string { + return "[" + strings.Join(l, ", ") + "]" +} + +// Set updates the TagList's value. +func (l *TagList) Set(s string) error { + *l = splitTagList(s) + return nil +} + +// Type returns a description of the type. +func (TagList) Type() string { + return "TagList" +} + +// TagLists consists of several TagList. +type TagLists []TagList + +// splitTagLists splits a slice of strings into a slice of TagLists using +// SplitTagList. +func splitTagLists(s []string) (l TagLists) { + l = make([]TagList, 0, len(s)) + for _, t := range s { + l = append(l, splitTagList(t)) + } + return l +} + +func (l TagLists) String() string { + return fmt.Sprintf("%v", []TagList(l)) +} + +// Set updates the TagList's value. +func (l *TagLists) Set(s string) error { + *l = append(*l, splitTagList(s)) + return nil +} + +// Type returns a description of the type. 
+func (TagLists) Type() string { + return "TagLists" +} diff --git a/internal/restic/testdata/filter_snapshots_0 b/internal/restic/testdata/filter_snapshots_0 new file mode 100644 index 000000000..324bcaa00 --- /dev/null +++ b/internal/restic/testdata/filter_snapshots_0 @@ -0,0 +1,213 @@ +[ + { + "time": "2016-01-18T12:02:03Z", + "tree": null, + "paths": [ + "/usr", + "/bin" + ], + "hostname": "foo", + "username": "testuser", + "tags": [ + "bar" + ] + }, + { + "time": "2016-01-12T21:08:03Z", + "tree": null, + "paths": [ + "/usr", + "/bin" + ], + "hostname": "foo", + "username": "testuser", + "tags": [ + "bar" + ] + }, + { + "time": "2016-01-09T21:02:03Z", + "tree": null, + "paths": [ + "/usr", + "/sbin" + ], + "hostname": "foo", + "username": "root", + "tags": [ + "fox" + ] + }, + { + "time": "2016-01-08T20:02:03Z", + "tree": null, + "paths": [ + "/usr", + "/sbin" + ], + "hostname": "foo", + "username": "root", + "tags": [ + "foo" + ] + }, + { + "time": "2016-01-07T10:02:03Z", + "tree": null, + "paths": [ + "/usr", + "/bin" + ], + "hostname": "foo", + "username": "testuser", + "tags": [ + "fox" + ] + }, + { + "time": "2016-01-06T08:02:03Z", + "tree": null, + "paths": [ + "/usr", + "/bin" + ], + "hostname": "foo", + "username": "testuser", + "tags": [ + "fox" + ] + }, + { + "time": "2016-01-05T09:02:03Z", + "tree": null, + "paths": [ + "/usr", + "/bin" + ], + "hostname": "foo", + "username": "testuser", + "tags": [ + "foo" + ] + }, + { + "time": "2016-01-04T16:23:03Z", + "tree": null, + "paths": [ + "/usr", + "/bin" + ], + "hostname": "foo", + "username": "testuser", + "tags": [ + "test", + "test2" + ] + }, + { + "time": "2016-01-04T12:30:03Z", + "tree": null, + "paths": [ + "/usr", + "/bin" + ], + "hostname": "foo", + "username": "testuser", + "tags": [ + "test", + "foo", + "bar" + ] + }, + { + "time": "2016-01-04T12:28:03Z", + "tree": null, + "paths": [ + "/usr", + "/bin" + ], + "hostname": "foo", + "username": "testuser", + "tags": [ + "test" + ] + }, + { + "time": "2016-01-04T12:24:03Z", + "tree": null, + "paths": [ + "/usr", + "/bin" + ], + "hostname": "foo", + "username": "testuser", + "tags": [ + "test" + ] + }, + { + "time": "2016-01-04T12:23:03Z", + "tree": null, + "paths": [ + "/usr", + "/bin" + ], + "hostname": "foo", + "username": "testuser", + "tags": [ + "foo" + ] + }, + { + "time": "2016-01-04T11:23:03Z", + "tree": null, + "paths": [ + "/usr", + "/bin" + ], + "hostname": "foo", + "username": "testuser", + "tags": [ + "foo" + ] + }, + { + "time": "2016-01-04T10:23:03Z", + "tree": null, + "paths": [ + "/usr", + "/bin" + ], + "hostname": "foo", + "username": "testuser", + "tags": [ + "foo" + ] + }, + { + "time": "2016-01-03T07:02:03Z", + "tree": null, + "paths": [ + "/usr", + "/bin" + ], + "hostname": "foo", + "username": "testuser", + "tags": [ + "foo" + ] + }, + { + "time": "2016-01-01T01:02:03Z", + "tree": null, + "paths": [ + "/usr", + "/bin" + ], + "hostname": "foo", + "username": "testuser", + "tags": [ + "foo" + ] + } +] \ No newline at end of file diff --git a/internal/restic/testdata/filter_snapshots_1 b/internal/restic/testdata/filter_snapshots_1 new file mode 100644 index 000000000..f219dcc7c --- /dev/null +++ b/internal/restic/testdata/filter_snapshots_1 @@ -0,0 +1,41 @@ +[ + { + "time": "2016-01-12T21:02:03Z", + "tree": null, + "paths": [ + "/usr", + "/sbin" + ], + "hostname": "bar", + "username": "root", + "tags": [ + "foo" + ] + }, + { + "time": "2016-01-09T21:02:03Z", + "tree": null, + "paths": [ + "/usr", + "/sbin" + ], + "hostname": "foo", + "username": "root", 
+ "tags": [ + "fox" + ] + }, + { + "time": "2016-01-08T20:02:03Z", + "tree": null, + "paths": [ + "/usr", + "/sbin" + ], + "hostname": "foo", + "username": "root", + "tags": [ + "foo" + ] + } +] \ No newline at end of file diff --git a/internal/restic/testdata/filter_snapshots_2 b/internal/restic/testdata/filter_snapshots_2 new file mode 100644 index 000000000..f070335e1 --- /dev/null +++ b/internal/restic/testdata/filter_snapshots_2 @@ -0,0 +1,28 @@ +[ + { + "time": "2016-01-09T21:02:03Z", + "tree": null, + "paths": [ + "/usr", + "/sbin" + ], + "hostname": "foo", + "username": "root", + "tags": [ + "fox" + ] + }, + { + "time": "2016-01-08T20:02:03Z", + "tree": null, + "paths": [ + "/usr", + "/sbin" + ], + "hostname": "foo", + "username": "root", + "tags": [ + "foo" + ] + } +] \ No newline at end of file diff --git a/internal/restic/testdata/filter_snapshots_3 b/internal/restic/testdata/filter_snapshots_3 new file mode 100644 index 000000000..c3fff0936 --- /dev/null +++ b/internal/restic/testdata/filter_snapshots_3 @@ -0,0 +1,213 @@ +[ + { + "time": "2016-01-18T12:02:03Z", + "tree": null, + "paths": [ + "/usr", + "/bin" + ], + "hostname": "foo", + "username": "testuser", + "tags": [ + "bar" + ] + }, + { + "time": "2016-01-12T21:08:03Z", + "tree": null, + "paths": [ + "/usr", + "/bin" + ], + "hostname": "foo", + "username": "testuser", + "tags": [ + "bar" + ] + }, + { + "time": "2016-01-07T10:02:03Z", + "tree": null, + "paths": [ + "/usr", + "/bin" + ], + "hostname": "foo", + "username": "testuser", + "tags": [ + "fox" + ] + }, + { + "time": "2016-01-06T08:02:03Z", + "tree": null, + "paths": [ + "/usr", + "/bin" + ], + "hostname": "foo", + "username": "testuser", + "tags": [ + "fox" + ] + }, + { + "time": "2016-01-05T09:02:03Z", + "tree": null, + "paths": [ + "/usr", + "/bin" + ], + "hostname": "foo", + "username": "testuser", + "tags": [ + "foo" + ] + }, + { + "time": "2016-01-04T16:23:03Z", + "tree": null, + "paths": [ + "/usr", + "/bin" + ], + "hostname": "foo", + "username": "testuser", + "tags": [ + "test", + "test2" + ] + }, + { + "time": "2016-01-04T12:30:03Z", + "tree": null, + "paths": [ + "/usr", + "/bin" + ], + "hostname": "foo", + "username": "testuser", + "tags": [ + "test", + "foo", + "bar" + ] + }, + { + "time": "2016-01-04T12:28:03Z", + "tree": null, + "paths": [ + "/usr", + "/bin" + ], + "hostname": "foo", + "username": "testuser", + "tags": [ + "test" + ] + }, + { + "time": "2016-01-04T12:24:03Z", + "tree": null, + "paths": [ + "/usr", + "/bin" + ], + "hostname": "foo", + "username": "testuser", + "tags": [ + "test" + ] + }, + { + "time": "2016-01-04T12:23:03Z", + "tree": null, + "paths": [ + "/usr", + "/bin" + ], + "hostname": "foo", + "username": "testuser", + "tags": [ + "foo" + ] + }, + { + "time": "2016-01-04T11:23:03Z", + "tree": null, + "paths": [ + "/usr", + "/bin" + ], + "hostname": "foo", + "username": "testuser", + "tags": [ + "foo" + ] + }, + { + "time": "2016-01-04T10:23:03Z", + "tree": null, + "paths": [ + "/usr", + "/bin" + ], + "hostname": "foo", + "username": "testuser", + "tags": [ + "foo" + ] + }, + { + "time": "2016-01-03T07:02:03Z", + "tree": null, + "paths": [ + "/usr", + "/bin" + ], + "hostname": "foo", + "username": "testuser", + "tags": [ + "foo" + ] + }, + { + "time": "2016-01-01T07:08:03Z", + "tree": null, + "paths": [ + "/usr", + "/bin" + ], + "hostname": "bar", + "username": "testuser", + "tags": [ + "foo" + ] + }, + { + "time": "2016-01-01T01:03:03Z", + "tree": null, + "paths": [ + "/usr", + "/bin" + ], + "hostname": "bar", + "username": 
"testuser", + "tags": [ + "foo" + ] + }, + { + "time": "2016-01-01T01:02:03Z", + "tree": null, + "paths": [ + "/usr", + "/bin" + ], + "hostname": "foo", + "username": "testuser", + "tags": [ + "foo" + ] + } +] \ No newline at end of file diff --git a/internal/restic/testdata/filter_snapshots_4 b/internal/restic/testdata/filter_snapshots_4 new file mode 100644 index 000000000..4719ddd59 --- /dev/null +++ b/internal/restic/testdata/filter_snapshots_4 @@ -0,0 +1,28 @@ +[ + { + "time": "2016-01-01T07:08:03Z", + "tree": null, + "paths": [ + "/usr", + "/bin" + ], + "hostname": "bar", + "username": "testuser", + "tags": [ + "foo" + ] + }, + { + "time": "2016-01-01T01:03:03Z", + "tree": null, + "paths": [ + "/usr", + "/bin" + ], + "hostname": "bar", + "username": "testuser", + "tags": [ + "foo" + ] + } +] \ No newline at end of file diff --git a/internal/restic/testdata/filter_snapshots_5 b/internal/restic/testdata/filter_snapshots_5 new file mode 100644 index 000000000..f070335e1 --- /dev/null +++ b/internal/restic/testdata/filter_snapshots_5 @@ -0,0 +1,28 @@ +[ + { + "time": "2016-01-09T21:02:03Z", + "tree": null, + "paths": [ + "/usr", + "/sbin" + ], + "hostname": "foo", + "username": "root", + "tags": [ + "fox" + ] + }, + { + "time": "2016-01-08T20:02:03Z", + "tree": null, + "paths": [ + "/usr", + "/sbin" + ], + "hostname": "foo", + "username": "root", + "tags": [ + "foo" + ] + } +] \ No newline at end of file diff --git a/internal/restic/testdata/filter_snapshots_6 b/internal/restic/testdata/filter_snapshots_6 new file mode 100644 index 000000000..c711171b7 --- /dev/null +++ b/internal/restic/testdata/filter_snapshots_6 @@ -0,0 +1,147 @@ +[ + { + "time": "2016-01-12T21:02:03Z", + "tree": null, + "paths": [ + "/usr", + "/sbin" + ], + "hostname": "bar", + "username": "root", + "tags": [ + "foo" + ] + }, + { + "time": "2016-01-08T20:02:03Z", + "tree": null, + "paths": [ + "/usr", + "/sbin" + ], + "hostname": "foo", + "username": "root", + "tags": [ + "foo" + ] + }, + { + "time": "2016-01-05T09:02:03Z", + "tree": null, + "paths": [ + "/usr", + "/bin" + ], + "hostname": "foo", + "username": "testuser", + "tags": [ + "foo" + ] + }, + { + "time": "2016-01-04T12:30:03Z", + "tree": null, + "paths": [ + "/usr", + "/bin" + ], + "hostname": "foo", + "username": "testuser", + "tags": [ + "test", + "foo", + "bar" + ] + }, + { + "time": "2016-01-04T12:23:03Z", + "tree": null, + "paths": [ + "/usr", + "/bin" + ], + "hostname": "foo", + "username": "testuser", + "tags": [ + "foo" + ] + }, + { + "time": "2016-01-04T11:23:03Z", + "tree": null, + "paths": [ + "/usr", + "/bin" + ], + "hostname": "foo", + "username": "testuser", + "tags": [ + "foo" + ] + }, + { + "time": "2016-01-04T10:23:03Z", + "tree": null, + "paths": [ + "/usr", + "/bin" + ], + "hostname": "foo", + "username": "testuser", + "tags": [ + "foo" + ] + }, + { + "time": "2016-01-03T07:02:03Z", + "tree": null, + "paths": [ + "/usr", + "/bin" + ], + "hostname": "foo", + "username": "testuser", + "tags": [ + "foo" + ] + }, + { + "time": "2016-01-01T07:08:03Z", + "tree": null, + "paths": [ + "/usr", + "/bin" + ], + "hostname": "bar", + "username": "testuser", + "tags": [ + "foo" + ] + }, + { + "time": "2016-01-01T01:03:03Z", + "tree": null, + "paths": [ + "/usr", + "/bin" + ], + "hostname": "bar", + "username": "testuser", + "tags": [ + "foo" + ] + }, + { + "time": "2016-01-01T01:02:03Z", + "tree": null, + "paths": [ + "/usr", + "/bin" + ], + "hostname": "foo", + "username": "testuser", + "tags": [ + "foo" + ] + } +] \ No newline at end of file diff 
--git a/internal/restic/testdata/filter_snapshots_7 b/internal/restic/testdata/filter_snapshots_7 new file mode 100644 index 000000000..bb0924dbe --- /dev/null +++ b/internal/restic/testdata/filter_snapshots_7 @@ -0,0 +1,15 @@ +[ + { + "time": "2016-01-09T21:02:03Z", + "tree": null, + "paths": [ + "/usr", + "/sbin" + ], + "hostname": "foo", + "username": "root", + "tags": [ + "fox" + ] + } +] \ No newline at end of file diff --git a/internal/restic/testdata/filter_snapshots_8 b/internal/restic/testdata/filter_snapshots_8 new file mode 100644 index 000000000..fbff1e883 --- /dev/null +++ b/internal/restic/testdata/filter_snapshots_8 @@ -0,0 +1,17 @@ +[ + { + "time": "2016-01-04T12:30:03Z", + "tree": null, + "paths": [ + "/usr", + "/bin" + ], + "hostname": "foo", + "username": "testuser", + "tags": [ + "test", + "foo", + "bar" + ] + } +] \ No newline at end of file diff --git a/internal/restic/testdata/filter_snapshots_9 b/internal/restic/testdata/filter_snapshots_9 new file mode 100644 index 000000000..ec747fa47 --- /dev/null +++ b/internal/restic/testdata/filter_snapshots_9 @@ -0,0 +1 @@ +null \ No newline at end of file diff --git a/internal/restic/testdata/policy_keep_snapshots_0 b/internal/restic/testdata/policy_keep_snapshots_0 new file mode 100644 index 000000000..1290b88cf --- /dev/null +++ b/internal/restic/testdata/policy_keep_snapshots_0 @@ -0,0 +1,1782 @@ +{ + "keep": [ + { + "time": "2016-01-18T12:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-12T21:08:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-12T21:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-09T21:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-08T20:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-07T10:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-06T08:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-05T09:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T16:23:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T12:30:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T12:28:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T12:24:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T12:23:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T11:23:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T10:23:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-03T07:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-01T07:08:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-01T01:03:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-01T01:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2015-11-22T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-11-21T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-11-20T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-11-18T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-11-15T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-11-13T10:20:30.1Z", + "tree": null, + "paths": null + }, + { + "time": "2015-11-13T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-11-12T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-11-10T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-11-08T10:20:30Z", + "tree": 
null, + "paths": null + }, + { + "time": "2015-10-22T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-10-22T10:20:30Z", + "tree": null, + "paths": [ + "path1", + "path2" + ], + "tags": [ + "foo", + "bar" + ] + }, + { + "time": "2015-10-22T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo", + "bar" + ] + }, + { + "time": "2015-10-22T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-10-22T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo", + "bar" + ] + }, + { + "time": "2015-10-20T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-10-11T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-10-10T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-10-09T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-10-08T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-10-06T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-10-05T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-10-02T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-10-01T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-09-22T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-09-20T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-09-11T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-09-10T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-09-09T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-09-08T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-09-06T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-09-05T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-09-02T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-09-01T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-08-22T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-08-21T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-08-20T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-08-18T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-08-15T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-08-13T10:20:30.1Z", + "tree": null, + "paths": null + }, + { + "time": "2015-08-13T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-08-12T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-08-10T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-08-08T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2014-11-22T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2014-11-21T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2014-11-20T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2014-11-18T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2014-11-15T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo", + "bar" + ] + }, + { + "time": "2014-11-13T10:20:30.1Z", + "tree": null, + "paths": null, + "tags": [ + "bar" + ] + }, + { + "time": "2014-11-13T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + { + "time": "2014-11-12T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + { + "time": "2014-11-10T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + { + "time": "2014-11-08T10:20:30Z", + 
"tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + { + "time": "2014-10-22T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + { + "time": "2014-10-20T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + { + "time": "2014-10-11T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + { + "time": "2014-10-10T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + { + "time": "2014-10-09T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + { + "time": "2014-10-08T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + { + "time": "2014-10-06T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + { + "time": "2014-10-05T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + { + "time": "2014-10-02T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + { + "time": "2014-10-01T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + { + "time": "2014-09-22T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2014-09-20T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2014-09-11T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2014-09-10T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2014-09-09T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2014-09-08T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2014-09-06T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2014-09-05T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2014-09-02T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2014-09-01T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2014-08-22T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2014-08-21T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2014-08-20T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2014-08-18T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2014-08-15T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2014-08-13T10:20:30.1Z", + "tree": null, + "paths": null + }, + { + "time": "2014-08-13T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2014-08-12T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2014-08-10T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2014-08-08T10:20:30Z", + "tree": null, + "paths": null + } + ], + "reasons": [ + { + "snapshot": { + "time": "2016-01-18T12:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "policy is empty" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-12T21:08:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "policy is empty" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-12T21:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "policy is empty" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-09T21:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "policy is empty" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-08T20:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "policy is empty" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-07T10:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "policy is empty" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-06T08:02:03Z", + 
"tree": null, + "paths": null + }, + "matches": [ + "policy is empty" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-05T09:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "policy is empty" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-04T16:23:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "policy is empty" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-04T12:30:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "policy is empty" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-04T12:28:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "policy is empty" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-04T12:24:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "policy is empty" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-04T12:23:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "policy is empty" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-04T11:23:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "policy is empty" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-04T10:23:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "policy is empty" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-03T07:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "policy is empty" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-01T07:08:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "policy is empty" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-01T01:03:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "policy is empty" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-01T01:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "policy is empty" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-11-22T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "policy is empty" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-11-21T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "policy is empty" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-11-20T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "policy is empty" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-11-18T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "policy is empty" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-11-15T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "policy is empty" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-11-13T10:20:30.1Z", + "tree": null, + "paths": null + }, + "matches": [ + "policy is empty" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-11-13T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "policy is empty" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-11-12T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "policy is empty" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-11-10T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "policy is empty" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-11-08T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "policy is empty" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-10-22T10:20:30Z", + "tree": null, + "paths": null + 
}, + "matches": [ + "policy is empty" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-10-22T10:20:30Z", + "tree": null, + "paths": [ + "path1", + "path2" + ], + "tags": [ + "foo", + "bar" + ] + }, + "matches": [ + "policy is empty" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-10-22T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo", + "bar" + ] + }, + "matches": [ + "policy is empty" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-10-22T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "policy is empty" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-10-22T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo", + "bar" + ] + }, + "matches": [ + "policy is empty" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-10-20T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "policy is empty" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-10-11T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "policy is empty" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-10-10T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "policy is empty" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-10-09T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "policy is empty" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-10-08T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "policy is empty" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-10-06T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "policy is empty" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-10-05T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "policy is empty" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-10-02T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "policy is empty" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-10-01T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "policy is empty" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-09-22T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "policy is empty" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-09-20T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "policy is empty" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-09-11T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "policy is empty" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-09-10T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "policy is empty" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-09-09T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "policy is empty" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-09-08T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "policy is empty" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-09-06T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "policy is empty" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-09-05T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "policy is empty" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-09-02T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "policy is empty" + ], + "counters": {} + }, 
+ { + "snapshot": { + "time": "2015-09-01T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "policy is empty" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-08-22T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "policy is empty" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-08-21T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "policy is empty" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-08-20T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "policy is empty" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-08-18T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "policy is empty" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-08-15T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "policy is empty" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-08-13T10:20:30.1Z", + "tree": null, + "paths": null + }, + "matches": [ + "policy is empty" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-08-13T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "policy is empty" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-08-12T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "policy is empty" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-08-10T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "policy is empty" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-08-08T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "policy is empty" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2014-11-22T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "policy is empty" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2014-11-21T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "policy is empty" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2014-11-20T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "policy is empty" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2014-11-18T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "policy is empty" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2014-11-15T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo", + "bar" + ] + }, + "matches": [ + "policy is empty" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2014-11-13T10:20:30.1Z", + "tree": null, + "paths": null, + "tags": [ + "bar" + ] + }, + "matches": [ + "policy is empty" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2014-11-13T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + "matches": [ + "policy is empty" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2014-11-12T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + "matches": [ + "policy is empty" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2014-11-10T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + "matches": [ + "policy is empty" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2014-11-08T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + "matches": [ + "policy is empty" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2014-10-22T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + "matches": [ + "policy is empty" + ], + "counters": {} + }, 
+ { + "snapshot": { + "time": "2014-10-20T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + "matches": [ + "policy is empty" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2014-10-11T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + "matches": [ + "policy is empty" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2014-10-10T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + "matches": [ + "policy is empty" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2014-10-09T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + "matches": [ + "policy is empty" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2014-10-08T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + "matches": [ + "policy is empty" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2014-10-06T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + "matches": [ + "policy is empty" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2014-10-05T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + "matches": [ + "policy is empty" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2014-10-02T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + "matches": [ + "policy is empty" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2014-10-01T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + "matches": [ + "policy is empty" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2014-09-22T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "policy is empty" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2014-09-20T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "policy is empty" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2014-09-11T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "policy is empty" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2014-09-10T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "policy is empty" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2014-09-09T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "policy is empty" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2014-09-08T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "policy is empty" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2014-09-06T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "policy is empty" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2014-09-05T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "policy is empty" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2014-09-02T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "policy is empty" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2014-09-01T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "policy is empty" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2014-08-22T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "policy is empty" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2014-08-21T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "policy is empty" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2014-08-20T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "policy 
is empty" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2014-08-18T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "policy is empty" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2014-08-15T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "policy is empty" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2014-08-13T10:20:30.1Z", + "tree": null, + "paths": null + }, + "matches": [ + "policy is empty" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2014-08-13T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "policy is empty" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2014-08-12T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "policy is empty" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2014-08-10T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "policy is empty" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2014-08-08T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "policy is empty" + ], + "counters": {} + } + ] +} \ No newline at end of file diff --git a/internal/restic/testdata/policy_keep_snapshots_1 b/internal/restic/testdata/policy_keep_snapshots_1 new file mode 100644 index 000000000..060faeff0 --- /dev/null +++ b/internal/restic/testdata/policy_keep_snapshots_1 @@ -0,0 +1,184 @@ +{ + "keep": [ + { + "time": "2016-01-18T12:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-12T21:08:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-12T21:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-09T21:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-08T20:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-07T10:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-06T08:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-05T09:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T16:23:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T12:30:03Z", + "tree": null, + "paths": null + } + ], + "reasons": [ + { + "snapshot": { + "time": "2016-01-18T12:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 9 + } + }, + { + "snapshot": { + "time": "2016-01-12T21:08:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 8 + } + }, + { + "snapshot": { + "time": "2016-01-12T21:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 7 + } + }, + { + "snapshot": { + "time": "2016-01-09T21:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 6 + } + }, + { + "snapshot": { + "time": "2016-01-08T20:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 5 + } + }, + { + "snapshot": { + "time": "2016-01-07T10:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 4 + } + }, + { + "snapshot": { + "time": "2016-01-06T08:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 3 + } + }, + { + "snapshot": { + "time": "2016-01-05T09:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 2 + } + }, + { + "snapshot": { + "time": 
"2016-01-04T16:23:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 1 + } + }, + { + "snapshot": { + "time": "2016-01-04T12:30:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": {} + } + ] +} \ No newline at end of file diff --git a/internal/restic/testdata/policy_keep_snapshots_10 b/internal/restic/testdata/policy_keep_snapshots_10 new file mode 100644 index 000000000..1a596e838 --- /dev/null +++ b/internal/restic/testdata/policy_keep_snapshots_10 @@ -0,0 +1,187 @@ +{ + "keep": [ + { + "time": "2016-01-18T12:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-12T21:08:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-09T21:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-08T20:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-07T10:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-06T08:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-05T09:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T16:23:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-03T07:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-01T07:08:03Z", + "tree": null, + "paths": null + } + ], + "reasons": [ + { + "snapshot": { + "time": "2016-01-18T12:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot", + "daily snapshot" + ], + "counters": { + "last": 1, + "daily": 9 + } + }, + { + "snapshot": { + "time": "2016-01-12T21:08:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot", + "daily snapshot" + ], + "counters": { + "daily": 8 + } + }, + { + "snapshot": { + "time": "2016-01-09T21:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "daily snapshot" + ], + "counters": { + "daily": 7 + } + }, + { + "snapshot": { + "time": "2016-01-08T20:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "daily snapshot" + ], + "counters": { + "daily": 6 + } + }, + { + "snapshot": { + "time": "2016-01-07T10:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "daily snapshot" + ], + "counters": { + "daily": 5 + } + }, + { + "snapshot": { + "time": "2016-01-06T08:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "daily snapshot" + ], + "counters": { + "daily": 4 + } + }, + { + "snapshot": { + "time": "2016-01-05T09:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "daily snapshot" + ], + "counters": { + "daily": 3 + } + }, + { + "snapshot": { + "time": "2016-01-04T16:23:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "daily snapshot" + ], + "counters": { + "daily": 2 + } + }, + { + "snapshot": { + "time": "2016-01-03T07:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "daily snapshot" + ], + "counters": { + "daily": 1 + } + }, + { + "snapshot": { + "time": "2016-01-01T07:08:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "daily snapshot" + ], + "counters": {} + } + ] +} \ No newline at end of file diff --git a/internal/restic/testdata/policy_keep_snapshots_11 b/internal/restic/testdata/policy_keep_snapshots_11 new file mode 100644 index 000000000..17586c120 --- /dev/null +++ b/internal/restic/testdata/policy_keep_snapshots_11 @@ -0,0 +1,40 @@ +{ + "keep": [ + { + "time": "2016-01-18T12:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-12T21:08:03Z", + "tree": null, + "paths": null + } + ], + "reasons": [ + { + 
"snapshot": { + "time": "2016-01-18T12:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "weekly snapshot" + ], + "counters": { + "weekly": 1 + } + }, + { + "snapshot": { + "time": "2016-01-12T21:08:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "weekly snapshot" + ], + "counters": {} + } + ] +} \ No newline at end of file diff --git a/internal/restic/testdata/policy_keep_snapshots_12 b/internal/restic/testdata/policy_keep_snapshots_12 new file mode 100644 index 000000000..c0ea0a9b1 --- /dev/null +++ b/internal/restic/testdata/policy_keep_snapshots_12 @@ -0,0 +1,76 @@ +{ + "keep": [ + { + "time": "2016-01-18T12:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-12T21:08:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-09T21:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-03T07:02:03Z", + "tree": null, + "paths": null + } + ], + "reasons": [ + { + "snapshot": { + "time": "2016-01-18T12:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "weekly snapshot" + ], + "counters": { + "weekly": 3 + } + }, + { + "snapshot": { + "time": "2016-01-12T21:08:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "weekly snapshot" + ], + "counters": { + "weekly": 2 + } + }, + { + "snapshot": { + "time": "2016-01-09T21:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "weekly snapshot" + ], + "counters": { + "weekly": 1 + } + }, + { + "snapshot": { + "time": "2016-01-03T07:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "weekly snapshot" + ], + "counters": {} + } + ] +} \ No newline at end of file diff --git a/internal/restic/testdata/policy_keep_snapshots_13 b/internal/restic/testdata/policy_keep_snapshots_13 new file mode 100644 index 000000000..b52b9a8bc --- /dev/null +++ b/internal/restic/testdata/policy_keep_snapshots_13 @@ -0,0 +1,81 @@ +{ + "keep": [ + { + "time": "2016-01-18T12:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-12T21:08:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-09T21:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-03T07:02:03Z", + "tree": null, + "paths": null + } + ], + "reasons": [ + { + "snapshot": { + "time": "2016-01-18T12:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "daily snapshot", + "weekly snapshot" + ], + "counters": { + "daily": 2, + "weekly": 3 + } + }, + { + "snapshot": { + "time": "2016-01-12T21:08:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "daily snapshot", + "weekly snapshot" + ], + "counters": { + "daily": 1, + "weekly": 2 + } + }, + { + "snapshot": { + "time": "2016-01-09T21:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "daily snapshot", + "weekly snapshot" + ], + "counters": { + "weekly": 1 + } + }, + { + "snapshot": { + "time": "2016-01-03T07:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "weekly snapshot" + ], + "counters": {} + } + ] +} \ No newline at end of file diff --git a/internal/restic/testdata/policy_keep_snapshots_14 b/internal/restic/testdata/policy_keep_snapshots_14 new file mode 100644 index 000000000..cd30be6cd --- /dev/null +++ b/internal/restic/testdata/policy_keep_snapshots_14 @@ -0,0 +1,112 @@ +{ + "keep": [ + { + "time": "2016-01-18T12:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2015-11-22T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-10-22T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-09-22T10:20:30Z", + "tree": null, + 
"paths": null + }, + { + "time": "2015-08-22T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2014-11-22T10:20:30Z", + "tree": null, + "paths": null + } + ], + "reasons": [ + { + "snapshot": { + "time": "2016-01-18T12:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "monthly snapshot" + ], + "counters": { + "monthly": 5 + } + }, + { + "snapshot": { + "time": "2015-11-22T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "monthly snapshot" + ], + "counters": { + "monthly": 4 + } + }, + { + "snapshot": { + "time": "2015-10-22T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "monthly snapshot" + ], + "counters": { + "monthly": 3 + } + }, + { + "snapshot": { + "time": "2015-09-22T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "monthly snapshot" + ], + "counters": { + "monthly": 2 + } + }, + { + "snapshot": { + "time": "2015-08-22T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "monthly snapshot" + ], + "counters": { + "monthly": 1 + } + }, + { + "snapshot": { + "time": "2014-11-22T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "monthly snapshot" + ], + "counters": {} + } + ] +} \ No newline at end of file diff --git a/internal/restic/testdata/policy_keep_snapshots_15 b/internal/restic/testdata/policy_keep_snapshots_15 new file mode 100644 index 000000000..5aa61fe49 --- /dev/null +++ b/internal/restic/testdata/policy_keep_snapshots_15 @@ -0,0 +1,135 @@ +{ + "keep": [ + { + "time": "2016-01-18T12:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-12T21:08:03Z", + "tree": null, + "paths": null + }, + { + "time": "2015-11-22T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-10-22T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-09-22T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-08-22T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2014-11-22T10:20:30Z", + "tree": null, + "paths": null + } + ], + "reasons": [ + { + "snapshot": { + "time": "2016-01-18T12:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "daily snapshot", + "weekly snapshot", + "monthly snapshot" + ], + "counters": { + "daily": 1, + "weekly": 1, + "monthly": 5 + } + }, + { + "snapshot": { + "time": "2016-01-12T21:08:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "daily snapshot", + "weekly snapshot" + ], + "counters": { + "monthly": 5 + } + }, + { + "snapshot": { + "time": "2015-11-22T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "monthly snapshot" + ], + "counters": { + "monthly": 4 + } + }, + { + "snapshot": { + "time": "2015-10-22T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "monthly snapshot" + ], + "counters": { + "monthly": 3 + } + }, + { + "snapshot": { + "time": "2015-09-22T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "monthly snapshot" + ], + "counters": { + "monthly": 2 + } + }, + { + "snapshot": { + "time": "2015-08-22T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "monthly snapshot" + ], + "counters": { + "monthly": 1 + } + }, + { + "snapshot": { + "time": "2014-11-22T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "monthly snapshot" + ], + "counters": {} + } + ] +} \ No newline at end of file diff --git a/internal/restic/testdata/policy_keep_snapshots_16 b/internal/restic/testdata/policy_keep_snapshots_16 new file mode 100644 index 000000000..d0cae94b5 --- /dev/null +++ 
b/internal/restic/testdata/policy_keep_snapshots_16 @@ -0,0 +1,60 @@ +{ + "keep": [ + { + "time": "2016-01-18T12:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2015-11-22T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2014-11-22T10:20:30Z", + "tree": null, + "paths": null + } + ], + "reasons": [ + { + "snapshot": { + "time": "2016-01-18T12:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "yearly snapshot" + ], + "counters": { + "yearly": 9 + } + }, + { + "snapshot": { + "time": "2015-11-22T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "yearly snapshot" + ], + "counters": { + "yearly": 8 + } + }, + { + "snapshot": { + "time": "2014-11-22T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "yearly snapshot" + ], + "counters": { + "yearly": 7 + } + } + ] +} \ No newline at end of file diff --git a/internal/restic/testdata/policy_keep_snapshots_17 b/internal/restic/testdata/policy_keep_snapshots_17 new file mode 100644 index 000000000..742b8005b --- /dev/null +++ b/internal/restic/testdata/policy_keep_snapshots_17 @@ -0,0 +1,206 @@ +{ + "keep": [ + { + "time": "2016-01-18T12:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-12T21:08:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-09T21:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-08T20:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-07T10:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-06T08:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-05T09:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2015-11-22T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-10-22T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2014-11-22T10:20:30Z", + "tree": null, + "paths": null + } + ], + "reasons": [ + { + "snapshot": { + "time": "2016-01-18T12:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "daily snapshot", + "weekly snapshot", + "monthly snapshot", + "yearly snapshot" + ], + "counters": { + "daily": 6, + "weekly": 1, + "monthly": 2, + "yearly": 9 + } + }, + { + "snapshot": { + "time": "2016-01-12T21:08:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "daily snapshot", + "weekly snapshot" + ], + "counters": { + "daily": 5, + "monthly": 2, + "yearly": 9 + } + }, + { + "snapshot": { + "time": "2016-01-09T21:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "daily snapshot" + ], + "counters": { + "daily": 4, + "monthly": 2, + "yearly": 9 + } + }, + { + "snapshot": { + "time": "2016-01-08T20:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "daily snapshot" + ], + "counters": { + "daily": 3, + "monthly": 2, + "yearly": 9 + } + }, + { + "snapshot": { + "time": "2016-01-07T10:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "daily snapshot" + ], + "counters": { + "daily": 2, + "monthly": 2, + "yearly": 9 + } + }, + { + "snapshot": { + "time": "2016-01-06T08:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "daily snapshot" + ], + "counters": { + "daily": 1, + "monthly": 2, + "yearly": 9 + } + }, + { + "snapshot": { + "time": "2016-01-05T09:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "daily snapshot" + ], + "counters": { + "monthly": 2, + "yearly": 9 + } + }, + { + "snapshot": { + "time": "2015-11-22T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "monthly snapshot", + "yearly snapshot" + ], + "counters": { 
+ "monthly": 1, + "yearly": 8 + } + }, + { + "snapshot": { + "time": "2015-10-22T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "monthly snapshot" + ], + "counters": { + "yearly": 8 + } + }, + { + "snapshot": { + "time": "2014-11-22T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "yearly snapshot" + ], + "counters": { + "yearly": 7 + } + } + ] +} \ No newline at end of file diff --git a/internal/restic/testdata/policy_keep_snapshots_18 b/internal/restic/testdata/policy_keep_snapshots_18 new file mode 100644 index 000000000..cf63c45b8 --- /dev/null +++ b/internal/restic/testdata/policy_keep_snapshots_18 @@ -0,0 +1,416 @@ +{ + "keep": [ + { + "time": "2015-10-22T10:20:30Z", + "tree": null, + "paths": [ + "path1", + "path2" + ], + "tags": [ + "foo", + "bar" + ] + }, + { + "time": "2015-10-22T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo", + "bar" + ] + }, + { + "time": "2015-10-22T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo", + "bar" + ] + }, + { + "time": "2014-11-15T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo", + "bar" + ] + }, + { + "time": "2014-11-13T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + { + "time": "2014-11-12T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + { + "time": "2014-11-10T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + { + "time": "2014-11-08T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + { + "time": "2014-10-22T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + { + "time": "2014-10-20T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + { + "time": "2014-10-11T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + { + "time": "2014-10-10T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + { + "time": "2014-10-09T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + { + "time": "2014-10-08T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + { + "time": "2014-10-06T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + { + "time": "2014-10-05T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + { + "time": "2014-10-02T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + { + "time": "2014-10-01T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + } + ], + "reasons": [ + { + "snapshot": { + "time": "2015-10-22T10:20:30Z", + "tree": null, + "paths": [ + "path1", + "path2" + ], + "tags": [ + "foo", + "bar" + ] + }, + "matches": [ + "has tags [foo]" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-10-22T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo", + "bar" + ] + }, + "matches": [ + "has tags [foo]" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-10-22T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo", + "bar" + ] + }, + "matches": [ + "has tags [foo]" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2014-11-15T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo", + "bar" + ] + }, + "matches": [ + "has tags [foo]" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2014-11-13T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + "matches": [ + "has tags [foo]" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2014-11-12T10:20:30Z", 
+ "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + "matches": [ + "has tags [foo]" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2014-11-10T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + "matches": [ + "has tags [foo]" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2014-11-08T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + "matches": [ + "has tags [foo]" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2014-10-22T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + "matches": [ + "has tags [foo]" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2014-10-20T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + "matches": [ + "has tags [foo]" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2014-10-11T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + "matches": [ + "has tags [foo]" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2014-10-10T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + "matches": [ + "has tags [foo]" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2014-10-09T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + "matches": [ + "has tags [foo]" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2014-10-08T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + "matches": [ + "has tags [foo]" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2014-10-06T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + "matches": [ + "has tags [foo]" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2014-10-05T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + "matches": [ + "has tags [foo]" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2014-10-02T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + "matches": [ + "has tags [foo]" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2014-10-01T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + "matches": [ + "has tags [foo]" + ], + "counters": {} + } + ] +} \ No newline at end of file diff --git a/internal/restic/testdata/policy_keep_snapshots_19 b/internal/restic/testdata/policy_keep_snapshots_19 new file mode 100644 index 000000000..81a438313 --- /dev/null +++ b/internal/restic/testdata/policy_keep_snapshots_19 @@ -0,0 +1,108 @@ +{ + "keep": [ + { + "time": "2015-10-22T10:20:30Z", + "tree": null, + "paths": [ + "path1", + "path2" + ], + "tags": [ + "foo", + "bar" + ] + }, + { + "time": "2015-10-22T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo", + "bar" + ] + }, + { + "time": "2015-10-22T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo", + "bar" + ] + }, + { + "time": "2014-11-15T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo", + "bar" + ] + } + ], + "reasons": [ + { + "snapshot": { + "time": "2015-10-22T10:20:30Z", + "tree": null, + "paths": [ + "path1", + "path2" + ], + "tags": [ + "foo", + "bar" + ] + }, + "matches": [ + "has tags [foo, bar]" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-10-22T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo", + "bar" + ] + }, + "matches": [ + "has tags [foo, bar]" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-10-22T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo", + "bar" + ] + }, + 
"matches": [ + "has tags [foo, bar]" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2014-11-15T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo", + "bar" + ] + }, + "matches": [ + "has tags [foo, bar]" + ], + "counters": {} + } + ] +} \ No newline at end of file diff --git a/internal/restic/testdata/policy_keep_snapshots_2 b/internal/restic/testdata/policy_keep_snapshots_2 new file mode 100644 index 000000000..05992b4fb --- /dev/null +++ b/internal/restic/testdata/policy_keep_snapshots_2 @@ -0,0 +1,274 @@ +{ + "keep": [ + { + "time": "2016-01-18T12:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-12T21:08:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-12T21:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-09T21:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-08T20:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-07T10:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-06T08:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-05T09:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T16:23:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T12:30:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T12:28:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T12:24:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T12:23:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T11:23:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T10:23:03Z", + "tree": null, + "paths": null + } + ], + "reasons": [ + { + "snapshot": { + "time": "2016-01-18T12:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 14 + } + }, + { + "snapshot": { + "time": "2016-01-12T21:08:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 13 + } + }, + { + "snapshot": { + "time": "2016-01-12T21:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 12 + } + }, + { + "snapshot": { + "time": "2016-01-09T21:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 11 + } + }, + { + "snapshot": { + "time": "2016-01-08T20:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 10 + } + }, + { + "snapshot": { + "time": "2016-01-07T10:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 9 + } + }, + { + "snapshot": { + "time": "2016-01-06T08:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 8 + } + }, + { + "snapshot": { + "time": "2016-01-05T09:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 7 + } + }, + { + "snapshot": { + "time": "2016-01-04T16:23:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 6 + } + }, + { + "snapshot": { + "time": "2016-01-04T12:30:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 5 + } + }, + { + "snapshot": { + "time": "2016-01-04T12:28:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 4 + } + }, + { + "snapshot": { + 
"time": "2016-01-04T12:24:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 3 + } + }, + { + "snapshot": { + "time": "2016-01-04T12:23:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 2 + } + }, + { + "snapshot": { + "time": "2016-01-04T11:23:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 1 + } + }, + { + "snapshot": { + "time": "2016-01-04T10:23:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": {} + } + ] +} \ No newline at end of file diff --git a/internal/restic/testdata/policy_keep_snapshots_20 b/internal/restic/testdata/policy_keep_snapshots_20 new file mode 100644 index 000000000..a57fcf024 --- /dev/null +++ b/internal/restic/testdata/policy_keep_snapshots_20 @@ -0,0 +1,442 @@ +{ + "keep": [ + { + "time": "2015-10-22T10:20:30Z", + "tree": null, + "paths": [ + "path1", + "path2" + ], + "tags": [ + "foo", + "bar" + ] + }, + { + "time": "2015-10-22T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo", + "bar" + ] + }, + { + "time": "2015-10-22T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo", + "bar" + ] + }, + { + "time": "2014-11-15T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo", + "bar" + ] + }, + { + "time": "2014-11-13T10:20:30.1Z", + "tree": null, + "paths": null, + "tags": [ + "bar" + ] + }, + { + "time": "2014-11-13T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + { + "time": "2014-11-12T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + { + "time": "2014-11-10T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + { + "time": "2014-11-08T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + { + "time": "2014-10-22T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + { + "time": "2014-10-20T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + { + "time": "2014-10-11T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + { + "time": "2014-10-10T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + { + "time": "2014-10-09T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + { + "time": "2014-10-08T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + { + "time": "2014-10-06T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + { + "time": "2014-10-05T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + { + "time": "2014-10-02T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + { + "time": "2014-10-01T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + } + ], + "reasons": [ + { + "snapshot": { + "time": "2015-10-22T10:20:30Z", + "tree": null, + "paths": [ + "path1", + "path2" + ], + "tags": [ + "foo", + "bar" + ] + }, + "matches": [ + "has tags [foo]", + "has tags [bar]" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-10-22T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo", + "bar" + ] + }, + "matches": [ + "has tags [foo]", + "has tags [bar]" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-10-22T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo", + "bar" + ] + }, + "matches": [ + "has tags [foo]", + "has tags [bar]" + ], + "counters": {} + }, + { + 
"snapshot": { + "time": "2014-11-15T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo", + "bar" + ] + }, + "matches": [ + "has tags [foo]", + "has tags [bar]" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2014-11-13T10:20:30.1Z", + "tree": null, + "paths": null, + "tags": [ + "bar" + ] + }, + "matches": [ + "has tags [bar]" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2014-11-13T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + "matches": [ + "has tags [foo]" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2014-11-12T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + "matches": [ + "has tags [foo]" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2014-11-10T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + "matches": [ + "has tags [foo]" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2014-11-08T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + "matches": [ + "has tags [foo]" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2014-10-22T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + "matches": [ + "has tags [foo]" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2014-10-20T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + "matches": [ + "has tags [foo]" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2014-10-11T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + "matches": [ + "has tags [foo]" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2014-10-10T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + "matches": [ + "has tags [foo]" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2014-10-09T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + "matches": [ + "has tags [foo]" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2014-10-08T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + "matches": [ + "has tags [foo]" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2014-10-06T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + "matches": [ + "has tags [foo]" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2014-10-05T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + "matches": [ + "has tags [foo]" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2014-10-02T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + "matches": [ + "has tags [foo]" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2014-10-01T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + "matches": [ + "has tags [foo]" + ], + "counters": {} + } + ] +} \ No newline at end of file diff --git a/internal/restic/testdata/policy_keep_snapshots_21 b/internal/restic/testdata/policy_keep_snapshots_21 new file mode 100644 index 000000000..1d1cb5b47 --- /dev/null +++ b/internal/restic/testdata/policy_keep_snapshots_21 @@ -0,0 +1,22 @@ +{ + "keep": [ + { + "time": "2016-01-18T12:02:03Z", + "tree": null, + "paths": null + } + ], + "reasons": [ + { + "snapshot": { + "time": "2016-01-18T12:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1d" + ], + "counters": {} + } + ] +} \ No newline at end of file diff --git a/internal/restic/testdata/policy_keep_snapshots_22 b/internal/restic/testdata/policy_keep_snapshots_22 new file 
mode 100644 index 000000000..f1e195656 --- /dev/null +++ b/internal/restic/testdata/policy_keep_snapshots_22 @@ -0,0 +1,22 @@ +{ + "keep": [ + { + "time": "2016-01-18T12:02:03Z", + "tree": null, + "paths": null + } + ], + "reasons": [ + { + "snapshot": { + "time": "2016-01-18T12:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 2d" + ], + "counters": {} + } + ] +} \ No newline at end of file diff --git a/internal/restic/testdata/policy_keep_snapshots_23 b/internal/restic/testdata/policy_keep_snapshots_23 new file mode 100644 index 000000000..fb38d7347 --- /dev/null +++ b/internal/restic/testdata/policy_keep_snapshots_23 @@ -0,0 +1,54 @@ +{ + "keep": [ + { + "time": "2016-01-18T12:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-12T21:08:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-12T21:02:03Z", + "tree": null, + "paths": null + } + ], + "reasons": [ + { + "snapshot": { + "time": "2016-01-18T12:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 7d" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-12T21:08:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 7d" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-12T21:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 7d" + ], + "counters": {} + } + ] +} \ No newline at end of file diff --git a/internal/restic/testdata/policy_keep_snapshots_24 b/internal/restic/testdata/policy_keep_snapshots_24 new file mode 100644 index 000000000..53c6ba912 --- /dev/null +++ b/internal/restic/testdata/policy_keep_snapshots_24 @@ -0,0 +1,310 @@ +{ + "keep": [ + { + "time": "2016-01-18T12:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-12T21:08:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-12T21:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-09T21:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-08T20:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-07T10:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-06T08:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-05T09:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T16:23:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T12:30:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T12:28:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T12:24:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T12:23:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T11:23:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T10:23:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-03T07:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-01T07:08:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-01T01:03:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-01T01:02:03Z", + "tree": null, + "paths": null + } + ], + "reasons": [ + { + "snapshot": { + "time": "2016-01-18T12:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1m" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-12T21:08:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1m" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-12T21:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1m" + ], + "counters": {} + }, + { 
+ "snapshot": { + "time": "2016-01-09T21:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1m" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-08T20:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1m" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-07T10:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1m" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-06T08:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1m" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-05T09:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1m" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-04T16:23:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1m" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-04T12:30:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1m" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-04T12:28:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1m" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-04T12:24:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1m" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-04T12:23:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1m" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-04T11:23:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1m" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-04T10:23:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1m" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-03T07:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1m" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-01T07:08:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1m" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-01T01:03:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1m" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-01T01:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1m" + ], + "counters": {} + } + ] +} \ No newline at end of file diff --git a/internal/restic/testdata/policy_keep_snapshots_25 b/internal/restic/testdata/policy_keep_snapshots_25 new file mode 100644 index 000000000..e99d505ad --- /dev/null +++ b/internal/restic/testdata/policy_keep_snapshots_25 @@ -0,0 +1,310 @@ +{ + "keep": [ + { + "time": "2016-01-18T12:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-12T21:08:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-12T21:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-09T21:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-08T20:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-07T10:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-06T08:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-05T09:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T16:23:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T12:30:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T12:28:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T12:24:03Z", + "tree": null, + "paths": null + }, + { + 
"time": "2016-01-04T12:23:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T11:23:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T10:23:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-03T07:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-01T07:08:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-01T01:03:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-01T01:02:03Z", + "tree": null, + "paths": null + } + ], + "reasons": [ + { + "snapshot": { + "time": "2016-01-18T12:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1m14d" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-12T21:08:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1m14d" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-12T21:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1m14d" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-09T21:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1m14d" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-08T20:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1m14d" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-07T10:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1m14d" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-06T08:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1m14d" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-05T09:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1m14d" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-04T16:23:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1m14d" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-04T12:30:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1m14d" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-04T12:28:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1m14d" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-04T12:24:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1m14d" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-04T12:23:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1m14d" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-04T11:23:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1m14d" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-04T10:23:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1m14d" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-03T07:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1m14d" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-01T07:08:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1m14d" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-01T01:03:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1m14d" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-01T01:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1m14d" + ], + "counters": {} + } + ] +} \ No newline at end of file diff --git a/internal/restic/testdata/policy_keep_snapshots_26 b/internal/restic/testdata/policy_keep_snapshots_26 new file mode 
100644 index 000000000..61703f8fe --- /dev/null +++ b/internal/restic/testdata/policy_keep_snapshots_26 @@ -0,0 +1,1044 @@ +{ + "keep": [ + { + "time": "2016-01-18T12:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-12T21:08:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-12T21:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-09T21:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-08T20:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-07T10:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-06T08:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-05T09:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T16:23:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T12:30:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T12:28:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T12:24:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T12:23:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T11:23:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T10:23:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-03T07:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-01T07:08:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-01T01:03:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-01T01:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2015-11-22T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-11-21T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-11-20T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-11-18T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-11-15T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-11-13T10:20:30.1Z", + "tree": null, + "paths": null + }, + { + "time": "2015-11-13T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-11-12T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-11-10T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-11-08T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-10-22T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-10-22T10:20:30Z", + "tree": null, + "paths": [ + "path1", + "path2" + ], + "tags": [ + "foo", + "bar" + ] + }, + { + "time": "2015-10-22T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo", + "bar" + ] + }, + { + "time": "2015-10-22T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-10-22T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo", + "bar" + ] + }, + { + "time": "2015-10-20T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-10-11T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-10-10T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-10-09T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-10-08T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-10-06T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-10-05T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-10-02T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-10-01T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-09-22T10:20:30Z", + "tree": null, + "paths": 
null + }, + { + "time": "2015-09-20T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-09-11T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-09-10T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-09-09T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-09-08T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-09-06T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-09-05T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-09-02T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-09-01T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-08-22T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-08-21T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-08-20T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-08-18T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-08-15T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-08-13T10:20:30.1Z", + "tree": null, + "paths": null + }, + { + "time": "2015-08-13T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-08-12T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-08-10T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-08-08T10:20:30Z", + "tree": null, + "paths": null + } + ], + "reasons": [ + { + "snapshot": { + "time": "2016-01-18T12:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y1m1d" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-12T21:08:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y1m1d" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-12T21:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y1m1d" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-09T21:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y1m1d" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-08T20:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y1m1d" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-07T10:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y1m1d" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-06T08:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y1m1d" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-05T09:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y1m1d" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-04T16:23:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y1m1d" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-04T12:30:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y1m1d" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-04T12:28:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y1m1d" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-04T12:24:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y1m1d" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-04T12:23:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y1m1d" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-04T11:23:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y1m1d" + ], + 
"counters": {} + }, + { + "snapshot": { + "time": "2016-01-04T10:23:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y1m1d" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-03T07:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y1m1d" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-01T07:08:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y1m1d" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-01T01:03:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y1m1d" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-01T01:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y1m1d" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-11-22T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y1m1d" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-11-21T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y1m1d" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-11-20T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y1m1d" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-11-18T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y1m1d" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-11-15T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y1m1d" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-11-13T10:20:30.1Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y1m1d" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-11-13T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y1m1d" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-11-12T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y1m1d" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-11-10T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y1m1d" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-11-08T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y1m1d" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-10-22T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y1m1d" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-10-22T10:20:30Z", + "tree": null, + "paths": [ + "path1", + "path2" + ], + "tags": [ + "foo", + "bar" + ] + }, + "matches": [ + "within 1y1m1d" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-10-22T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo", + "bar" + ] + }, + "matches": [ + "within 1y1m1d" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-10-22T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y1m1d" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-10-22T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo", + "bar" + ] + }, + "matches": [ + "within 1y1m1d" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-10-20T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y1m1d" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-10-11T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y1m1d" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-10-10T10:20:30Z", + "tree": null, + "paths": null 
+ }, + "matches": [ + "within 1y1m1d" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-10-09T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y1m1d" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-10-08T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y1m1d" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-10-06T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y1m1d" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-10-05T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y1m1d" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-10-02T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y1m1d" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-10-01T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y1m1d" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-09-22T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y1m1d" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-09-20T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y1m1d" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-09-11T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y1m1d" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-09-10T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y1m1d" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-09-09T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y1m1d" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-09-08T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y1m1d" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-09-06T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y1m1d" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-09-05T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y1m1d" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-09-02T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y1m1d" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-09-01T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y1m1d" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-08-22T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y1m1d" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-08-21T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y1m1d" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-08-20T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y1m1d" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-08-18T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y1m1d" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-08-15T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y1m1d" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-08-13T10:20:30.1Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y1m1d" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-08-13T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y1m1d" + ], + "counters": {} + }, + { + 
"snapshot": { + "time": "2015-08-12T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y1m1d" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-08-10T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y1m1d" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-08-08T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y1m1d" + ], + "counters": {} + } + ] +} \ No newline at end of file diff --git a/internal/restic/testdata/policy_keep_snapshots_27 b/internal/restic/testdata/policy_keep_snapshots_27 new file mode 100644 index 000000000..a70c49525 --- /dev/null +++ b/internal/restic/testdata/policy_keep_snapshots_27 @@ -0,0 +1,150 @@ +{ + "keep": [ + { + "time": "2016-01-18T12:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-12T21:08:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-12T21:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-09T21:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-08T20:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-07T10:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-06T08:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-05T09:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T16:23:03Z", + "tree": null, + "paths": null + } + ], + "reasons": [ + { + "snapshot": { + "time": "2016-01-18T12:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 13d23h" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-12T21:08:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 13d23h" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-12T21:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 13d23h" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-09T21:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 13d23h" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-08T20:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 13d23h" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-07T10:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 13d23h" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-06T08:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 13d23h" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-05T09:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 13d23h" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-04T16:23:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 13d23h" + ], + "counters": {} + } + ] +} \ No newline at end of file diff --git a/internal/restic/testdata/policy_keep_snapshots_28 b/internal/restic/testdata/policy_keep_snapshots_28 new file mode 100644 index 000000000..d78171a2f --- /dev/null +++ b/internal/restic/testdata/policy_keep_snapshots_28 @@ -0,0 +1,374 @@ +{ + "keep": [ + { + "time": "2016-01-18T12:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-12T21:08:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-12T21:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-09T21:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-08T20:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-07T10:02:03Z", + "tree": null, + "paths": null + }, + { 
+ "time": "2016-01-06T08:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-05T09:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T16:23:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T12:30:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T12:28:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T12:24:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T12:23:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T11:23:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T10:23:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-03T07:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-01T07:08:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-01T01:03:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-01T01:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2015-11-22T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-11-21T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-11-20T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-11-18T10:20:30Z", + "tree": null, + "paths": null + } + ], + "reasons": [ + { + "snapshot": { + "time": "2016-01-18T12:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 2m2h" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-12T21:08:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 2m2h" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-12T21:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 2m2h" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-09T21:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 2m2h" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-08T20:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 2m2h" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-07T10:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 2m2h" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-06T08:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 2m2h" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-05T09:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 2m2h" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-04T16:23:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 2m2h" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-04T12:30:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 2m2h" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-04T12:28:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 2m2h" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-04T12:24:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 2m2h" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-04T12:23:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 2m2h" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-04T11:23:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 2m2h" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-04T10:23:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 2m2h" + ], + "counters": {} + }, + { + "snapshot": { + "time": 
"2016-01-03T07:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 2m2h" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-01T07:08:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 2m2h" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-01T01:03:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 2m2h" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-01T01:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 2m2h" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-11-22T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 2m2h" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-11-21T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 2m2h" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-11-20T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 2m2h" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-11-18T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 2m2h" + ], + "counters": {} + } + ] +} \ No newline at end of file diff --git a/internal/restic/testdata/policy_keep_snapshots_29 b/internal/restic/testdata/policy_keep_snapshots_29 new file mode 100644 index 000000000..172d3000f --- /dev/null +++ b/internal/restic/testdata/policy_keep_snapshots_29 @@ -0,0 +1,1132 @@ +{ + "keep": [ + { + "time": "2016-01-18T12:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-12T21:08:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-12T21:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-09T21:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-08T20:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-07T10:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-06T08:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-05T09:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T16:23:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T12:30:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T12:28:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T12:24:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T12:23:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T11:23:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T10:23:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-03T07:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-01T07:08:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-01T01:03:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-01T01:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2015-11-22T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-11-21T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-11-20T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-11-18T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-11-15T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-11-13T10:20:30.1Z", + "tree": null, + "paths": null + }, + { + "time": "2015-11-13T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-11-12T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-11-10T10:20:30Z", + "tree": null, + "paths": 
null + }, + { + "time": "2015-11-08T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-10-22T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-10-22T10:20:30Z", + "tree": null, + "paths": [ + "path1", + "path2" + ], + "tags": [ + "foo", + "bar" + ] + }, + { + "time": "2015-10-22T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo", + "bar" + ] + }, + { + "time": "2015-10-22T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-10-22T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo", + "bar" + ] + }, + { + "time": "2015-10-20T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-10-11T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-10-10T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-10-09T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-10-08T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-10-06T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-10-05T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-10-02T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-10-01T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-09-22T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-09-20T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-09-11T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-09-10T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-09-09T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-09-08T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-09-06T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-09-05T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-09-02T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-09-01T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-08-22T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-08-21T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-08-20T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-08-18T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-08-15T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-08-13T10:20:30.1Z", + "tree": null, + "paths": null + }, + { + "time": "2015-08-13T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-08-12T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-08-10T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-08-08T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2014-11-22T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2014-11-21T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2014-11-20T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2014-11-18T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2014-11-15T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo", + "bar" + ] + } + ], + "reasons": [ + { + "snapshot": { + "time": "2016-01-18T12:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y2m3d3h" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-12T21:08:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y2m3d3h" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-12T21:02:03Z", + 
"tree": null, + "paths": null + }, + "matches": [ + "within 1y2m3d3h" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-09T21:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y2m3d3h" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-08T20:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y2m3d3h" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-07T10:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y2m3d3h" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-06T08:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y2m3d3h" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-05T09:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y2m3d3h" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-04T16:23:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y2m3d3h" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-04T12:30:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y2m3d3h" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-04T12:28:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y2m3d3h" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-04T12:24:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y2m3d3h" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-04T12:23:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y2m3d3h" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-04T11:23:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y2m3d3h" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-04T10:23:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y2m3d3h" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-03T07:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y2m3d3h" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-01T07:08:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y2m3d3h" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-01T01:03:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y2m3d3h" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2016-01-01T01:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y2m3d3h" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-11-22T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y2m3d3h" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-11-21T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y2m3d3h" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-11-20T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y2m3d3h" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-11-18T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y2m3d3h" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-11-15T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y2m3d3h" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-11-13T10:20:30.1Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y2m3d3h" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-11-13T10:20:30Z", + "tree": null, + "paths": null + 
}, + "matches": [ + "within 1y2m3d3h" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-11-12T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y2m3d3h" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-11-10T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y2m3d3h" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-11-08T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y2m3d3h" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-10-22T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y2m3d3h" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-10-22T10:20:30Z", + "tree": null, + "paths": [ + "path1", + "path2" + ], + "tags": [ + "foo", + "bar" + ] + }, + "matches": [ + "within 1y2m3d3h" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-10-22T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo", + "bar" + ] + }, + "matches": [ + "within 1y2m3d3h" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-10-22T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y2m3d3h" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-10-22T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo", + "bar" + ] + }, + "matches": [ + "within 1y2m3d3h" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-10-20T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y2m3d3h" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-10-11T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y2m3d3h" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-10-10T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y2m3d3h" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-10-09T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y2m3d3h" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-10-08T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y2m3d3h" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-10-06T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y2m3d3h" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-10-05T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y2m3d3h" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-10-02T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y2m3d3h" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-10-01T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y2m3d3h" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-09-22T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y2m3d3h" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-09-20T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y2m3d3h" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-09-11T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y2m3d3h" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-09-10T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y2m3d3h" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-09-09T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y2m3d3h" + ], + "counters": {} + }, 
+ { + "snapshot": { + "time": "2015-09-08T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y2m3d3h" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-09-06T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y2m3d3h" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-09-05T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y2m3d3h" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-09-02T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y2m3d3h" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-09-01T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y2m3d3h" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-08-22T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y2m3d3h" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-08-21T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y2m3d3h" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-08-20T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y2m3d3h" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-08-18T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y2m3d3h" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-08-15T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y2m3d3h" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-08-13T10:20:30.1Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y2m3d3h" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-08-13T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y2m3d3h" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-08-12T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y2m3d3h" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-08-10T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y2m3d3h" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2015-08-08T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y2m3d3h" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2014-11-22T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y2m3d3h" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2014-11-21T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y2m3d3h" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2014-11-20T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y2m3d3h" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2014-11-18T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "within 1y2m3d3h" + ], + "counters": {} + }, + { + "snapshot": { + "time": "2014-11-15T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo", + "bar" + ] + }, + "matches": [ + "within 1y2m3d3h" + ], + "counters": {} + } + ] +} \ No newline at end of file diff --git a/internal/restic/testdata/policy_keep_snapshots_3 b/internal/restic/testdata/policy_keep_snapshots_3 new file mode 100644 index 000000000..265e52130 --- /dev/null +++ b/internal/restic/testdata/policy_keep_snapshots_3 @@ -0,0 +1,1914 @@ +{ + "keep": [ + { + "time": "2016-01-18T12:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-12T21:08:03Z", + "tree": null, + "paths": null + }, + { + 
"time": "2016-01-12T21:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-09T21:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-08T20:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-07T10:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-06T08:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-05T09:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T16:23:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T12:30:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T12:28:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T12:24:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T12:23:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T11:23:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T10:23:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-03T07:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-01T07:08:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-01T01:03:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-01T01:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2015-11-22T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-11-21T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-11-20T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-11-18T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-11-15T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-11-13T10:20:30.1Z", + "tree": null, + "paths": null + }, + { + "time": "2015-11-13T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-11-12T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-11-10T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-11-08T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-10-22T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-10-22T10:20:30Z", + "tree": null, + "paths": [ + "path1", + "path2" + ], + "tags": [ + "foo", + "bar" + ] + }, + { + "time": "2015-10-22T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo", + "bar" + ] + }, + { + "time": "2015-10-22T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-10-22T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo", + "bar" + ] + }, + { + "time": "2015-10-20T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-10-11T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-10-10T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-10-09T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-10-08T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-10-06T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-10-05T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-10-02T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-10-01T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-09-22T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-09-20T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-09-11T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-09-10T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-09-09T10:20:30Z", + "tree": null, + 
"paths": null + }, + { + "time": "2015-09-08T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-09-06T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-09-05T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-09-02T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-09-01T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-08-22T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-08-21T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-08-20T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-08-18T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-08-15T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-08-13T10:20:30.1Z", + "tree": null, + "paths": null + }, + { + "time": "2015-08-13T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-08-12T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-08-10T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-08-08T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2014-11-22T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2014-11-21T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2014-11-20T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2014-11-18T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2014-11-15T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo", + "bar" + ] + }, + { + "time": "2014-11-13T10:20:30.1Z", + "tree": null, + "paths": null, + "tags": [ + "bar" + ] + }, + { + "time": "2014-11-13T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + { + "time": "2014-11-12T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + { + "time": "2014-11-10T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + { + "time": "2014-11-08T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + { + "time": "2014-10-22T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + { + "time": "2014-10-20T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + { + "time": "2014-10-11T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + { + "time": "2014-10-10T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + { + "time": "2014-10-09T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + { + "time": "2014-10-08T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + { + "time": "2014-10-06T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + { + "time": "2014-10-05T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + { + "time": "2014-10-02T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + { + "time": "2014-10-01T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + { + "time": "2014-09-22T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2014-09-20T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2014-09-11T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2014-09-10T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2014-09-09T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2014-09-08T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2014-09-06T10:20:30Z", + "tree": 
null, + "paths": null + }, + { + "time": "2014-09-05T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2014-09-02T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2014-09-01T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2014-08-22T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2014-08-21T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2014-08-20T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2014-08-18T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2014-08-15T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2014-08-13T10:20:30.1Z", + "tree": null, + "paths": null + } + ], + "reasons": [ + { + "snapshot": { + "time": "2016-01-18T12:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 98 + } + }, + { + "snapshot": { + "time": "2016-01-12T21:08:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 97 + } + }, + { + "snapshot": { + "time": "2016-01-12T21:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 96 + } + }, + { + "snapshot": { + "time": "2016-01-09T21:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 95 + } + }, + { + "snapshot": { + "time": "2016-01-08T20:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 94 + } + }, + { + "snapshot": { + "time": "2016-01-07T10:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 93 + } + }, + { + "snapshot": { + "time": "2016-01-06T08:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 92 + } + }, + { + "snapshot": { + "time": "2016-01-05T09:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 91 + } + }, + { + "snapshot": { + "time": "2016-01-04T16:23:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 90 + } + }, + { + "snapshot": { + "time": "2016-01-04T12:30:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 89 + } + }, + { + "snapshot": { + "time": "2016-01-04T12:28:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 88 + } + }, + { + "snapshot": { + "time": "2016-01-04T12:24:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 87 + } + }, + { + "snapshot": { + "time": "2016-01-04T12:23:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 86 + } + }, + { + "snapshot": { + "time": "2016-01-04T11:23:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 85 + } + }, + { + "snapshot": { + "time": "2016-01-04T10:23:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 84 + } + }, + { + "snapshot": { + "time": "2016-01-03T07:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 83 + } + }, + { + "snapshot": { + "time": "2016-01-01T07:08:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 82 + 
} + }, + { + "snapshot": { + "time": "2016-01-01T01:03:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 81 + } + }, + { + "snapshot": { + "time": "2016-01-01T01:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 80 + } + }, + { + "snapshot": { + "time": "2015-11-22T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 79 + } + }, + { + "snapshot": { + "time": "2015-11-21T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 78 + } + }, + { + "snapshot": { + "time": "2015-11-20T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 77 + } + }, + { + "snapshot": { + "time": "2015-11-18T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 76 + } + }, + { + "snapshot": { + "time": "2015-11-15T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 75 + } + }, + { + "snapshot": { + "time": "2015-11-13T10:20:30.1Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 74 + } + }, + { + "snapshot": { + "time": "2015-11-13T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 73 + } + }, + { + "snapshot": { + "time": "2015-11-12T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 72 + } + }, + { + "snapshot": { + "time": "2015-11-10T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 71 + } + }, + { + "snapshot": { + "time": "2015-11-08T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 70 + } + }, + { + "snapshot": { + "time": "2015-10-22T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 69 + } + }, + { + "snapshot": { + "time": "2015-10-22T10:20:30Z", + "tree": null, + "paths": [ + "path1", + "path2" + ], + "tags": [ + "foo", + "bar" + ] + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 68 + } + }, + { + "snapshot": { + "time": "2015-10-22T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo", + "bar" + ] + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 67 + } + }, + { + "snapshot": { + "time": "2015-10-22T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 66 + } + }, + { + "snapshot": { + "time": "2015-10-22T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo", + "bar" + ] + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 65 + } + }, + { + "snapshot": { + "time": "2015-10-20T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 64 + } + }, + { + "snapshot": { + "time": "2015-10-11T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 63 + } + }, + { + "snapshot": { + "time": "2015-10-10T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 62 + } + }, + { + "snapshot": { + "time": "2015-10-09T10:20:30Z", + "tree": null, + "paths": 
null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 61 + } + }, + { + "snapshot": { + "time": "2015-10-08T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 60 + } + }, + { + "snapshot": { + "time": "2015-10-06T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 59 + } + }, + { + "snapshot": { + "time": "2015-10-05T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 58 + } + }, + { + "snapshot": { + "time": "2015-10-02T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 57 + } + }, + { + "snapshot": { + "time": "2015-10-01T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 56 + } + }, + { + "snapshot": { + "time": "2015-09-22T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 55 + } + }, + { + "snapshot": { + "time": "2015-09-20T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 54 + } + }, + { + "snapshot": { + "time": "2015-09-11T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 53 + } + }, + { + "snapshot": { + "time": "2015-09-10T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 52 + } + }, + { + "snapshot": { + "time": "2015-09-09T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 51 + } + }, + { + "snapshot": { + "time": "2015-09-08T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 50 + } + }, + { + "snapshot": { + "time": "2015-09-06T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 49 + } + }, + { + "snapshot": { + "time": "2015-09-05T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 48 + } + }, + { + "snapshot": { + "time": "2015-09-02T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 47 + } + }, + { + "snapshot": { + "time": "2015-09-01T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 46 + } + }, + { + "snapshot": { + "time": "2015-08-22T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 45 + } + }, + { + "snapshot": { + "time": "2015-08-21T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 44 + } + }, + { + "snapshot": { + "time": "2015-08-20T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 43 + } + }, + { + "snapshot": { + "time": "2015-08-18T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 42 + } + }, + { + "snapshot": { + "time": "2015-08-15T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 41 + } + }, + { + "snapshot": { + "time": "2015-08-13T10:20:30.1Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + 
], + "counters": { + "last": 40 + } + }, + { + "snapshot": { + "time": "2015-08-13T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 39 + } + }, + { + "snapshot": { + "time": "2015-08-12T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 38 + } + }, + { + "snapshot": { + "time": "2015-08-10T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 37 + } + }, + { + "snapshot": { + "time": "2015-08-08T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 36 + } + }, + { + "snapshot": { + "time": "2014-11-22T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 35 + } + }, + { + "snapshot": { + "time": "2014-11-21T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 34 + } + }, + { + "snapshot": { + "time": "2014-11-20T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 33 + } + }, + { + "snapshot": { + "time": "2014-11-18T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 32 + } + }, + { + "snapshot": { + "time": "2014-11-15T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo", + "bar" + ] + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 31 + } + }, + { + "snapshot": { + "time": "2014-11-13T10:20:30.1Z", + "tree": null, + "paths": null, + "tags": [ + "bar" + ] + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 30 + } + }, + { + "snapshot": { + "time": "2014-11-13T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 29 + } + }, + { + "snapshot": { + "time": "2014-11-12T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 28 + } + }, + { + "snapshot": { + "time": "2014-11-10T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 27 + } + }, + { + "snapshot": { + "time": "2014-11-08T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 26 + } + }, + { + "snapshot": { + "time": "2014-10-22T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 25 + } + }, + { + "snapshot": { + "time": "2014-10-20T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 24 + } + }, + { + "snapshot": { + "time": "2014-10-11T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 23 + } + }, + { + "snapshot": { + "time": "2014-10-10T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 22 + } + }, + { + "snapshot": { + "time": "2014-10-09T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 21 + } + }, + { + "snapshot": { + "time": "2014-10-08T10:20:30Z", 
+ "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 20 + } + }, + { + "snapshot": { + "time": "2014-10-06T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 19 + } + }, + { + "snapshot": { + "time": "2014-10-05T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 18 + } + }, + { + "snapshot": { + "time": "2014-10-02T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 17 + } + }, + { + "snapshot": { + "time": "2014-10-01T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 16 + } + }, + { + "snapshot": { + "time": "2014-09-22T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 15 + } + }, + { + "snapshot": { + "time": "2014-09-20T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 14 + } + }, + { + "snapshot": { + "time": "2014-09-11T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 13 + } + }, + { + "snapshot": { + "time": "2014-09-10T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 12 + } + }, + { + "snapshot": { + "time": "2014-09-09T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 11 + } + }, + { + "snapshot": { + "time": "2014-09-08T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 10 + } + }, + { + "snapshot": { + "time": "2014-09-06T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 9 + } + }, + { + "snapshot": { + "time": "2014-09-05T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 8 + } + }, + { + "snapshot": { + "time": "2014-09-02T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 7 + } + }, + { + "snapshot": { + "time": "2014-09-01T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 6 + } + }, + { + "snapshot": { + "time": "2014-08-22T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 5 + } + }, + { + "snapshot": { + "time": "2014-08-21T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 4 + } + }, + { + "snapshot": { + "time": "2014-08-20T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 3 + } + }, + { + "snapshot": { + "time": "2014-08-18T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 2 + } + }, + { + "snapshot": { + "time": "2014-08-15T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 1 + } + }, + { + "snapshot": { + "time": "2014-08-13T10:20:30.1Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": {} + } 
+ ] +} \ No newline at end of file diff --git a/internal/restic/testdata/policy_keep_snapshots_4 b/internal/restic/testdata/policy_keep_snapshots_4 new file mode 100644 index 000000000..8657da8c8 --- /dev/null +++ b/internal/restic/testdata/policy_keep_snapshots_4 @@ -0,0 +1,1988 @@ +{ + "keep": [ + { + "time": "2016-01-18T12:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-12T21:08:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-12T21:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-09T21:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-08T20:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-07T10:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-06T08:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-05T09:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T16:23:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T12:30:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T12:28:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T12:24:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T12:23:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T11:23:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T10:23:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-03T07:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-01T07:08:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-01T01:03:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-01T01:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2015-11-22T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-11-21T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-11-20T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-11-18T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-11-15T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-11-13T10:20:30.1Z", + "tree": null, + "paths": null + }, + { + "time": "2015-11-13T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-11-12T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-11-10T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-11-08T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-10-22T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-10-22T10:20:30Z", + "tree": null, + "paths": [ + "path1", + "path2" + ], + "tags": [ + "foo", + "bar" + ] + }, + { + "time": "2015-10-22T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo", + "bar" + ] + }, + { + "time": "2015-10-22T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-10-22T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo", + "bar" + ] + }, + { + "time": "2015-10-20T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-10-11T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-10-10T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-10-09T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-10-08T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-10-06T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-10-05T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-10-02T10:20:30Z", + "tree": null, 
+ "paths": null + }, + { + "time": "2015-10-01T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-09-22T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-09-20T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-09-11T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-09-10T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-09-09T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-09-08T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-09-06T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-09-05T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-09-02T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-09-01T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-08-22T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-08-21T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-08-20T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-08-18T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-08-15T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-08-13T10:20:30.1Z", + "tree": null, + "paths": null + }, + { + "time": "2015-08-13T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-08-12T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-08-10T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-08-08T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2014-11-22T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2014-11-21T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2014-11-20T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2014-11-18T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2014-11-15T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo", + "bar" + ] + }, + { + "time": "2014-11-13T10:20:30.1Z", + "tree": null, + "paths": null, + "tags": [ + "bar" + ] + }, + { + "time": "2014-11-13T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + { + "time": "2014-11-12T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + { + "time": "2014-11-10T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + { + "time": "2014-11-08T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + { + "time": "2014-10-22T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + { + "time": "2014-10-20T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + { + "time": "2014-10-11T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + { + "time": "2014-10-10T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + { + "time": "2014-10-09T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + { + "time": "2014-10-08T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + { + "time": "2014-10-06T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + { + "time": "2014-10-05T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + { + "time": "2014-10-02T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + { + "time": "2014-10-01T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + { + "time": "2014-09-22T10:20:30Z", + 
"tree": null, + "paths": null + }, + { + "time": "2014-09-20T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2014-09-11T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2014-09-10T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2014-09-09T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2014-09-08T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2014-09-06T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2014-09-05T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2014-09-02T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2014-09-01T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2014-08-22T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2014-08-21T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2014-08-20T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2014-08-18T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2014-08-15T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2014-08-13T10:20:30.1Z", + "tree": null, + "paths": null + }, + { + "time": "2014-08-13T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2014-08-12T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2014-08-10T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2014-08-08T10:20:30Z", + "tree": null, + "paths": null + } + ], + "reasons": [ + { + "snapshot": { + "time": "2016-01-18T12:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 199 + } + }, + { + "snapshot": { + "time": "2016-01-12T21:08:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 198 + } + }, + { + "snapshot": { + "time": "2016-01-12T21:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 197 + } + }, + { + "snapshot": { + "time": "2016-01-09T21:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 196 + } + }, + { + "snapshot": { + "time": "2016-01-08T20:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 195 + } + }, + { + "snapshot": { + "time": "2016-01-07T10:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 194 + } + }, + { + "snapshot": { + "time": "2016-01-06T08:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 193 + } + }, + { + "snapshot": { + "time": "2016-01-05T09:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 192 + } + }, + { + "snapshot": { + "time": "2016-01-04T16:23:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 191 + } + }, + { + "snapshot": { + "time": "2016-01-04T12:30:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 190 + } + }, + { + "snapshot": { + "time": "2016-01-04T12:28:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 189 + } + }, + { + "snapshot": { + "time": "2016-01-04T12:24:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 188 + } + }, + { + "snapshot": { + "time": "2016-01-04T12:23:03Z", + 
"tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 187 + } + }, + { + "snapshot": { + "time": "2016-01-04T11:23:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 186 + } + }, + { + "snapshot": { + "time": "2016-01-04T10:23:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 185 + } + }, + { + "snapshot": { + "time": "2016-01-03T07:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 184 + } + }, + { + "snapshot": { + "time": "2016-01-01T07:08:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 183 + } + }, + { + "snapshot": { + "time": "2016-01-01T01:03:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 182 + } + }, + { + "snapshot": { + "time": "2016-01-01T01:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 181 + } + }, + { + "snapshot": { + "time": "2015-11-22T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 180 + } + }, + { + "snapshot": { + "time": "2015-11-21T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 179 + } + }, + { + "snapshot": { + "time": "2015-11-20T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 178 + } + }, + { + "snapshot": { + "time": "2015-11-18T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 177 + } + }, + { + "snapshot": { + "time": "2015-11-15T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 176 + } + }, + { + "snapshot": { + "time": "2015-11-13T10:20:30.1Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 175 + } + }, + { + "snapshot": { + "time": "2015-11-13T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 174 + } + }, + { + "snapshot": { + "time": "2015-11-12T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 173 + } + }, + { + "snapshot": { + "time": "2015-11-10T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 172 + } + }, + { + "snapshot": { + "time": "2015-11-08T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 171 + } + }, + { + "snapshot": { + "time": "2015-10-22T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 170 + } + }, + { + "snapshot": { + "time": "2015-10-22T10:20:30Z", + "tree": null, + "paths": [ + "path1", + "path2" + ], + "tags": [ + "foo", + "bar" + ] + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 169 + } + }, + { + "snapshot": { + "time": "2015-10-22T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo", + "bar" + ] + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 168 + } + }, + { + "snapshot": { + "time": "2015-10-22T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 167 
+ } + }, + { + "snapshot": { + "time": "2015-10-22T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo", + "bar" + ] + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 166 + } + }, + { + "snapshot": { + "time": "2015-10-20T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 165 + } + }, + { + "snapshot": { + "time": "2015-10-11T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 164 + } + }, + { + "snapshot": { + "time": "2015-10-10T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 163 + } + }, + { + "snapshot": { + "time": "2015-10-09T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 162 + } + }, + { + "snapshot": { + "time": "2015-10-08T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 161 + } + }, + { + "snapshot": { + "time": "2015-10-06T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 160 + } + }, + { + "snapshot": { + "time": "2015-10-05T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 159 + } + }, + { + "snapshot": { + "time": "2015-10-02T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 158 + } + }, + { + "snapshot": { + "time": "2015-10-01T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 157 + } + }, + { + "snapshot": { + "time": "2015-09-22T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 156 + } + }, + { + "snapshot": { + "time": "2015-09-20T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 155 + } + }, + { + "snapshot": { + "time": "2015-09-11T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 154 + } + }, + { + "snapshot": { + "time": "2015-09-10T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 153 + } + }, + { + "snapshot": { + "time": "2015-09-09T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 152 + } + }, + { + "snapshot": { + "time": "2015-09-08T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 151 + } + }, + { + "snapshot": { + "time": "2015-09-06T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 150 + } + }, + { + "snapshot": { + "time": "2015-09-05T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 149 + } + }, + { + "snapshot": { + "time": "2015-09-02T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 148 + } + }, + { + "snapshot": { + "time": "2015-09-01T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 147 + } + }, + { + "snapshot": { + "time": "2015-08-22T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + 
"last": 146 + } + }, + { + "snapshot": { + "time": "2015-08-21T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 145 + } + }, + { + "snapshot": { + "time": "2015-08-20T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 144 + } + }, + { + "snapshot": { + "time": "2015-08-18T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 143 + } + }, + { + "snapshot": { + "time": "2015-08-15T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 142 + } + }, + { + "snapshot": { + "time": "2015-08-13T10:20:30.1Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 141 + } + }, + { + "snapshot": { + "time": "2015-08-13T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 140 + } + }, + { + "snapshot": { + "time": "2015-08-12T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 139 + } + }, + { + "snapshot": { + "time": "2015-08-10T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 138 + } + }, + { + "snapshot": { + "time": "2015-08-08T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 137 + } + }, + { + "snapshot": { + "time": "2014-11-22T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 136 + } + }, + { + "snapshot": { + "time": "2014-11-21T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 135 + } + }, + { + "snapshot": { + "time": "2014-11-20T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 134 + } + }, + { + "snapshot": { + "time": "2014-11-18T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 133 + } + }, + { + "snapshot": { + "time": "2014-11-15T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo", + "bar" + ] + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 132 + } + }, + { + "snapshot": { + "time": "2014-11-13T10:20:30.1Z", + "tree": null, + "paths": null, + "tags": [ + "bar" + ] + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 131 + } + }, + { + "snapshot": { + "time": "2014-11-13T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 130 + } + }, + { + "snapshot": { + "time": "2014-11-12T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 129 + } + }, + { + "snapshot": { + "time": "2014-11-10T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 128 + } + }, + { + "snapshot": { + "time": "2014-11-08T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 127 + } + }, + { + "snapshot": { + "time": "2014-10-22T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + "matches": [ + "last snapshot" + ], + "counters": { + 
"last": 126 + } + }, + { + "snapshot": { + "time": "2014-10-20T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 125 + } + }, + { + "snapshot": { + "time": "2014-10-11T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 124 + } + }, + { + "snapshot": { + "time": "2014-10-10T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 123 + } + }, + { + "snapshot": { + "time": "2014-10-09T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 122 + } + }, + { + "snapshot": { + "time": "2014-10-08T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 121 + } + }, + { + "snapshot": { + "time": "2014-10-06T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 120 + } + }, + { + "snapshot": { + "time": "2014-10-05T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 119 + } + }, + { + "snapshot": { + "time": "2014-10-02T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 118 + } + }, + { + "snapshot": { + "time": "2014-10-01T10:20:30Z", + "tree": null, + "paths": null, + "tags": [ + "foo" + ] + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 117 + } + }, + { + "snapshot": { + "time": "2014-09-22T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 116 + } + }, + { + "snapshot": { + "time": "2014-09-20T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 115 + } + }, + { + "snapshot": { + "time": "2014-09-11T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 114 + } + }, + { + "snapshot": { + "time": "2014-09-10T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 113 + } + }, + { + "snapshot": { + "time": "2014-09-09T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 112 + } + }, + { + "snapshot": { + "time": "2014-09-08T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 111 + } + }, + { + "snapshot": { + "time": "2014-09-06T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 110 + } + }, + { + "snapshot": { + "time": "2014-09-05T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 109 + } + }, + { + "snapshot": { + "time": "2014-09-02T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 108 + } + }, + { + "snapshot": { + "time": "2014-09-01T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 107 + } + }, + { + "snapshot": { + "time": "2014-08-22T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last 
snapshot" + ], + "counters": { + "last": 106 + } + }, + { + "snapshot": { + "time": "2014-08-21T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 105 + } + }, + { + "snapshot": { + "time": "2014-08-20T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 104 + } + }, + { + "snapshot": { + "time": "2014-08-18T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 103 + } + }, + { + "snapshot": { + "time": "2014-08-15T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 102 + } + }, + { + "snapshot": { + "time": "2014-08-13T10:20:30.1Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 101 + } + }, + { + "snapshot": { + "time": "2014-08-13T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 100 + } + }, + { + "snapshot": { + "time": "2014-08-12T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 99 + } + }, + { + "snapshot": { + "time": "2014-08-10T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 98 + } + }, + { + "snapshot": { + "time": "2014-08-08T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 97 + } + } + ] +} \ No newline at end of file diff --git a/internal/restic/testdata/policy_keep_snapshots_5 b/internal/restic/testdata/policy_keep_snapshots_5 new file mode 100644 index 000000000..8ed7a3d3e --- /dev/null +++ b/internal/restic/testdata/policy_keep_snapshots_5 @@ -0,0 +1,364 @@ +{ + "keep": [ + { + "time": "2016-01-18T12:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-12T21:08:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-09T21:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-08T20:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-07T10:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-06T08:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-05T09:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T16:23:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T12:30:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T11:23:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T10:23:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-03T07:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-01T07:08:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-01T01:03:03Z", + "tree": null, + "paths": null + }, + { + "time": "2015-11-22T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-11-21T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-11-20T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-11-18T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-11-15T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-11-13T10:20:30.1Z", + "tree": null, + "paths": null + } + ], + "reasons": [ + { + "snapshot": { + "time": "2016-01-18T12:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "hourly snapshot" + ], + "counters": { + "hourly": 19 + } + }, 
+ { + "snapshot": { + "time": "2016-01-12T21:08:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "hourly snapshot" + ], + "counters": { + "hourly": 18 + } + }, + { + "snapshot": { + "time": "2016-01-09T21:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "hourly snapshot" + ], + "counters": { + "hourly": 17 + } + }, + { + "snapshot": { + "time": "2016-01-08T20:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "hourly snapshot" + ], + "counters": { + "hourly": 16 + } + }, + { + "snapshot": { + "time": "2016-01-07T10:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "hourly snapshot" + ], + "counters": { + "hourly": 15 + } + }, + { + "snapshot": { + "time": "2016-01-06T08:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "hourly snapshot" + ], + "counters": { + "hourly": 14 + } + }, + { + "snapshot": { + "time": "2016-01-05T09:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "hourly snapshot" + ], + "counters": { + "hourly": 13 + } + }, + { + "snapshot": { + "time": "2016-01-04T16:23:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "hourly snapshot" + ], + "counters": { + "hourly": 12 + } + }, + { + "snapshot": { + "time": "2016-01-04T12:30:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "hourly snapshot" + ], + "counters": { + "hourly": 11 + } + }, + { + "snapshot": { + "time": "2016-01-04T11:23:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "hourly snapshot" + ], + "counters": { + "hourly": 10 + } + }, + { + "snapshot": { + "time": "2016-01-04T10:23:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "hourly snapshot" + ], + "counters": { + "hourly": 9 + } + }, + { + "snapshot": { + "time": "2016-01-03T07:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "hourly snapshot" + ], + "counters": { + "hourly": 8 + } + }, + { + "snapshot": { + "time": "2016-01-01T07:08:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "hourly snapshot" + ], + "counters": { + "hourly": 7 + } + }, + { + "snapshot": { + "time": "2016-01-01T01:03:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "hourly snapshot" + ], + "counters": { + "hourly": 6 + } + }, + { + "snapshot": { + "time": "2015-11-22T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "hourly snapshot" + ], + "counters": { + "hourly": 5 + } + }, + { + "snapshot": { + "time": "2015-11-21T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "hourly snapshot" + ], + "counters": { + "hourly": 4 + } + }, + { + "snapshot": { + "time": "2015-11-20T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "hourly snapshot" + ], + "counters": { + "hourly": 3 + } + }, + { + "snapshot": { + "time": "2015-11-18T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "hourly snapshot" + ], + "counters": { + "hourly": 2 + } + }, + { + "snapshot": { + "time": "2015-11-15T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "hourly snapshot" + ], + "counters": { + "hourly": 1 + } + }, + { + "snapshot": { + "time": "2015-11-13T10:20:30.1Z", + "tree": null, + "paths": null + }, + "matches": [ + "hourly snapshot" + ], + "counters": {} + } + ] +} \ No newline at end of file diff --git a/internal/restic/testdata/policy_keep_snapshots_6 b/internal/restic/testdata/policy_keep_snapshots_6 new file mode 100644 index 000000000..d72ad886d --- /dev/null +++ b/internal/restic/testdata/policy_keep_snapshots_6 @@ -0,0 +1,58 @@ +{ + "keep": [ + { + "time": 
"2016-01-18T12:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-12T21:08:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-09T21:02:03Z", + "tree": null, + "paths": null + } + ], + "reasons": [ + { + "snapshot": { + "time": "2016-01-18T12:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "daily snapshot" + ], + "counters": { + "daily": 2 + } + }, + { + "snapshot": { + "time": "2016-01-12T21:08:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "daily snapshot" + ], + "counters": { + "daily": 1 + } + }, + { + "snapshot": { + "time": "2016-01-09T21:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "daily snapshot" + ], + "counters": {} + } + ] +} \ No newline at end of file diff --git a/internal/restic/testdata/policy_keep_snapshots_7 b/internal/restic/testdata/policy_keep_snapshots_7 new file mode 100644 index 000000000..6177ebe94 --- /dev/null +++ b/internal/restic/testdata/policy_keep_snapshots_7 @@ -0,0 +1,184 @@ +{ + "keep": [ + { + "time": "2016-01-18T12:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-12T21:08:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-09T21:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-08T20:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-07T10:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-06T08:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-05T09:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T16:23:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-03T07:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-01T07:08:03Z", + "tree": null, + "paths": null + } + ], + "reasons": [ + { + "snapshot": { + "time": "2016-01-18T12:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "daily snapshot" + ], + "counters": { + "daily": 9 + } + }, + { + "snapshot": { + "time": "2016-01-12T21:08:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "daily snapshot" + ], + "counters": { + "daily": 8 + } + }, + { + "snapshot": { + "time": "2016-01-09T21:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "daily snapshot" + ], + "counters": { + "daily": 7 + } + }, + { + "snapshot": { + "time": "2016-01-08T20:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "daily snapshot" + ], + "counters": { + "daily": 6 + } + }, + { + "snapshot": { + "time": "2016-01-07T10:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "daily snapshot" + ], + "counters": { + "daily": 5 + } + }, + { + "snapshot": { + "time": "2016-01-06T08:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "daily snapshot" + ], + "counters": { + "daily": 4 + } + }, + { + "snapshot": { + "time": "2016-01-05T09:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "daily snapshot" + ], + "counters": { + "daily": 3 + } + }, + { + "snapshot": { + "time": "2016-01-04T16:23:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "daily snapshot" + ], + "counters": { + "daily": 2 + } + }, + { + "snapshot": { + "time": "2016-01-03T07:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "daily snapshot" + ], + "counters": { + "daily": 1 + } + }, + { + "snapshot": { + "time": "2016-01-01T07:08:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "daily snapshot" + ], + "counters": {} + } + ] +} \ No newline at end of file diff --git a/internal/restic/testdata/policy_keep_snapshots_8 
b/internal/restic/testdata/policy_keep_snapshots_8 new file mode 100644 index 000000000..d92aa1c20 --- /dev/null +++ b/internal/restic/testdata/policy_keep_snapshots_8 @@ -0,0 +1,544 @@ +{ + "keep": [ + { + "time": "2016-01-18T12:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-12T21:08:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-09T21:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-08T20:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-07T10:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-06T08:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-05T09:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T16:23:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-03T07:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-01T07:08:03Z", + "tree": null, + "paths": null + }, + { + "time": "2015-11-22T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-11-21T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-11-20T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-11-18T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-11-15T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-11-13T10:20:30.1Z", + "tree": null, + "paths": null + }, + { + "time": "2015-11-12T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-11-10T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-11-08T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-10-22T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-10-20T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-10-11T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-10-10T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-10-09T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-10-08T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-10-06T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-10-05T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-10-02T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-10-01T10:20:30Z", + "tree": null, + "paths": null + }, + { + "time": "2015-09-22T10:20:30Z", + "tree": null, + "paths": null + } + ], + "reasons": [ + { + "snapshot": { + "time": "2016-01-18T12:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "daily snapshot" + ], + "counters": { + "daily": 29 + } + }, + { + "snapshot": { + "time": "2016-01-12T21:08:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "daily snapshot" + ], + "counters": { + "daily": 28 + } + }, + { + "snapshot": { + "time": "2016-01-09T21:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "daily snapshot" + ], + "counters": { + "daily": 27 + } + }, + { + "snapshot": { + "time": "2016-01-08T20:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "daily snapshot" + ], + "counters": { + "daily": 26 + } + }, + { + "snapshot": { + "time": "2016-01-07T10:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "daily snapshot" + ], + "counters": { + "daily": 25 + } + }, + { + "snapshot": { + "time": "2016-01-06T08:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "daily snapshot" + ], + "counters": { + "daily": 24 + } + }, + { + "snapshot": { + "time": "2016-01-05T09:02:03Z", + "tree": 
null, + "paths": null + }, + "matches": [ + "daily snapshot" + ], + "counters": { + "daily": 23 + } + }, + { + "snapshot": { + "time": "2016-01-04T16:23:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "daily snapshot" + ], + "counters": { + "daily": 22 + } + }, + { + "snapshot": { + "time": "2016-01-03T07:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "daily snapshot" + ], + "counters": { + "daily": 21 + } + }, + { + "snapshot": { + "time": "2016-01-01T07:08:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "daily snapshot" + ], + "counters": { + "daily": 20 + } + }, + { + "snapshot": { + "time": "2015-11-22T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "daily snapshot" + ], + "counters": { + "daily": 19 + } + }, + { + "snapshot": { + "time": "2015-11-21T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "daily snapshot" + ], + "counters": { + "daily": 18 + } + }, + { + "snapshot": { + "time": "2015-11-20T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "daily snapshot" + ], + "counters": { + "daily": 17 + } + }, + { + "snapshot": { + "time": "2015-11-18T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "daily snapshot" + ], + "counters": { + "daily": 16 + } + }, + { + "snapshot": { + "time": "2015-11-15T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "daily snapshot" + ], + "counters": { + "daily": 15 + } + }, + { + "snapshot": { + "time": "2015-11-13T10:20:30.1Z", + "tree": null, + "paths": null + }, + "matches": [ + "daily snapshot" + ], + "counters": { + "daily": 14 + } + }, + { + "snapshot": { + "time": "2015-11-12T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "daily snapshot" + ], + "counters": { + "daily": 13 + } + }, + { + "snapshot": { + "time": "2015-11-10T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "daily snapshot" + ], + "counters": { + "daily": 12 + } + }, + { + "snapshot": { + "time": "2015-11-08T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "daily snapshot" + ], + "counters": { + "daily": 11 + } + }, + { + "snapshot": { + "time": "2015-10-22T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "daily snapshot" + ], + "counters": { + "daily": 10 + } + }, + { + "snapshot": { + "time": "2015-10-20T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "daily snapshot" + ], + "counters": { + "daily": 9 + } + }, + { + "snapshot": { + "time": "2015-10-11T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "daily snapshot" + ], + "counters": { + "daily": 8 + } + }, + { + "snapshot": { + "time": "2015-10-10T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "daily snapshot" + ], + "counters": { + "daily": 7 + } + }, + { + "snapshot": { + "time": "2015-10-09T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "daily snapshot" + ], + "counters": { + "daily": 6 + } + }, + { + "snapshot": { + "time": "2015-10-08T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "daily snapshot" + ], + "counters": { + "daily": 5 + } + }, + { + "snapshot": { + "time": "2015-10-06T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "daily snapshot" + ], + "counters": { + "daily": 4 + } + }, + { + "snapshot": { + "time": "2015-10-05T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "daily snapshot" + ], + "counters": { + "daily": 3 + } + }, + { + "snapshot": { + "time": "2015-10-02T10:20:30Z", + "tree": null, + 
"paths": null + }, + "matches": [ + "daily snapshot" + ], + "counters": { + "daily": 2 + } + }, + { + "snapshot": { + "time": "2015-10-01T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "daily snapshot" + ], + "counters": { + "daily": 1 + } + }, + { + "snapshot": { + "time": "2015-09-22T10:20:30Z", + "tree": null, + "paths": null + }, + "matches": [ + "daily snapshot" + ], + "counters": {} + } + ] +} \ No newline at end of file diff --git a/internal/restic/testdata/policy_keep_snapshots_9 b/internal/restic/testdata/policy_keep_snapshots_9 new file mode 100644 index 000000000..cc33b6f82 --- /dev/null +++ b/internal/restic/testdata/policy_keep_snapshots_9 @@ -0,0 +1,120 @@ +{ + "keep": [ + { + "time": "2016-01-18T12:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-12T21:08:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-12T21:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-09T21:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-08T20:02:03Z", + "tree": null, + "paths": null + }, + { + "time": "2016-01-07T10:02:03Z", + "tree": null, + "paths": null + } + ], + "reasons": [ + { + "snapshot": { + "time": "2016-01-18T12:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot", + "daily snapshot" + ], + "counters": { + "last": 4, + "daily": 4 + } + }, + { + "snapshot": { + "time": "2016-01-12T21:08:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot", + "daily snapshot" + ], + "counters": { + "last": 3, + "daily": 3 + } + }, + { + "snapshot": { + "time": "2016-01-12T21:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot" + ], + "counters": { + "last": 2, + "daily": 3 + } + }, + { + "snapshot": { + "time": "2016-01-09T21:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot", + "daily snapshot" + ], + "counters": { + "last": 1, + "daily": 2 + } + }, + { + "snapshot": { + "time": "2016-01-08T20:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "last snapshot", + "daily snapshot" + ], + "counters": { + "daily": 1 + } + }, + { + "snapshot": { + "time": "2016-01-07T10:02:03Z", + "tree": null, + "paths": null + }, + "matches": [ + "daily snapshot" + ], + "counters": {} + } + ] +} \ No newline at end of file diff --git a/internal/restic/testdata/used_blobs_snapshot0 b/internal/restic/testdata/used_blobs_snapshot0 new file mode 100644 index 000000000..667ad34db --- /dev/null +++ b/internal/restic/testdata/used_blobs_snapshot0 @@ -0,0 +1,23 @@ +{"ID":"05bddd650a800f83f7c0d844cecb1e02f99ce962df5652a53842be50386078e1","Type":"data"} +{"ID":"087040b12f129e89e4eab2b86aa14467404366a17a6082efb0d11fa7e2f9f58e","Type":"data"} +{"ID":"1e0f0e5799b9d711e07883050366c7eee6b7481c0d884694093149f6c4e9789a","Type":"data"} +{"ID":"229eac8e4e6c2e8d7b1d9f9627ab5d1a59cb17c5744c1e3634215116e7a92e7d","Type":"tree"} +{"ID":"4719f8a039f5b745e16cf90e5b84c9255c290d500da716f7dd25909cdabb85b6","Type":"data"} +{"ID":"4e352975938a29711c3003c498185972235af261a6cf8cf700a8a6ee4f914b05","Type":"data"} +{"ID":"606772eacb7fe1a79267088dcadd13431914854faf1d39d47fe99a26b9fecdcb","Type":"data"} +{"ID":"6b5fd3a9baf615489c82a99a71f9917bf9a2d82d5f640d7f47d175412c4b8d19","Type":"data"} +{"ID":"72b6eb0fd0d87e00392f8b91efc1a4c3f7f5c0c76f861b38aea054bc9d43463b","Type":"data"} +{"ID":"77ab53b52e0cf13b300d1b7f6dac89287c8d86769d85e8a273311006ce6359be","Type":"data"} +{"ID":"99dab094430d3c1be22c801a6ad7364d490a8d2ce3f9dfa3d2677431446925f4","Type":"data"} 
+{"ID":"9face1b278a49ef8819fbc1855ce573a85077453bbf6683488cad7767c3a38a7","Type":"tree"} +{"ID":"a4c97189465344038584e76c965dd59100eaed051db1fa5ba0e143897e2c87f1","Type":"data"} +{"ID":"a69c8621776ca8bb34c6c90e5ad811ddc8e2e5cfd6bb0cec5e75cca70e0b9ade","Type":"data"} +{"ID":"b11f4dd9d2722b3325186f57cd13a71a3af7791118477f355b49d101104e4c22","Type":"data"} +{"ID":"b1f2ae9d748035e5bd9a87f2579405166d150c6560d8919496f02855e1c36cf9","Type":"data"} +{"ID":"b5ba06039224566a09555abd089de7a693660154991295122fa72b0a3adc4150","Type":"data"} +{"ID":"b7040572b44cbfea8b784ecf8679c3d75cefc1cd3d12ed783ca0d8e5d124a60f","Type":"data"} +{"ID":"b9e634143719742fe77feed78b61f09573d59d2efa23d6d54afe6c159d220503","Type":"data"} +{"ID":"ca896fc9ebf95fcffd7c768b07b92110b21e332a47fef7e382bf15363b0ece1a","Type":"data"} +{"ID":"e6fe3512ea23a4ebf040d30958c669f7ffe724400f155a756467a9f3cafc27c5","Type":"data"} +{"ID":"e96774ac5abfbb59940939f614d65a397fb7b5abba76c29bfe14479c6616eea0","Type":"tree"} +{"ID":"ed00928ce97ac5acd27c862d9097e606536e9063af1c47481257811f66260f3a","Type":"data"} diff --git a/internal/restic/testdata/used_blobs_snapshot1 b/internal/restic/testdata/used_blobs_snapshot1 new file mode 100644 index 000000000..a5e8caedf --- /dev/null +++ b/internal/restic/testdata/used_blobs_snapshot1 @@ -0,0 +1,15 @@ +{"ID":"04ff190aea26dae65ba4c782926cdfb700b484a8b802a5ffd58e3fadcf70b797","Type":"tree"} +{"ID":"05bddd650a800f83f7c0d844cecb1e02f99ce962df5652a53842be50386078e1","Type":"data"} +{"ID":"18dcaa1a676823c909aafabbb909652591915eebdde4f9a65cee955157583494","Type":"data"} +{"ID":"4719f8a039f5b745e16cf90e5b84c9255c290d500da716f7dd25909cdabb85b6","Type":"data"} +{"ID":"6824d08e63a598c02b364e25f195e64758494b5944f06c921ff30029e1e4e4bf","Type":"data"} +{"ID":"72b6eb0fd0d87e00392f8b91efc1a4c3f7f5c0c76f861b38aea054bc9d43463b","Type":"data"} +{"ID":"8192279e4b56e1644dcff715d5e08d875cd5713349139d36d142ed28364d8e00","Type":"data"} +{"ID":"a69c8621776ca8bb34c6c90e5ad811ddc8e2e5cfd6bb0cec5e75cca70e0b9ade","Type":"data"} +{"ID":"b1f2ae9d748035e5bd9a87f2579405166d150c6560d8919496f02855e1c36cf9","Type":"data"} +{"ID":"b9e634143719742fe77feed78b61f09573d59d2efa23d6d54afe6c159d220503","Type":"data"} +{"ID":"bdd5a029dd295e5998c518022547d185794e72d8f8c38709a638c5841284daef","Type":"tree"} +{"ID":"ca896fc9ebf95fcffd7c768b07b92110b21e332a47fef7e382bf15363b0ece1a","Type":"data"} +{"ID":"cc4cab5b20a3a88995f8cdb8b0698d67a32dbc5b54487f03cb612c30a626af39","Type":"data"} +{"ID":"e6fe3512ea23a4ebf040d30958c669f7ffe724400f155a756467a9f3cafc27c5","Type":"data"} +{"ID":"ed00928ce97ac5acd27c862d9097e606536e9063af1c47481257811f66260f3a","Type":"data"} diff --git a/internal/restic/testdata/used_blobs_snapshot2 b/internal/restic/testdata/used_blobs_snapshot2 new file mode 100644 index 000000000..f6404737e --- /dev/null +++ b/internal/restic/testdata/used_blobs_snapshot2 @@ -0,0 +1,24 @@ +{"ID":"05bddd650a800f83f7c0d844cecb1e02f99ce962df5652a53842be50386078e1","Type":"data"} +{"ID":"087040b12f129e89e4eab2b86aa14467404366a17a6082efb0d11fa7e2f9f58e","Type":"data"} +{"ID":"0b88f99abc5ac71c54b3e8263c52ecb7d8903462779afdb3c8176ec5c4bb04fb","Type":"data"} +{"ID":"1e0f0e5799b9d711e07883050366c7eee6b7481c0d884694093149f6c4e9789a","Type":"data"} +{"ID":"27917462f89cecae77a4c8fb65a094b9b75a917f13794c628b1640b17f4c4981","Type":"data"} +{"ID":"32745e4b26a5883ecec272c9fbfe7f3c9835c9ab41c9a2baa4d06f319697a0bd","Type":"data"} +{"ID":"4719f8a039f5b745e16cf90e5b84c9255c290d500da716f7dd25909cdabb85b6","Type":"data"} 
+{"ID":"4e352975938a29711c3003c498185972235af261a6cf8cf700a8a6ee4f914b05","Type":"data"} +{"ID":"6824d08e63a598c02b364e25f195e64758494b5944f06c921ff30029e1e4e4bf","Type":"data"} +{"ID":"6b5fd3a9baf615489c82a99a71f9917bf9a2d82d5f640d7f47d175412c4b8d19","Type":"data"} +{"ID":"95c97192efa810ccb1cee112238dca28673fbffce205d75ce8cc990a31005a51","Type":"data"} +{"ID":"99dab094430d3c1be22c801a6ad7364d490a8d2ce3f9dfa3d2677431446925f4","Type":"data"} +{"ID":"9face1b278a49ef8819fbc1855ce573a85077453bbf6683488cad7767c3a38a7","Type":"tree"} +{"ID":"a4c97189465344038584e76c965dd59100eaed051db1fa5ba0e143897e2c87f1","Type":"data"} +{"ID":"a5f2ffcd54e28e2ef3089c35b72aafda66161125e23dad581087ccd050c111c3","Type":"tree"} +{"ID":"a69c8621776ca8bb34c6c90e5ad811ddc8e2e5cfd6bb0cec5e75cca70e0b9ade","Type":"data"} +{"ID":"ab5205525de94e564e3a00f634fcf9ebc397debd567734c68da7b406e612aae4","Type":"tree"} +{"ID":"b6a7e8d2aa717e0a6bd68abab512c6b566074b5a6ca2edf4cd446edc5857d732","Type":"data"} +{"ID":"be2055b7125ccf824fcfa8faa4eb3985119012bac26643944eee46218e71306e","Type":"tree"} +{"ID":"bfc2fdb527b0c9f66bbb8d4ff1c44023cc2414efcc7f0831c10debab06bb4388","Type":"tree"} +{"ID":"ca896fc9ebf95fcffd7c768b07b92110b21e332a47fef7e382bf15363b0ece1a","Type":"data"} +{"ID":"e6fe3512ea23a4ebf040d30958c669f7ffe724400f155a756467a9f3cafc27c5","Type":"data"} +{"ID":"ed00928ce97ac5acd27c862d9097e606536e9063af1c47481257811f66260f3a","Type":"data"} +{"ID":"f3cd67d9c14d2a81663d63522ab914e465b021a3b65e2f1ea6caf7478f2ec139","Type":"data"} diff --git a/internal/restic/testing.go b/internal/restic/testing.go new file mode 100644 index 000000000..eb10919e7 --- /dev/null +++ b/internal/restic/testing.go @@ -0,0 +1,215 @@ +package restic + +import ( + "context" + "encoding/json" + "fmt" + "io" + "math/rand" + "testing" + "time" + + "github.com/restic/restic/internal/errors" + + "github.com/restic/chunker" +) + +// fakeFile returns a reader which yields deterministic pseudo-random data. +func fakeFile(t testing.TB, seed, size int64) io.Reader { + return io.LimitReader(NewRandReader(rand.New(rand.NewSource(seed))), size) +} + +type fakeFileSystem struct { + t testing.TB + repo Repository + knownBlobs IDSet + duplication float32 + buf []byte + chunker *chunker.Chunker + rand *rand.Rand +} + +// saveFile reads from rd and saves the blobs in the repository. The list of +// IDs is returned. 
+func (fs *fakeFileSystem) saveFile(ctx context.Context, rd io.Reader) (blobs IDs) { + if fs.buf == nil { + fs.buf = make([]byte, chunker.MaxSize) + } + + if fs.chunker == nil { + fs.chunker = chunker.New(rd, fs.repo.Config().ChunkerPolynomial) + } else { + fs.chunker.Reset(rd, fs.repo.Config().ChunkerPolynomial) + } + + blobs = IDs{} + for { + chunk, err := fs.chunker.Next(fs.buf) + if errors.Cause(err) == io.EOF { + break + } + + if err != nil { + fs.t.Fatalf("unable to save chunk in repo: %v", err) + } + + id := Hash(chunk.Data) + if !fs.blobIsKnown(id, DataBlob) { + _, err := fs.repo.SaveBlob(ctx, DataBlob, chunk.Data, id) + if err != nil { + fs.t.Fatalf("error saving chunk: %v", err) + } + + fs.knownBlobs.Insert(id) + } + + blobs = append(blobs, id) + } + + return blobs +} + +const ( + maxFileSize = 20000 + maxSeed = 32 + maxNodes = 15 +) + +func (fs *fakeFileSystem) treeIsKnown(tree *Tree) (bool, []byte, ID) { + data, err := json.Marshal(tree) + if err != nil { + fs.t.Fatalf("json.Marshal(tree) returned error: %v", err) + return false, nil, ID{} + } + data = append(data, '\n') + + id := Hash(data) + return fs.blobIsKnown(id, TreeBlob), data, id +} + +func (fs *fakeFileSystem) blobIsKnown(id ID, t BlobType) bool { + if fs.rand.Float32() < fs.duplication { + return false + } + + if fs.knownBlobs.Has(id) { + return true + } + + if fs.repo.Index().Has(id, t) { + return true + } + + fs.knownBlobs.Insert(id) + return false +} + +// saveTree saves a tree of fake files in the repo and returns the ID. +func (fs *fakeFileSystem) saveTree(ctx context.Context, seed int64, depth int) ID { + rnd := rand.NewSource(seed) + numNodes := int(rnd.Int63() % maxNodes) + + var tree Tree + for i := 0; i < numNodes; i++ { + + // randomly select the type of the node, either tree (p = 1/4) or file (p = 3/4). + if depth > 1 && rnd.Int63()%4 == 0 { + treeSeed := rnd.Int63() % maxSeed + id := fs.saveTree(ctx, treeSeed, depth-1) + + node := &Node{ + Name: fmt.Sprintf("dir-%v", treeSeed), + Type: "dir", + Mode: 0755, + Subtree: &id, + } + + tree.Nodes = append(tree.Nodes, node) + continue + } + + fileSeed := rnd.Int63() % maxSeed + fileSize := (maxFileSize / maxSeed) * fileSeed + + node := &Node{ + Name: fmt.Sprintf("file-%v", fileSeed), + Type: "file", + Mode: 0644, + Size: uint64(fileSize), + } + + node.Content = fs.saveFile(ctx, fakeFile(fs.t, fileSeed, fileSize)) + tree.Nodes = append(tree.Nodes, node) + } + + known, buf, id := fs.treeIsKnown(&tree) + if known { + return id + } + + _, err := fs.repo.SaveBlob(ctx, TreeBlob, buf, id) + if err != nil { + fs.t.Fatal(err) + } + + return id +} + +// TestCreateSnapshot creates a snapshot filled with fake data. The +// fake data is generated deterministically from the timestamp `at`, which is +// also used as the snapshot's timestamp. The tree's depth can be specified +// with the parameter depth. The parameter duplication is a probability that +// the same blob will saved again. 
+func TestCreateSnapshot(t testing.TB, repo Repository, at time.Time, depth int, duplication float32) *Snapshot { + seed := at.Unix() + t.Logf("create fake snapshot at %s with seed %d", at, seed) + + fakedir := fmt.Sprintf("fakedir-at-%v", at.Format("2006-01-02 15:04:05")) + snapshot, err := NewSnapshot([]string{fakedir}, []string{"test"}, "foo", time.Now()) + if err != nil { + t.Fatal(err) + } + snapshot.Time = at + + fs := fakeFileSystem{ + t: t, + repo: repo, + knownBlobs: NewIDSet(), + duplication: duplication, + rand: rand.New(rand.NewSource(seed)), + } + + treeID := fs.saveTree(context.TODO(), seed, depth) + snapshot.Tree = &treeID + + id, err := repo.SaveJSONUnpacked(context.TODO(), SnapshotFile, snapshot) + if err != nil { + t.Fatal(err) + } + + snapshot.id = &id + + t.Logf("saved snapshot %v", id.Str()) + + err = repo.Flush(context.Background()) + if err != nil { + t.Fatal(err) + } + + err = repo.SaveIndex(context.TODO()) + if err != nil { + t.Fatal(err) + } + + return snapshot +} + +// TestParseID parses s as a ID and panics if that fails. +func TestParseID(s string) ID { + id, err := ParseID(s) + if err != nil { + panic(fmt.Sprintf("unable to parse string %q as ID: %v", s, err)) + } + + return id +} diff --git a/internal/restic/testing_test.go b/internal/restic/testing_test.go new file mode 100644 index 000000000..0386fb76a --- /dev/null +++ b/internal/restic/testing_test.go @@ -0,0 +1,62 @@ +package restic_test + +import ( + "context" + "testing" + "time" + + "github.com/restic/restic/internal/checker" + "github.com/restic/restic/internal/repository" + "github.com/restic/restic/internal/restic" +) + +var testSnapshotTime = time.Unix(1460289341, 207401672) + +const ( + testCreateSnapshots = 3 + testDepth = 2 +) + +func TestCreateSnapshot(t *testing.T) { + repo, cleanup := repository.TestRepository(t) + defer cleanup() + + for i := 0; i < testCreateSnapshots; i++ { + restic.TestCreateSnapshot(t, repo, testSnapshotTime.Add(time.Duration(i)*time.Second), testDepth, 0) + } + + snapshots, err := restic.LoadAllSnapshots(context.TODO(), repo) + if err != nil { + t.Fatal(err) + } + + if len(snapshots) != testCreateSnapshots { + t.Fatalf("got %d snapshots, expected %d", len(snapshots), 1) + } + + sn := snapshots[0] + if sn.Time.Before(testSnapshotTime) || sn.Time.After(testSnapshotTime.Add(testCreateSnapshots*time.Second)) { + t.Fatalf("timestamp %v is outside of the allowed time range", sn.Time) + } + + if sn.Tree == nil { + t.Fatalf("tree id is nil") + } + + if sn.Tree.IsNull() { + t.Fatalf("snapshot has zero tree ID") + } + + checker.TestCheckRepo(t, repo) +} + +func BenchmarkTestCreateSnapshot(t *testing.B) { + repo, cleanup := repository.TestRepository(t) + defer cleanup() + + t.ResetTimer() + + for i := 0; i < t.N; i++ { + restic.TestCreateSnapshot(t, repo, testSnapshotTime.Add(time.Duration(i)*time.Second), testDepth, 0) + } +} diff --git a/internal/restic/tree.go b/internal/restic/tree.go new file mode 100644 index 000000000..81650105a --- /dev/null +++ b/internal/restic/tree.go @@ -0,0 +1,100 @@ +package restic + +import ( + "fmt" + "sort" + + "github.com/restic/restic/internal/errors" + + "github.com/restic/restic/internal/debug" +) + +// Tree is an ordered list of nodes. +type Tree struct { + Nodes []*Node `json:"nodes"` +} + +// NewTree creates a new tree object. 
+func NewTree() *Tree { + return &Tree{ + Nodes: []*Node{}, + } +} + +func (t *Tree) String() string { + return fmt.Sprintf("Tree<%d nodes>", len(t.Nodes)) +} + +// Equals returns true if t and other have exactly the same nodes. +func (t *Tree) Equals(other *Tree) bool { + if len(t.Nodes) != len(other.Nodes) { + debug.Log("tree.Equals(): trees have different number of nodes") + return false + } + + for i := 0; i < len(t.Nodes); i++ { + if !t.Nodes[i].Equals(*other.Nodes[i]) { + debug.Log("tree.Equals(): node %d is different:", i) + debug.Log(" %#v", t.Nodes[i]) + debug.Log(" %#v", other.Nodes[i]) + return false + } + } + + return true +} + +// Insert adds a new node at the correct place in the tree. +func (t *Tree) Insert(node *Node) error { + pos, found := t.find(node.Name) + if found != nil { + return errors.Errorf("node %q already present", node.Name) + } + + // https://code.google.com/p/go-wiki/wiki/SliceTricks + t.Nodes = append(t.Nodes, &Node{}) + copy(t.Nodes[pos+1:], t.Nodes[pos:]) + t.Nodes[pos] = node + + return nil +} + +func (t *Tree) find(name string) (int, *Node) { + pos := sort.Search(len(t.Nodes), func(i int) bool { + return t.Nodes[i].Name >= name + }) + + if pos < len(t.Nodes) && t.Nodes[pos].Name == name { + return pos, t.Nodes[pos] + } + + return pos, nil +} + +// Find returns a node with the given name, or nil if none could be found. +func (t *Tree) Find(name string) *Node { + if t == nil { + return nil + } + + _, node := t.find(name) + return node +} + +// Sort sorts the nodes by name. +func (t *Tree) Sort() { + list := Nodes(t.Nodes) + sort.Sort(list) + t.Nodes = list +} + +// Subtrees returns a slice of all subtree IDs of the tree. +func (t *Tree) Subtrees() (trees IDs) { + for _, node := range t.Nodes { + if node.Type == "dir" && node.Subtree != nil { + trees = append(trees, *node.Subtree) + } + } + + return trees +} diff --git a/internal/restic/tree_test.go b/internal/restic/tree_test.go new file mode 100644 index 000000000..2bcda6760 --- /dev/null +++ b/internal/restic/tree_test.go @@ -0,0 +1,115 @@ +package restic_test + +import ( + "context" + "encoding/json" + "io/ioutil" + "os" + "path/filepath" + "testing" + + "github.com/restic/restic/internal/repository" + "github.com/restic/restic/internal/restic" + rtest "github.com/restic/restic/internal/test" +) + +var testFiles = []struct { + name string + content []byte +}{ + {"foo", []byte("bar")}, + {"bar/foo2", []byte("bar2")}, + {"bar/bla/blubb", []byte("This is just a test!\n")}, +} + +func createTempDir(t *testing.T) string { + tempdir, err := ioutil.TempDir(rtest.TestTempDir, "restic-test-") + rtest.OK(t, err) + + for _, test := range testFiles { + file := filepath.Join(tempdir, test.name) + dir := filepath.Dir(file) + if dir != "." 
{ + rtest.OK(t, os.MkdirAll(dir, 0755)) + } + + f, err := os.Create(file) + defer func() { + rtest.OK(t, f.Close()) + }() + + rtest.OK(t, err) + + _, err = f.Write(test.content) + rtest.OK(t, err) + } + + return tempdir +} + +func TestTree(t *testing.T) { + dir := createTempDir(t) + defer func() { + if rtest.TestCleanupTempDirs { + rtest.RemoveAll(t, dir) + } + }() +} + +var testNodes = []restic.Node{ + {Name: "normal"}, + {Name: "with backslashes \\zzz"}, + {Name: "test utf-8 föbärß"}, + {Name: "test invalid \x00\x01\x02\x03\x04"}, + {Name: "test latin1 \x75\x6d\x6c\xe4\xfc\x74\xf6\x6e\xdf\x6e\x6c\x6c"}, +} + +func TestNodeMarshal(t *testing.T) { + for i, n := range testNodes { + data, err := json.Marshal(&n) + rtest.OK(t, err) + + var node restic.Node + err = json.Unmarshal(data, &node) + rtest.OK(t, err) + + if n.Name != node.Name { + t.Fatalf("Node %d: Names are not equal, want: %q got: %q", i, n.Name, node.Name) + } + } +} + +func TestNodeComparison(t *testing.T) { + fi, err := os.Lstat("tree_test.go") + rtest.OK(t, err) + + node, err := restic.NodeFromFileInfo("tree_test.go", fi) + rtest.OK(t, err) + + n2 := *node + rtest.Assert(t, node.Equals(n2), "nodes aren't equal") + + n2.Size-- + rtest.Assert(t, !node.Equals(n2), "nodes are equal") +} + +func TestLoadTree(t *testing.T) { + repo, cleanup := repository.TestRepository(t) + defer cleanup() + + // save tree + tree := restic.NewTree() + id, err := repo.SaveTree(context.TODO(), tree) + rtest.OK(t, err) + + // save packs + rtest.OK(t, repo.Flush(context.Background())) + + // load tree again + tree2, err := repo.LoadTree(context.TODO(), id) + rtest.OK(t, err) + + rtest.Assert(t, tree.Equals(tree2), + "trees are not equal: want %v, got %v", + tree, tree2) +} diff --git a/internal/restorer/doc.go b/internal/restorer/doc.go new file mode 100644 index 000000000..b3583c728 --- /dev/null +++ b/internal/restorer/doc.go @@ -0,0 +1,33 @@ +// Package restorer contains code to restore data from a repository. +// +// The Restorer tries to keep the number of backend requests minimal. It does +// this by downloading all required blobs of a pack file with a single backend +// request and avoiding repeated downloads of the same pack. In addition, +// several pack files are fetched concurrently. +// +// Here is high-level pseudo-code of the how the Restorer attempts to achieve +// these goals: +// +// while there are packs to process +// choose a pack to process [1] +// get the pack from the backend or cache [2] +// write pack blobs to the files that need them [3] +// if not all pack blobs were used +// cache the pack for future use [4] +// +// Pack download and processing (steps [2] - [4]) runs on multiple concurrent +// Goroutines. The Restorer runs all steps [2]-[4] sequentially on the same +// Goroutine. +// +// Before a pack is downloaded (step [2]), the required space is "reserved" in +// the pack cache. Actual download uses single backend request to get all +// required pack blobs. This may download blobs that are not needed, but we +// assume it'll still be faster than getting individual blobs. +// +// Target files are written (step [3]) in the "right" order, first file blob +// first, then second, then third and so on. Blob write order implies that some +// pack blobs may not be immediately used, i.e. they are "out of order" for +// their respective target files. Packs with unused blobs are cached (step +// [4]). 
The cache has capacity limit and may purge packs before they are fully +// used, in which case the purged packs will need to be re-downloaded. +package restorer diff --git a/internal/restorer/filepacktraverser.go b/internal/restorer/filepacktraverser.go new file mode 100644 index 000000000..bba61e0f9 --- /dev/null +++ b/internal/restorer/filepacktraverser.go @@ -0,0 +1,52 @@ +package restorer + +import ( + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/restic" +) + +type filePackTraverser struct { + lookup func(restic.ID, restic.BlobType) ([]restic.PackedBlob, bool) +} + +// iterates over all remaining packs of the file +func (t *filePackTraverser) forEachFilePack(file *fileInfo, fn func(packIdx int, packID restic.ID, packBlobs []restic.Blob) bool) error { + if len(file.blobs) == 0 { + return nil + } + + getBlobPack := func(blobID restic.ID) (restic.PackedBlob, error) { + packs, found := t.lookup(blobID, restic.DataBlob) + if !found { + return restic.PackedBlob{}, errors.Errorf("Unknown blob %s", blobID.String()) + } + // TODO which pack to use if multiple packs have the blob? + // MUST return the same pack for the same blob during the same execution + return packs[0], nil + } + + var prevPackID restic.ID + var prevPackBlobs []restic.Blob + packIdx := 0 + for _, blobID := range file.blobs { + packedBlob, err := getBlobPack(blobID) + if err != nil { + return err + } + if !prevPackID.IsNull() && prevPackID != packedBlob.PackID { + if !fn(packIdx, prevPackID, prevPackBlobs) { + return nil + } + packIdx++ + } + if prevPackID != packedBlob.PackID { + prevPackID = packedBlob.PackID + prevPackBlobs = make([]restic.Blob, 0) + } + prevPackBlobs = append(prevPackBlobs, packedBlob.Blob) + } + if len(prevPackBlobs) > 0 { + fn(packIdx, prevPackID, prevPackBlobs) + } + return nil +} diff --git a/internal/restorer/filerestorer.go b/internal/restorer/filerestorer.go new file mode 100644 index 000000000..4baf9b567 --- /dev/null +++ b/internal/restorer/filerestorer.go @@ -0,0 +1,324 @@ +package restorer + +import ( + "context" + "io" + "path/filepath" + + "github.com/restic/restic/internal/crypto" + "github.com/restic/restic/internal/debug" + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/restic" +) + +// TODO if a blob is corrupt, there may be good blob copies in other packs +// TODO evaluate if it makes sense to split download and processing workers +// pro: can (slowly) read network and decrypt/write files concurrently +// con: each worker needs to keep one pack in memory +// TODO evaluate memory footprint for larger repositories, say 10M packs/10M files +// TODO consider replacing pack file cache with blob cache +// TODO avoid decrypting the same blob multiple times +// TODO evaluate disabled debug logging overhead for large repositories + +const ( + workerCount = 8 + + // max number of open output file handles + filesWriterCount = 32 + + // estimated average pack size used to calculate pack cache capacity + averagePackSize = 5 * 1024 * 1024 + + // pack cache capacity should support at least one cached pack per worker + // allow space for extra 5 packs for actual caching + packCacheCapacity = (workerCount + 5) * averagePackSize +) + +// information about regular file being restored +type fileInfo struct { + location string // file on local filesystem relative to restorer basedir + blobs []restic.ID // remaining blobs of the file +} + +// information about a data pack required to restore one or more files +type packInfo struct { + // the 
pack id + id restic.ID + + // set of files that use blobs from this pack + files map[*fileInfo]struct{} + + // number of other packs that must be downloaded before all blobs in this pack can be used + cost int + + // used by packHeap + index int +} + +// fileRestorer restores set of files +type fileRestorer struct { + key *crypto.Key + idx filePackTraverser + packLoader func(ctx context.Context, h restic.Handle, length int, offset int64, fn func(rd io.Reader) error) error + + packCache *packCache // pack cache + filesWriter *filesWriter // file write + + dst string + files []*fileInfo +} + +func newFileRestorer(dst string, packLoader func(ctx context.Context, h restic.Handle, length int, offset int64, fn func(rd io.Reader) error) error, key *crypto.Key, idx filePackTraverser) *fileRestorer { + return &fileRestorer{ + packLoader: packLoader, + key: key, + idx: idx, + filesWriter: newFilesWriter(filesWriterCount), + packCache: newPackCache(packCacheCapacity), + dst: dst, + } +} + +func (r *fileRestorer) addFile(location string, content restic.IDs) { + r.files = append(r.files, &fileInfo{location: location, blobs: content}) +} + +func (r *fileRestorer) targetPath(location string) string { + return filepath.Join(r.dst, location) +} + +// used to pass information among workers (wish golang channels allowed multivalues) +type processingInfo struct { + pack *packInfo + files map[*fileInfo]error +} + +func (r *fileRestorer) restoreFiles(ctx context.Context, onError func(path string, err error)) error { + // TODO conditionally enable when debug log is on + // for _, file := range r.files { + // dbgmsg := file.location + ": " + // r.idx.forEachFilePack(file, func(packIdx int, packID restic.ID, packBlobs []restic.Blob) bool { + // if packIdx > 0 { + // dbgmsg += ", " + // } + // dbgmsg += "pack{id=" + packID.Str() + ", blobs: " + // for blobIdx, blob := range packBlobs { + // if blobIdx > 0 { + // dbgmsg += ", " + // } + // dbgmsg += blob.ID.Str() + // } + // dbgmsg += "}" + // return true // keep going + // }) + // debug.Log(dbgmsg) + // } + + inprogress := make(map[*fileInfo]struct{}) + queue, err := newPackQueue(r.idx, r.files, func(files map[*fileInfo]struct{}) bool { + for file := range files { + if _, found := inprogress[file]; found { + return true + } + } + return false + }) + if err != nil { + return err + } + + // workers + downloadCh := make(chan processingInfo) + feedbackCh := make(chan processingInfo) + + defer close(downloadCh) + defer close(feedbackCh) + + worker := func() { + for { + select { + case <-ctx.Done(): + return + case request, ok := <-downloadCh: + if !ok { + return // channel closed + } + rd, err := r.downloadPack(ctx, request.pack) + if err == nil { + r.processPack(ctx, request, rd) + } else { + // mark all files as failed + for file := range request.files { + request.files[file] = err + } + } + feedbackCh <- request + } + } + } + for i := 0; i < workerCount; i++ { + go worker() + } + + processFeedback := func(pack *packInfo, ferrors map[*fileInfo]error) { + // update files blobIdx + // must do it here to avoid race among worker and processing feedback threads + var success []*fileInfo + var failure []*fileInfo + for file, ferr := range ferrors { + target := r.targetPath(file.location) + if ferr != nil { + onError(file.location, ferr) + r.filesWriter.close(target) + delete(inprogress, file) + failure = append(failure, file) + } else { + r.idx.forEachFilePack(file, func(packIdx int, packID restic.ID, packBlobs []restic.Blob) bool { + file.blobs = 
file.blobs[len(packBlobs):] + return false // only interesed in the first pack + }) + if len(file.blobs) == 0 { + r.filesWriter.close(target) + delete(inprogress, file) + } + success = append(success, file) + } + } + // update the queue and requeueu the pack as necessary + if !queue.requeuePack(pack, success, failure) { + r.packCache.remove(pack.id) + debug.Log("Purged used up pack %s from pack cache", pack.id.Str()) + } + } + + // the main restore loop + for !queue.isEmpty() { + debug.Log("-----------------------------------") + pack, files := queue.nextPack() + if pack != nil { + ferrors := make(map[*fileInfo]error) + for _, file := range files { + ferrors[file] = nil + inprogress[file] = struct{}{} + } + select { + case <-ctx.Done(): + return ctx.Err() + case downloadCh <- processingInfo{pack: pack, files: ferrors}: + debug.Log("Scheduled download pack %s (%d files)", pack.id.Str(), len(files)) + case feedback := <-feedbackCh: + queue.requeuePack(pack, []*fileInfo{}, []*fileInfo{}) // didn't use the pack during this iteration + processFeedback(feedback.pack, feedback.files) + } + } else { + select { + case <-ctx.Done(): + return ctx.Err() + case feedback := <-feedbackCh: + processFeedback(feedback.pack, feedback.files) + } + } + } + + return nil +} + +func (r *fileRestorer) downloadPack(ctx context.Context, pack *packInfo) (readerAtCloser, error) { + const MaxInt64 = 1<<63 - 1 // odd Go does not have this predefined somewhere + + // calculate pack byte range + start, end := int64(MaxInt64), int64(0) + for file := range pack.files { + r.idx.forEachFilePack(file, func(packIdx int, packID restic.ID, packBlobs []restic.Blob) bool { + if packID.Equal(pack.id) { + for _, blob := range packBlobs { + if start > int64(blob.Offset) { + start = int64(blob.Offset) + } + if end < int64(blob.Offset+blob.Length) { + end = int64(blob.Offset + blob.Length) + } + } + } + + return true // keep going + }) + } + + packReader, err := r.packCache.get(pack.id, start, int(end-start), func(offset int64, length int, wr io.WriteSeeker) error { + h := restic.Handle{Type: restic.DataFile, Name: pack.id.String()} + return r.packLoader(ctx, h, length, offset, func(rd io.Reader) error { + // reset the file in case of a download retry + _, err := wr.Seek(0, io.SeekStart) + if err != nil { + return err + } + + len, err := io.Copy(wr, rd) + if err != nil { + return err + } + if len != int64(length) { + return errors.Errorf("unexpected pack size: expected %d but got %d", length, len) + } + + return nil + }) + }) + if err != nil { + return nil, err + } + + return packReader, nil +} + +func (r *fileRestorer) processPack(ctx context.Context, request processingInfo, rd readerAtCloser) { + defer rd.Close() + + for file := range request.files { + target := r.targetPath(file.location) + r.idx.forEachFilePack(file, func(packIdx int, packID restic.ID, packBlobs []restic.Blob) bool { + for _, blob := range packBlobs { + debug.Log("Writing blob %s (%d bytes) from pack %s to %s", blob.ID.Str(), blob.Length, packID.Str(), file.location) + buf, err := r.loadBlob(rd, blob) + if err == nil { + err = r.filesWriter.writeToFile(target, buf) + } + if err != nil { + request.files[file] = err + break // could not restore the file + } + } + return false + }) + } +} + +func (r *fileRestorer) loadBlob(rd io.ReaderAt, blob restic.Blob) ([]byte, error) { + // TODO reconcile with Repository#loadBlob implementation + + buf := make([]byte, blob.Length) + + n, err := rd.ReadAt(buf, int64(blob.Offset)) + if err != nil { + return nil, err + } + + if n != 
int(blob.Length) { + return nil, errors.Errorf("error loading blob %v: wrong length returned, want %d, got %d", blob.ID.Str(), blob.Length, n) + } + + // decrypt + nonce, ciphertext := buf[:r.key.NonceSize()], buf[r.key.NonceSize():] + plaintext, err := r.key.Open(ciphertext[:0], nonce, ciphertext, nil) + if err != nil { + return nil, errors.Errorf("decrypting blob %v failed: %v", blob.ID, err) + } + + // check hash + if !restic.Hash(plaintext).Equal(blob.ID) { + return nil, errors.Errorf("blob %v returned invalid hash", blob.ID) + } + + return plaintext, nil +} diff --git a/internal/restorer/filerestorer_test.go b/internal/restorer/filerestorer_test.go new file mode 100644 index 000000000..dd022e9d4 --- /dev/null +++ b/internal/restorer/filerestorer_test.go @@ -0,0 +1,212 @@ +package restorer + +import ( + "bytes" + "context" + "io" + "io/ioutil" + "testing" + + "github.com/restic/restic/internal/crypto" + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/restic" + rtest "github.com/restic/restic/internal/test" +) + +type TestBlob struct { + data string + pack string +} + +type TestFile struct { + name string + blobs []TestBlob +} + +type TestRepo struct { + key *crypto.Key + + // pack names and ids + packsNameToID map[string]restic.ID + packsIDToName map[restic.ID]string + packsIDToData map[restic.ID][]byte + + // blobs and files + blobs map[restic.ID][]restic.PackedBlob + files []*fileInfo + filesPathToContent map[string]string + + // + loader func(ctx context.Context, h restic.Handle, length int, offset int64, fn func(rd io.Reader) error) error + + // + idx filePackTraverser +} + +func (i *TestRepo) Lookup(blobID restic.ID, _ restic.BlobType) ([]restic.PackedBlob, bool) { + packs, found := i.blobs[blobID] + return packs, found +} + +func (i *TestRepo) packName(pack *packInfo) string { + return i.packsIDToName[pack.id] +} + +func (i *TestRepo) packID(name string) restic.ID { + return i.packsNameToID[name] +} + +func (i *TestRepo) pack(queue *packQueue, name string) *packInfo { + id := i.packsNameToID[name] + return queue.packs[id] +} + +func (i *TestRepo) fileContent(file *fileInfo) string { + return i.filesPathToContent[file.location] +} + +func newTestRepo(content []TestFile) *TestRepo { + type Pack struct { + name string + data []byte + blobs map[restic.ID]restic.Blob + } + packs := make(map[string]Pack) + + key := crypto.NewRandomKey() + seal := func(data []byte) []byte { + ciphertext := restic.NewBlobBuffer(len(data)) + ciphertext = ciphertext[:0] // truncate the slice + nonce := crypto.NewRandomNonce() + ciphertext = append(ciphertext, nonce...) + return key.Seal(ciphertext, nonce, data, nil) + } + + filesPathToContent := make(map[string]string) + + for _, file := range content { + var content string + for _, blob := range file.blobs { + content += blob.data + + // get the pack, create as necessary + var pack Pack + var found bool + if pack, found = packs[blob.pack]; !found { + pack = Pack{name: blob.pack, blobs: make(map[restic.ID]restic.Blob)} + } + + // calculate blob id and add to the pack as necessary + blobID := restic.Hash([]byte(blob.data)) + if _, found := pack.blobs[blobID]; !found { + blobData := seal([]byte(blob.data)) + pack.blobs[blobID] = restic.Blob{ + Type: restic.DataBlob, + ID: blobID, + Length: uint(len(blobData)), + Offset: uint(len(pack.data)), + } + pack.data = append(pack.data, blobData...) 
+ } + + packs[blob.pack] = pack + } + filesPathToContent[file.name] = content + } + + blobs := make(map[restic.ID][]restic.PackedBlob) + packsIDToName := make(map[restic.ID]string) + packsIDToData := make(map[restic.ID][]byte) + packsNameToID := make(map[string]restic.ID) + + for _, pack := range packs { + packID := restic.Hash(pack.data) + packsIDToName[packID] = pack.name + packsIDToData[packID] = pack.data + packsNameToID[pack.name] = packID + for blobID, blob := range pack.blobs { + blobs[blobID] = append(blobs[blobID], restic.PackedBlob{Blob: blob, PackID: packID}) + } + } + + var files []*fileInfo + for _, file := range content { + content := restic.IDs{} + for _, blob := range file.blobs { + content = append(content, restic.Hash([]byte(blob.data))) + } + files = append(files, &fileInfo{location: file.name, blobs: content}) + } + + repo := &TestRepo{ + key: key, + packsIDToName: packsIDToName, + packsIDToData: packsIDToData, + packsNameToID: packsNameToID, + blobs: blobs, + files: files, + filesPathToContent: filesPathToContent, + } + repo.idx = filePackTraverser{lookup: repo.Lookup} + repo.loader = func(ctx context.Context, h restic.Handle, length int, offset int64, fn func(rd io.Reader) error) error { + packID, err := restic.ParseID(h.Name) + if err != nil { + return err + } + rd := bytes.NewReader(repo.packsIDToData[packID][int(offset) : int(offset)+length]) + return fn(rd) + } + + return repo +} + +func restoreAndVerify(t *testing.T, tempdir string, content []TestFile) { + repo := newTestRepo(content) + + r := newFileRestorer(tempdir, repo.loader, repo.key, repo.idx) + r.files = repo.files + + r.restoreFiles(context.TODO(), func(path string, err error) { + rtest.OK(t, errors.Wrapf(err, "unexpected error")) + }) + + for _, file := range repo.files { + target := r.targetPath(file.location) + data, err := ioutil.ReadFile(target) + if err != nil { + t.Errorf("unable to read file %v: %v", file.location, err) + continue + } + + rtest.Equals(t, false, r.filesWriter.writers.Contains(target)) + + content := repo.fileContent(file) + if !bytes.Equal(data, []byte(content)) { + t.Errorf("file %v has wrong content: want %q, got %q", file.location, content, data) + } + } + + rtest.OK(t, nil) +} + +func TestFileRestorerBasic(t *testing.T) { + tempdir, cleanup := rtest.TempDir(t) + defer cleanup() + + restoreAndVerify(t, tempdir, []TestFile{ + TestFile{ + name: "file1", + blobs: []TestBlob{ + TestBlob{"data1-1", "pack1-1"}, + TestBlob{"data1-2", "pack1-2"}, + }, + }, + TestFile{ + name: "file2", + blobs: []TestBlob{ + TestBlob{"data2-1", "pack2-1"}, + TestBlob{"data2-2", "pack2-2"}, + }, + }, + }) +} diff --git a/internal/restorer/fileswriter.go b/internal/restorer/fileswriter.go new file mode 100644 index 000000000..af7ea8428 --- /dev/null +++ b/internal/restorer/fileswriter.go @@ -0,0 +1,70 @@ +package restorer + +import ( + "io" + "os" + "sync" + + "github.com/hashicorp/golang-lru/simplelru" + "github.com/restic/restic/internal/debug" + "github.com/restic/restic/internal/errors" +) + +type filesWriter struct { + lock sync.Mutex // guards concurrent access + inprogress map[string]struct{} // (logically) opened file writers + writers simplelru.LRUCache // key: string, value: *os.File +} + +func newFilesWriter(count int) *filesWriter { + writers, _ := simplelru.NewLRU(count, func(key interface{}, value interface{}) { + value.(*os.File).Close() + debug.Log("Closed and purged cached writer for %v", key) + }) + return &filesWriter{inprogress: make(map[string]struct{}), writers: writers} +} + +func 
(w *filesWriter) writeToFile(path string, buf []byte) error { + acquireWriter := func() (io.Writer, error) { + w.lock.Lock() + defer w.lock.Unlock() + if wr, ok := w.writers.Get(path); ok { + debug.Log("Used cached writer for %s", path) + return wr.(*os.File), nil + } + var flags int + if _, append := w.inprogress[path]; append { + flags = os.O_APPEND | os.O_WRONLY + } else { + w.inprogress[path] = struct{}{} + flags = os.O_CREATE | os.O_TRUNC | os.O_WRONLY + } + wr, err := os.OpenFile(path, flags, 0600) + if err != nil { + return nil, err + } + w.writers.Add(path, wr) + debug.Log("Opened and cached writer for %s", path) + return wr, nil + } + + wr, err := acquireWriter() + if err != nil { + return err + } + n, err := wr.Write(buf) + if err != nil { + return err + } + if n != len(buf) { + return errors.Errorf("error writing file %v: wrong length written, want %d, got %d", path, len(buf), n) + } + return nil +} + +func (w *filesWriter) close(path string) { + w.lock.Lock() + defer w.lock.Unlock() + w.writers.Remove(path) + delete(w.inprogress, path) +} diff --git a/internal/restorer/fileswriter_test.go b/internal/restorer/fileswriter_test.go new file mode 100644 index 000000000..45c2a88fb --- /dev/null +++ b/internal/restorer/fileswriter_test.go @@ -0,0 +1,44 @@ +package restorer + +import ( + "io/ioutil" + "testing" + + rtest "github.com/restic/restic/internal/test" +) + +func TestFilesWriterBasic(t *testing.T) { + dir, cleanup := rtest.TempDir(t) + defer cleanup() + + w := newFilesWriter(1) + + f1 := dir + "/f1" + f2 := dir + "/f2" + + rtest.OK(t, w.writeToFile(f1, []byte{1})) + rtest.Equals(t, 1, w.writers.Len()) + rtest.Equals(t, 1, len(w.inprogress)) + + rtest.OK(t, w.writeToFile(f2, []byte{2})) + rtest.Equals(t, 1, w.writers.Len()) + rtest.Equals(t, 2, len(w.inprogress)) + + rtest.OK(t, w.writeToFile(f1, []byte{1})) + w.close(f1) + rtest.Equals(t, 0, w.writers.Len()) + rtest.Equals(t, 1, len(w.inprogress)) + + rtest.OK(t, w.writeToFile(f2, []byte{2})) + w.close(f2) + rtest.Equals(t, 0, w.writers.Len()) + rtest.Equals(t, 0, len(w.inprogress)) + + buf, err := ioutil.ReadFile(f1) + rtest.OK(t, err) + rtest.Equals(t, []byte{1, 1}, buf) + + buf, err = ioutil.ReadFile(f2) + rtest.OK(t, err) + rtest.Equals(t, []byte{2, 2}, buf) +} diff --git a/internal/restorer/packcache.go b/internal/restorer/packcache.go new file mode 100644 index 000000000..1eaad63bf --- /dev/null +++ b/internal/restorer/packcache.go @@ -0,0 +1,243 @@ +package restorer + +import ( + "io" + "sync" + + "github.com/restic/restic/internal/debug" + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/restic" +) + +// packCache is thread safe in-memory cache of pack files required to restore +// one or more files. The cache is meant to hold pack files that cannot be +// fully used right away. This happens when pack files contains blobs from +// "head" of some files and "middle" of other files. "Middle" blobs cannot be +// written to their files until after blobs from some other packs are written +// to the files first. +// +// While the cache is thread safe, implementation assumes (and enforces) +// that individual entries are used by one client at a time. Clients must +// #Close() entry's reader to make the entry available for use by other +// clients. This limitation can be relaxed in the future if necessary. 
+type packCache struct { + // guards access to cache internal data structures + lock sync.Mutex + + // cache capacity + capacity int + reservedCapacity int + allocatedCapacity int + + // pack records currently being used by active restore worker + reservedPacks map[restic.ID]*packCacheRecord + + // unused allocated packs, can be deleted if necessary + cachedPacks map[restic.ID]*packCacheRecord +} + +type packCacheRecord struct { + master *packCacheRecord + cache *packCache + + id restic.ID // cached pack id + offset int64 // cached pack byte range + + data []byte +} + +type readerAtCloser interface { + io.Closer + io.ReaderAt +} + +type bytesWriteSeeker struct { + pos int + data []byte +} + +func (wr *bytesWriteSeeker) Write(p []byte) (n int, err error) { + if wr.pos+len(p) > len(wr.data) { + return -1, errors.Errorf("not enough space") + } + n = copy(wr.data[wr.pos:], p) + wr.pos += n + return n, nil +} + +func (wr *bytesWriteSeeker) Seek(offset int64, whence int) (int64, error) { + if offset != 0 || whence != io.SeekStart { + return -1, errors.Errorf("unsupported seek request") + } + wr.pos = 0 + return 0, nil +} + +func newPackCache(capacity int) *packCache { + return &packCache{ + capacity: capacity, + reservedPacks: make(map[restic.ID]*packCacheRecord), + cachedPacks: make(map[restic.ID]*packCacheRecord), + } +} + +func (c *packCache) reserve(packID restic.ID, offset int64, length int) (record *packCacheRecord, err error) { + c.lock.Lock() + defer c.lock.Unlock() + + if offset < 0 || length <= 0 { + return nil, errors.Errorf("illegal pack cache allocation range %s {offset: %d, length: %d}", packID.Str(), offset, length) + } + + if c.reservedCapacity+length > c.capacity { + return nil, errors.Errorf("not enough cache capacity: requested %d, available %d", length, c.capacity-c.reservedCapacity) + } + + if _, ok := c.reservedPacks[packID]; ok { + return nil, errors.Errorf("pack is already reserved %s", packID.Str()) + } + + // the pack is available in the cache and currently unused + if pack, ok := c.cachedPacks[packID]; ok { + // check if cached pack includes requested byte range + // the range can shrink, but it never grows bigger unless there is a bug elsewhere + if pack.offset > offset || (pack.offset+int64(len(pack.data))) < (offset+int64(length)) { + return nil, errors.Errorf("cached range %d-%d is smaller than requested range %d-%d for pack %s", pack.offset, pack.offset+int64(len(pack.data)), length, offset+int64(length), packID.Str()) + } + + // move the pack to the used map + delete(c.cachedPacks, packID) + c.reservedPacks[packID] = pack + c.reservedCapacity += len(pack.data) + + debug.Log("Using cached pack %s (%d bytes)", pack.id.Str(), len(pack.data)) + + if pack.offset != offset || len(pack.data) != length { + // restrict returned record to requested range + return &packCacheRecord{ + cache: c, + master: pack, + offset: offset, + data: pack.data[int(offset-pack.offset) : int(offset-pack.offset)+length], + }, nil + } + + return pack, nil + } + + for c.allocatedCapacity+length > c.capacity { + // all cached packs will be needed at some point + // so it does not matter which one to purge + for _, cached := range c.cachedPacks { + delete(c.cachedPacks, cached.id) + c.allocatedCapacity -= len(cached.data) + debug.Log("dropped cached pack %s (%d bytes)", cached.id.Str(), len(cached.data)) + break + } + } + + pack := &packCacheRecord{ + cache: c, + id: packID, + offset: offset, + } + c.reservedPacks[pack.id] = pack + c.allocatedCapacity += length + c.reservedCapacity += length + + 
+	return pack, nil
+}
+
+// get returns a reader for the specified cached pack. It uses the provided
+// load func to download the pack content if necessary.
+// The returned reader can only read the pack within the byte range specified
+// by the offset and length parameters; attempts to read outside that range
+// will result in an error.
+// The returned reader must be closed before the same packID can be requested
+// from the cache again.
+func (c *packCache) get(packID restic.ID, offset int64, length int, load func(offset int64, length int, wr io.WriteSeeker) error) (readerAtCloser, error) {
+	pack, err := c.reserve(packID, offset, length)
+	if err != nil {
+		return nil, err
+	}
+
+	if pack.data == nil {
+		releasePack := func() {
+			delete(c.reservedPacks, pack.id)
+			c.reservedCapacity -= length
+			c.allocatedCapacity -= length
+		}
+		wr := &bytesWriteSeeker{data: make([]byte, length)}
+		err = load(offset, length, wr)
+		if err != nil {
+			releasePack()
+			return nil, err
+		}
+		if wr.pos != length {
+			releasePack()
+			return nil, errors.Errorf("invalid read size")
+		}
+		pack.data = wr.data
+		debug.Log("Downloaded and cached pack %s (%d bytes)", pack.id.Str(), len(pack.data))
+	}
+
+	return pack, nil
+}
+
+// release returns the pack record back to the cache
+func (c *packCache) release(pack *packCacheRecord) error {
+	c.lock.Lock()
+	defer c.lock.Unlock()
+
+	if _, ok := c.reservedPacks[pack.id]; !ok {
+		return errors.Errorf("invalid pack release request")
+	}
+
+	delete(c.reservedPacks, pack.id)
+	c.cachedPacks[pack.id] = pack
+	c.reservedCapacity -= len(pack.data)
+
+	return nil
+}
+
+// remove removes the specified pack from the cache and frees the
+// corresponding cache space. It should be called after the pack has been
+// fully used up by the restorer.
+func (c *packCache) remove(packID restic.ID) error {
+	c.lock.Lock()
+	defer c.lock.Unlock()
+
+	if _, ok := c.reservedPacks[packID]; ok {
+		return errors.Errorf("invalid pack remove request, pack %s is reserved", packID.Str())
+	}
+
+	pack, ok := c.cachedPacks[packID]
+	if !ok {
+		return errors.Errorf("invalid pack remove request, pack %s is not cached", packID.Str())
+	}
+
+	delete(c.cachedPacks, pack.id)
+	c.allocatedCapacity -= len(pack.data)
+
+	return nil
+}
+
+// ReadAt reads len(b) bytes from the pack starting at byte offset off.
+// It returns the number of bytes read and the error, if any.
+func (r *packCacheRecord) ReadAt(b []byte, off int64) (n int, err error) {
+	if off < r.offset || off+int64(len(b)) > r.offset+int64(len(r.data)) {
+		return -1, errors.Errorf("read outside available range")
+	}
+	return copy(b, r.data[off-r.offset:]), nil
+}
+
+// Close closes the pack reader and releases the corresponding cache record
+// to the cache.
+// Once closed, the record can be reused by subsequent requests for the same
+// packID, or it can be purged from the cache to make room for other packs.
+func (r *packCacheRecord) Close() (err error) {
+	if r.master != nil {
+		return r.cache.release(r.master)
+	}
+	return r.cache.release(r)
+}
diff --git a/internal/restorer/packcache_test.go b/internal/restorer/packcache_test.go
new file mode 100644
index 000000000..3a5f18cf5
--- /dev/null
+++ b/internal/restorer/packcache_test.go
@@ -0,0 +1,305 @@
+package restorer
+
+import (
+	"io"
+	"testing"
+
+	"github.com/restic/restic/internal/errors"
+	"github.com/restic/restic/internal/restic"
+	rtest "github.com/restic/restic/internal/test"
+)
+
+func assertNotOK(t *testing.T, msg string, err error) {
+	rtest.Assert(t, err != nil, msg+" did not fail")
+}
+
+func TestBytesWriterSeeker(t *testing.T) {
+	wr := &bytesWriteSeeker{data: make([]byte, 10)}
+
+	n, err := wr.Write([]byte{1, 2})
+	rtest.OK(t, err)
+	rtest.Equals(t, 2, n)
+	rtest.Equals(t, []byte{1, 2}, wr.data[0:2])
+
+	n64, err := wr.Seek(0, io.SeekStart)
+	rtest.OK(t, err)
+	rtest.Equals(t, int64(0), n64)
+
+	n, err = wr.Write([]byte{0, 1, 2, 3, 4})
+	rtest.OK(t, err)
+	rtest.Equals(t, 5, n)
+	n, err = wr.Write([]byte{5, 6, 7, 8, 9})
+	rtest.OK(t, err)
+	rtest.Equals(t, 5, n)
+	rtest.Equals(t, []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, wr.data)
+
+	// negative tests
+	_, err = wr.Write([]byte{1})
+	assertNotOK(t, "write overflow", err)
+	_, err = wr.Seek(1, io.SeekStart)
+	assertNotOK(t, "unsupported seek", err)
+}
+
+func TestPackCacheBasic(t *testing.T) {
+	assertReader := func(expected []byte, offset int64, rd io.ReaderAt) {
+		actual := make([]byte, len(expected))
+		rd.ReadAt(actual, offset)
+		rtest.Equals(t, expected, actual)
+	}
+
+	c := newPackCache(10)
+
+	id := restic.NewRandomID()
+
+	// load the pack into the cache
+	rd, err := c.get(id, 10, 5, func(offset int64, length int, wr io.WriteSeeker) error {
+		rtest.Equals(t, int64(10), offset)
+		rtest.Equals(t, 5, length)
+		wr.Write([]byte{1, 2, 3, 4, 5})
+		return nil
+	})
+	rtest.OK(t, err)
+	assertReader([]byte{1, 2, 3, 4, 5}, 10, rd)
+
+	// the pack reader must be closed before the pack can be requested again
+	_, err = c.get(id, 10, 5, func(offset int64, length int, wr io.WriteSeeker) error {
+		t.Error("unexpected cache load call")
+		return nil
+	})
+	assertNotOK(t, "double-reservation", err)
+
+	// close the pack reader and get it from cache
+	rd.Close()
+	rd, err = c.get(id, 10, 5, func(offset int64, length int, wr io.WriteSeeker) error {
+		t.Error("unexpected cache load call")
+		return nil
+	})
+	rtest.OK(t, err)
+	assertReader([]byte{1, 2, 3, 4, 5}, 10, rd)
+
+	// close the pack reader and remove the pack from cache, assert the pack is loaded on request
+	rd.Close()
+	c.remove(id)
+	rd, err = c.get(id, 10, 5, func(offset int64, length int, wr io.WriteSeeker) error {
+		rtest.Equals(t, int64(10), offset)
+		rtest.Equals(t, 5, length)
+		wr.Write([]byte{1, 2, 3, 4, 5})
+		return nil
+	})
+	rtest.OK(t, err)
+	assertReader([]byte{1, 2, 3, 4, 5}, 10, rd)
+}
+
+func TestPackCacheInvalidRange(t *testing.T) {
+	c := newPackCache(10)
+
+	id := restic.NewRandomID()
+
+	_, err := c.get(id, -1, 1, func(offset int64, length int, wr io.WriteSeeker) error {
+		t.Error("unexpected cache load call")
+		return nil
+	})
+	assertNotOK(t, "negative offset request", err)
+
+	_, err = c.get(id, 0, 0, func(offset int64, length int, wr io.WriteSeeker) error {
+		t.Error("unexpected cache load call")
+		return nil
+	})
+	assertNotOK(t, "zero length request", err)
+
+	_, err = c.get(id, 0,
-1, func(offset int64, length int, wr io.WriteSeeker) error { + t.Error("unexpected cache load call") + return nil + }) + assertNotOK(t, "negative length", err) +} + +func TestPackCacheCapacity(t *testing.T) { + c := newPackCache(10) + + id1, id2, id3 := restic.NewRandomID(), restic.NewRandomID(), restic.NewRandomID() + + // load and reserve pack1 + rd1, err := c.get(id1, 0, 5, func(offset int64, length int, wr io.WriteSeeker) error { + wr.Write([]byte{1, 2, 3, 4, 5}) + return nil + }) + rtest.OK(t, err) + + // load and reserve pack2 + _, err = c.get(id2, 0, 5, func(offset int64, length int, wr io.WriteSeeker) error { + wr.Write([]byte{1, 2, 3, 4, 5}) + return nil + }) + rtest.OK(t, err) + + // can't load pack3 because not enough space in the cache + _, err = c.get(id3, 0, 5, func(offset int64, length int, wr io.WriteSeeker) error { + t.Error("unexpected cache load call") + return nil + }) + assertNotOK(t, "request over capacity", err) + + // release pack1 and try again + rd1.Close() + rd3, err := c.get(id3, 0, 5, func(offset int64, length int, wr io.WriteSeeker) error { + wr.Write([]byte{1, 2, 3, 4, 5}) + return nil + }) + rtest.OK(t, err) + + // release pack3 and load pack1 (should not come from cache) + rd3.Close() + loaded := false + rd1, err = c.get(id1, 0, 5, func(offset int64, length int, wr io.WriteSeeker) error { + wr.Write([]byte{1, 2, 3, 4, 5}) + loaded = true + return nil + }) + rtest.OK(t, err) + rtest.Equals(t, true, loaded) +} + +func TestPackCacheDownsizeRecord(t *testing.T) { + c := newPackCache(10) + + id := restic.NewRandomID() + + // get bigger range first + rd, err := c.get(id, 5, 5, func(offset int64, length int, wr io.WriteSeeker) error { + wr.Write([]byte{1, 2, 3, 4, 5}) + return nil + }) + rtest.OK(t, err) + rd.Close() + + // invalid "resize" requests + _, err = c.get(id, 5, 10, func(offset int64, length int, wr io.WriteSeeker) error { + t.Error("unexpected pack load") + return nil + }) + assertNotOK(t, "resize cached record", err) + + // invalid before cached range request + _, err = c.get(id, 0, 5, func(offset int64, length int, wr io.WriteSeeker) error { + t.Error("unexpected pack load") + return nil + }) + assertNotOK(t, "before cached range request", err) + + // invalid after cached range request + _, err = c.get(id, 10, 5, func(offset int64, length int, wr io.WriteSeeker) error { + t.Error("unexpected pack load") + return nil + }) + assertNotOK(t, "after cached range request", err) + + // now get smaller "nested" range + rd, err = c.get(id, 7, 1, func(offset int64, length int, wr io.WriteSeeker) error { + t.Error("unexpected pack load") + return nil + }) + rtest.OK(t, err) + + // assert expected data + buf := make([]byte, 1) + rd.ReadAt(buf, 7) + rtest.Equals(t, byte(3), buf[0]) + _, err = rd.ReadAt(buf, 0) + assertNotOK(t, "read before downsized pack range", err) + _, err = rd.ReadAt(buf, 9) + assertNotOK(t, "read after downsized pack range", err) + + // can't request downsized record again + _, err = c.get(id, 7, 1, func(offset int64, length int, wr io.WriteSeeker) error { + t.Error("unexpected pack load") + return nil + }) + assertNotOK(t, "double-allocation of cache record subrange", err) + + // can't request another subrange of the original record + _, err = c.get(id, 6, 1, func(offset int64, length int, wr io.WriteSeeker) error { + t.Error("unexpected pack load") + return nil + }) + assertNotOK(t, "allocation of another subrange of cache record", err) + + // release downsized record and assert the original is back in the cache + rd.Close() + rd, err = 
c.get(id, 5, 5, func(offset int64, length int, wr io.WriteSeeker) error {
+		t.Error("unexpected pack load")
+		return nil
+	})
+	rtest.OK(t, err)
+	rd.Close()
+}
+
+func TestPackCacheFailedDownload(t *testing.T) {
+	c := newPackCache(10)
+	assertEmpty := func() {
+		rtest.Equals(t, 0, len(c.cachedPacks))
+		rtest.Equals(t, 10, c.capacity)
+		rtest.Equals(t, 0, c.reservedCapacity)
+		rtest.Equals(t, 0, c.allocatedCapacity)
+	}
+
+	_, err := c.get(restic.NewRandomID(), 0, 5, func(offset int64, length int, wr io.WriteSeeker) error {
+		return errors.Errorf("expected induced test error")
+	})
+	assertNotOK(t, "not enough bytes read", err)
+	assertEmpty()
+
+	_, err = c.get(restic.NewRandomID(), 0, 5, func(offset int64, length int, wr io.WriteSeeker) error {
+		wr.Write([]byte{1})
+		return nil
+	})
+	assertNotOK(t, "not enough bytes read", err)
+	assertEmpty()
+
+	_, err = c.get(restic.NewRandomID(), 0, 5, func(offset int64, length int, wr io.WriteSeeker) error {
+		wr.Write([]byte{1, 2, 3, 4, 5, 6})
+		return nil
+	})
+	assertNotOK(t, "too many bytes read", err)
+	assertEmpty()
+}
+
+func TestPackCacheInvalidRequests(t *testing.T) {
+	c := newPackCache(10)
+
+	id := restic.NewRandomID()
+
+	// remove and double-close of a reserved pack must fail
+	rd, _ := c.get(id, 0, 1, func(offset int64, length int, wr io.WriteSeeker) error {
+		wr.Write([]byte{1})
+		return nil
+	})
+	assertNotOK(t, "remove() reserved pack", c.remove(id))
+	rtest.OK(t, rd.Close())
+	assertNotOK(t, "multiple reader Close() calls", rd.Close())
+
+	// a released pack can be removed exactly once
+	rtest.OK(t, c.remove(id))
+	assertNotOK(t, "double remove() the same pack", c.remove(id))
+}
+
+func TestPackCacheRecord(t *testing.T) {
+	rd := &packCacheRecord{
+		offset: 10,
+		data:   []byte{1},
+	}
+	buf := make([]byte, 1)
+	n, err := rd.ReadAt(buf, 10)
+	rtest.OK(t, err)
+	rtest.Equals(t, 1, n)
+	rtest.Equals(t, byte(1), buf[0])
+
+	_, err = rd.ReadAt(buf, 0)
+	assertNotOK(t, "read before loaded range", err)
+
+	_, err = rd.ReadAt(buf, 11)
+	assertNotOK(t, "read after loaded range", err)
+
+	_, err = rd.ReadAt(make([]byte, 2), 10)
+	assertNotOK(t, "read more than available data", err)
+}
diff --git a/internal/restorer/packheap.go b/internal/restorer/packheap.go
new file mode 100644
index 000000000..9f8443d46
--- /dev/null
+++ b/internal/restorer/packheap.go
@@ -0,0 +1,51 @@
+package restorer
+
+// packHeap is a heap of packInfo references
+// @see https://golang.org/pkg/container/heap/
+// @see https://en.wikipedia.org/wiki/Heap_(data_structure)
+type packHeap struct {
+	elements []*packInfo
+
+	// returns true if download of any of the files is in progress
+	inprogress func(files map[*fileInfo]struct{}) bool
+}
+
+func (pq *packHeap) Len() int { return len(pq.elements) }
+
+func (pq *packHeap) Less(a, b int) bool {
+	packA, packB := pq.elements[a], pq.elements[b]
+
+	ap := pq.inprogress(packA.files)
+	bp := pq.inprogress(packB.files)
+	if ap && !bp {
+		return true
+	}
+
+	if packA.cost < packB.cost {
+		return true
+	}
+
+	return false
+}
+
+func (pq *packHeap) Swap(i, j int) {
+	pq.elements[i], pq.elements[j] = pq.elements[j], pq.elements[i]
+	pq.elements[i].index = i
+	pq.elements[j].index = j
+}
+
+func (pq *packHeap) Push(x interface{}) {
+	n := len(pq.elements)
+	item := x.(*packInfo)
+	item.index = n
+	pq.elements = append(pq.elements, item)
+}
+
+func (pq *packHeap) Pop() interface{} {
+	old := pq.elements
+	n := len(old)
+	item := old[n-1]
+	item.index = -1 // for safety
+	pq.elements = old[0 : n-1]
+	return item
+}
diff --git a/internal/restorer/packqueue.go b/internal/restorer/packqueue.go
new file mode 100644
index
000000000..fe8259846
--- /dev/null
+++ b/internal/restorer/packqueue.go
@@ -0,0 +1,224 @@
+package restorer
+
+import (
+	"container/heap"
+
+	"github.com/restic/restic/internal/debug"
+	"github.com/restic/restic/internal/restic"
+)
+
+// packQueue tracks remaining file contents restore work and decides which
+// pack to download and which files to write next.
+//
+// The packs in the queue can be in one of three states: waiting, ready and
+// in-progress.
+// Waiting packs are the packs that only have blobs from the "middle" of their
+// corresponding files and therefore cannot be used until blobs from some other
+// packs are written to the files first.
+// In-progress packs are the packs that were removed from the queue by #nextPack
+// and must first be returned to the queue before they are considered again.
+// Ready packs are the packs that can be immediately used to restore at least
+// one file. Internally ready packs are kept in a heap and are ordered
+// according to these criteria:
+// - Packs with "head" blobs of in-progress files are considered first. The
+//   idea is to complete restore of in-progress files before starting restore
+//   of other files. This is both more intuitive and also reduces the number
+//   of open file handles needed during restore.
+// - Packs with the smallest cost are considered next. Pack cost is measured
+//   in the number of other packs required before all blobs in the pack can
+//   be used and the pack can be removed from the pack cache.
+//   For example, consider a file that requires two blobs, blob1 from pack1
+//   and blob2 from pack2. The cost of pack2 is 1, because blob2 cannot be
+//   used before blob1 is available. The higher the cost, the longer the pack
+//   must be cached locally to avoid redownload.
+//
+// The pack queue implementation is NOT thread safe. All pack queue methods
+// must be called from a single goroutine AND packInfo and fileInfo instances
+// must be updated synchronously from the same goroutine.
+type packQueue struct {
+	idx filePackTraverser
+
+	packs      map[restic.ID]*packInfo // waiting and ready packs
+	inprogress map[*packInfo]struct{}  // inprogress packs
+
+	heap *packHeap // heap of ready packs
+}
+
+func newPackQueue(idx filePackTraverser, files []*fileInfo, inprogress func(files map[*fileInfo]struct{}) bool) (*packQueue, error) {
+	packs := make(map[restic.ID]*packInfo) // all packs
+
+	// create packInfo from fileInfo
+	for _, file := range files {
+		err := idx.forEachFilePack(file, func(packIdx int, packID restic.ID, _ []restic.Blob) bool {
+			pack, ok := packs[packID]
+			if !ok {
+				pack = &packInfo{
+					id:    packID,
+					index: -1,
+					files: make(map[*fileInfo]struct{}),
+				}
+				packs[packID] = pack
+			}
+			pack.files[file] = struct{}{}
+			pack.cost += packIdx
+
+			return true // keep going
+		})
+		if err != nil {
+			// repository index is messed up, can't do anything
+			return nil, err
+		}
+	}
+
+	// create packInfo heap
+	pheap := &packHeap{inprogress: inprogress}
+	headPacks := restic.NewIDSet()
+	for _, file := range files {
+		idx.forEachFilePack(file, func(packIdx int, packID restic.ID, _ []restic.Blob) bool {
+			if !headPacks.Has(packID) {
+				headPacks.Insert(packID)
+				pack := packs[packID]
+				pack.index = len(pheap.elements)
+				pheap.elements = append(pheap.elements, pack)
+			}
+			return false // only first pack
+		})
+	}
+	heap.Init(pheap)
+
+	return &packQueue{idx: idx, packs: packs, heap: pheap, inprogress: make(map[*packInfo]struct{})}, nil
+}
+
+// isEmpty returns true if the queue is empty, i.e. there are no more packs to
+// download and no more files to write to.
+func (h *packQueue) isEmpty() bool {
+	return len(h.packs) == 0 && len(h.inprogress) == 0
+}
+
+// nextPack returns the next ready pack and the corresponding files ready for
+// download and processing. The returned pack and files are marked as "in
+// progress" internally and must first be returned to the queue before they
+// are considered by #nextPack again.
+func (h *packQueue) nextPack() (*packInfo, []*fileInfo) {
+	debug.Log("Ready packs %d, outstanding packs %d, inprogress packs %d", h.heap.Len(), len(h.packs), len(h.inprogress))
+
+	if h.heap.Len() == 0 {
+		return nil, nil
+	}
+
+	pack := heap.Pop(h.heap).(*packInfo)
+	h.inprogress[pack] = struct{}{}
+	debug.Log("Popped pack %s (%d files), heap size=%d", pack.id.Str(), len(pack.files), len(h.heap.elements))
+	var files []*fileInfo
+	for file := range pack.files {
+		h.idx.forEachFilePack(file, func(packIdx int, packID restic.ID, packBlobs []restic.Blob) bool {
+			debug.Log("Pack #%d %s (%d blobs) used by %s", packIdx, packID.Str(), len(packBlobs), file.location)
+			if pack.id == packID {
+				files = append(files, file)
+			}
+			return false // only interested in the first pack here
+		})
+	}
+
+	return pack, files
+}
+
+// requeuePack conditionally adds back to the queue a pack previously returned
+// by #nextPack.
+// If the pack is needed to restore any incomplete files, it adds the pack to
+// the queue and adjusts the order of all affected packs in the queue. It has
+// no effect if the pack is not required to restore any files.
+// Returns true if the pack was added to the queue, false otherwise.
+func (h *packQueue) requeuePack(pack *packInfo, success []*fileInfo, failure []*fileInfo) bool {
+	debug.Log("Requeue pack %s (%d/%d/%d files/success/failure)", pack.id.Str(), len(pack.files), len(success), len(failure))
+
+	// maintain the inprogress pack set
+	delete(h.inprogress, pack)
+
+	affectedPacks := make(map[*packInfo]struct{})
+	affectedPacks[pack] = struct{}{} // this pack is always affected
+
+	// apply download success/failure to the packs
+	onFailure := func(file *fileInfo) {
+		h.idx.forEachFilePack(file, func(packIdx int, packID restic.ID, _ []restic.Blob) bool {
+			pack := h.packs[packID]
+			delete(pack.files, file)
+			pack.cost -= packIdx
+			affectedPacks[pack] = struct{}{}
+			return true // keep going
+		})
+	}
+	for _, file := range failure {
+		onFailure(file)
+	}
+	onSuccess := func(pack *packInfo, file *fileInfo) {
+		remove := true
+		h.idx.forEachFilePack(file, func(packIdx int, packID restic.ID, _ []restic.Blob) bool {
+			if packID.Equal(pack.id) {
+				// the pack has more blobs required by the file
+				remove = false
+			}
+			otherPack := h.packs[packID]
+			otherPack.cost--
+			affectedPacks[otherPack] = struct{}{}
+			return true // keep going
+		})
+		if remove {
+			delete(pack.files, file)
+		}
+	}
+	for _, file := range success {
+		onSuccess(pack, file)
+	}
+
+	// drop/update affected packs
+	isReady := func(affectedPack *packInfo) (ready bool) {
+		for file := range affectedPack.files {
+			h.idx.forEachFilePack(file, func(packIdx int, packID restic.ID, _ []restic.Blob) bool {
+				if packID.Equal(affectedPack.id) {
+					ready = true
+				}
+				return false // only file's first pack matters
+			})
+			if ready {
+				break
+			}
+		}
+		return ready
+	}
+	for affectedPack := range affectedPacks {
+		if _, inprogress := h.inprogress[affectedPack]; !inprogress {
+			if len(affectedPack.files) == 0 {
+				// drop the pack if it isn't inprogress and has no files that need it
+				if affectedPack.index >= 0 {
+					// This can't happen unless there is a bug elsewhere:
+					// - "current" pack isn't
in the heap, hence its index must be < 0 + // - "other" packs can't be ready (i.e. in heap) unless they have other files + // in which case len(affectedPack.files) must be > 0 + debug.Log("corrupted ready heap: removed unexpected ready pack %s", affectedPack.id.Str()) + heap.Remove(h.heap, affectedPack.index) + } + delete(h.packs, affectedPack.id) + } else { + ready := isReady(affectedPack) + switch { + case ready && affectedPack.index < 0: + heap.Push(h.heap, affectedPack) + case ready && affectedPack.index >= 0: + heap.Fix(h.heap, affectedPack.index) + case !ready && affectedPack.index >= 0: + // This can't happen unless there is a bug elsewhere: + // - "current" pack isn't in the heap, hence its index must be < 0 + // - "other" packs can't have same head blobs as the "current" pack, + // hence "other" packs can't change their readiness + debug.Log("corrupted ready heap: removed unexpected waiting pack %s", affectedPack.id.Str()) + heap.Remove(h.heap, affectedPack.index) + case !ready && affectedPack.index < 0: + // do nothing + } + } + } + } + + return len(pack.files) > 0 +} diff --git a/internal/restorer/packqueue_test.go b/internal/restorer/packqueue_test.go new file mode 100644 index 000000000..880f7037a --- /dev/null +++ b/internal/restorer/packqueue_test.go @@ -0,0 +1,236 @@ +package restorer + +import ( + "testing" + + "github.com/restic/restic/internal/restic" + rtest "github.com/restic/restic/internal/test" +) + +func processPack(t *testing.T, data *TestRepo, pack *packInfo, files []*fileInfo) { + for _, file := range files { + data.idx.forEachFilePack(file, func(packIdx int, packID restic.ID, packBlobs []restic.Blob) bool { + // assert file's head pack + rtest.Equals(t, pack.id, packID) + file.blobs = file.blobs[len(packBlobs):] + return false // only interested in the head pack + }) + } +} + +func TestPackQueueBasic(t *testing.T) { + data := newTestRepo([]TestFile{ + TestFile{ + name: "file", + blobs: []TestBlob{ + TestBlob{"data1", "pack1"}, + TestBlob{"data2", "pack2"}, + }, + }, + }) + + queue, err := newPackQueue(data.idx, data.files, func(_ map[*fileInfo]struct{}) bool { return false }) + rtest.OK(t, err) + + // assert initial queue state + rtest.Equals(t, false, queue.isEmpty()) + rtest.Equals(t, 0, queue.packs[data.packID("pack1")].cost) + rtest.Equals(t, 1, queue.packs[data.packID("pack2")].cost) + + // get first pack + pack, files := queue.nextPack() + rtest.Equals(t, "pack1", data.packName(pack)) + rtest.Equals(t, 1, len(files)) + rtest.Equals(t, false, queue.isEmpty()) + // TODO assert pack is inprogress + + // can't process the second pack until the first one is processed + { + pack, files := queue.nextPack() + rtest.Equals(t, true, pack == nil) + rtest.Equals(t, true, files == nil) + rtest.Equals(t, false, queue.isEmpty()) + } + + // requeue the pack without processing + rtest.Equals(t, true, queue.requeuePack(pack, []*fileInfo{}, []*fileInfo{})) + rtest.Equals(t, false, queue.isEmpty()) + rtest.Equals(t, 0, queue.packs[data.packID("pack1")].cost) + rtest.Equals(t, 1, queue.packs[data.packID("pack2")].cost) + + // get the first pack again + pack, files = queue.nextPack() + rtest.Equals(t, "pack1", data.packName(pack)) + rtest.Equals(t, 1, len(files)) + rtest.Equals(t, false, queue.isEmpty()) + + // process the first pack and return it to the queue + processPack(t, data, pack, files) + rtest.Equals(t, false, queue.requeuePack(pack, files, []*fileInfo{})) + rtest.Equals(t, 0, queue.packs[data.packID("pack2")].cost) + + // get the second pack + pack, files = 
queue.nextPack()
+	rtest.Equals(t, "pack2", data.packName(pack))
+	rtest.Equals(t, 1, len(files))
+	rtest.Equals(t, false, queue.isEmpty())
+
+	// process the second pack and return it to the queue
+	processPack(t, data, pack, files)
+	rtest.Equals(t, false, queue.requeuePack(pack, files, []*fileInfo{}))
+
+	// all packs processed
+	rtest.Equals(t, true, queue.isEmpty())
+}
+
+func TestPackQueueFailedFile(t *testing.T) {
+	// the point of this test is to assert that requeuePack removes
+	// all references to files that failed restore
+
+	data := newTestRepo([]TestFile{
+		TestFile{
+			name: "file",
+			blobs: []TestBlob{
+				TestBlob{"data1", "pack1"},
+				TestBlob{"data2", "pack2"},
+			},
+		},
+	})
+
+	queue, err := newPackQueue(data.idx, data.files, func(_ map[*fileInfo]struct{}) bool { return false })
+	rtest.OK(t, err)
+
+	pack, files := queue.nextPack()
+	rtest.Equals(t, false, queue.requeuePack(pack, []*fileInfo{}, files /*failed*/))
+	rtest.Equals(t, true, queue.isEmpty())
+}
+
+func TestPackQueueOrderingCost(t *testing.T) {
+	// assert pack1 is selected before pack2:
+	// pack1 is ready to restore file1, pack2 is ready to restore file2,
+	// but pack2 cannot be immediately used to restore file1
+
+	data := newTestRepo([]TestFile{
+		TestFile{
+			name: "file1",
+			blobs: []TestBlob{
+				TestBlob{"data1", "pack1"},
+				TestBlob{"data2", "pack2"},
+			},
+		},
+		TestFile{
+			name: "file2",
+			blobs: []TestBlob{
+				TestBlob{"data2", "pack2"},
+			},
+		},
+	})
+
+	queue, err := newPackQueue(data.idx, data.files, func(_ map[*fileInfo]struct{}) bool { return false })
+	rtest.OK(t, err)
+
+	// assert initial pack costs
+	rtest.Equals(t, 0, data.pack(queue, "pack1").cost)
+	rtest.Equals(t, 0, data.pack(queue, "pack1").index) // head of the heap
+	rtest.Equals(t, 1, data.pack(queue, "pack2").cost)
+	rtest.Equals(t, 1, data.pack(queue, "pack2").index)
+
+	pack, files := queue.nextPack()
+	// assert selected pack and queue state
+	rtest.Equals(t, "pack1", data.packName(pack))
+	// process the pack
+	processPack(t, data, pack, files)
+	rtest.Equals(t, false, queue.requeuePack(pack, files, []*fileInfo{}))
+}
+
+func TestPackQueueOrderingInprogress(t *testing.T) {
+	// finish restoring one file before starting another
+
+	data := newTestRepo([]TestFile{
+		TestFile{
+			name: "file1",
+			blobs: []TestBlob{
+				TestBlob{"data1-1", "pack1-1"},
+				TestBlob{"data1-2", "pack1-2"},
+			},
+		},
+		TestFile{
+			name: "file2",
+			blobs: []TestBlob{
+				TestBlob{"data2-1", "pack2-1"},
+				TestBlob{"data2-2", "pack2-2"},
+			},
+		},
+	})
+
+	var inprogress *fileInfo
+	queue, err := newPackQueue(data.idx, data.files, func(files map[*fileInfo]struct{}) bool {
+		_, found := files[inprogress]
+		return found
+	})
+	rtest.OK(t, err)
+
+	// first pack of a file
+	pack, files := queue.nextPack()
+	rtest.Equals(t, 1, len(files))
+	file := files[0]
+	processPack(t, data, pack, files)
+	inprogress = files[0]
+	queue.requeuePack(pack, files, []*fileInfo{})
+
+	// second pack of the same file
+	pack, files = queue.nextPack()
+	rtest.Equals(t, 1, len(files))
+	rtest.Equals(t, true, file == files[0]) // same file as before
+	processPack(t, data, pack, files)
+	inprogress = nil
+	queue.requeuePack(pack, files, []*fileInfo{})
+
+	// first pack of the second file
+	pack, files = queue.nextPack()
+	rtest.Equals(t, 1, len(files))
+	rtest.Equals(t, false, file == files[0]) // a different file than before
+}
+
+func TestPackQueuePackMultiuse(t *testing.T) {
+	// the same pack is required multiple times to restore the same file
+
+	data := newTestRepo([]TestFile{
+		TestFile{
+
name: "file", + blobs: []TestBlob{ + TestBlob{"data1", "pack1"}, + TestBlob{"data2", "pack2"}, + TestBlob{"data3", "pack1"}, // pack1 reuse, new blob + TestBlob{"data2", "pack2"}, // pack2 reuse, same blob + }, + }, + }) + + queue, err := newPackQueue(data.idx, data.files, func(_ map[*fileInfo]struct{}) bool { return false }) + rtest.OK(t, err) + + pack, files := queue.nextPack() + rtest.Equals(t, "pack1", data.packName(pack)) + rtest.Equals(t, 1, len(pack.files)) + processPack(t, data, pack, files) + rtest.Equals(t, true, queue.requeuePack(pack, files, []*fileInfo{})) + + pack, files = queue.nextPack() + rtest.Equals(t, "pack2", data.packName(pack)) + rtest.Equals(t, 1, len(pack.files)) + processPack(t, data, pack, files) + rtest.Equals(t, true, queue.requeuePack(pack, files, []*fileInfo{})) + + pack, files = queue.nextPack() + rtest.Equals(t, "pack1", data.packName(pack)) + processPack(t, data, pack, files) + rtest.Equals(t, false, queue.requeuePack(pack, files, []*fileInfo{})) + + pack, files = queue.nextPack() + rtest.Equals(t, "pack2", data.packName(pack)) + processPack(t, data, pack, files) + rtest.Equals(t, false, queue.requeuePack(pack, files, []*fileInfo{})) + + rtest.Equals(t, true, queue.isEmpty()) +} diff --git a/internal/restorer/restorer.go b/internal/restorer/restorer.go new file mode 100644 index 000000000..bbef5083b --- /dev/null +++ b/internal/restorer/restorer.go @@ -0,0 +1,342 @@ +package restorer + +import ( + "context" + "os" + "path/filepath" + + "github.com/restic/restic/internal/crypto" + "github.com/restic/restic/internal/errors" + + "github.com/restic/restic/internal/debug" + "github.com/restic/restic/internal/fs" + "github.com/restic/restic/internal/restic" +) + +// Restorer is used to restore a snapshot to a directory. +type Restorer struct { + repo restic.Repository + sn *restic.Snapshot + + Error func(location string, err error) error + SelectFilter func(item string, dstpath string, node *restic.Node) (selectedForRestore bool, childMayBeSelected bool) +} + +var restorerAbortOnAllErrors = func(location string, err error) error { return err } + +// NewRestorer creates a restorer preloaded with the content from the snapshot id. +func NewRestorer(repo restic.Repository, id restic.ID) (*Restorer, error) { + r := &Restorer{ + repo: repo, + Error: restorerAbortOnAllErrors, + SelectFilter: func(string, string, *restic.Node) (bool, bool) { return true, true }, + } + + var err error + + r.sn, err = restic.LoadSnapshot(context.TODO(), repo, id) + if err != nil { + return nil, err + } + + return r, nil +} + +type treeVisitor struct { + enterDir func(node *restic.Node, target, location string) error + visitNode func(node *restic.Node, target, location string) error + leaveDir func(node *restic.Node, target, location string) error +} + +// traverseTree traverses a tree from the repo and calls treeVisitor. +// target is the path in the file system, location within the snapshot. +func (res *Restorer) traverseTree(ctx context.Context, target, location string, treeID restic.ID, visitor treeVisitor) error { + debug.Log("%v %v %v", target, location, treeID) + tree, err := res.repo.LoadTree(ctx, treeID) + if err != nil { + debug.Log("error loading tree %v: %v", treeID, err) + return res.Error(location, err) + } + + for _, node := range tree.Nodes { + + // ensure that the node name does not contain anything that refers to a + // top-level directory. 
+		nodeName := filepath.Base(filepath.Join(string(filepath.Separator), node.Name))
+		if nodeName != node.Name {
+			debug.Log("node %q has invalid name %q", node.Name, nodeName)
+			err := res.Error(location, errors.Errorf("invalid child node name %s", node.Name))
+			if err != nil {
+				return err
+			}
+			continue
+		}
+
+		nodeTarget := filepath.Join(target, nodeName)
+		nodeLocation := filepath.Join(location, nodeName)
+
+		if target == nodeTarget || !fs.HasPathPrefix(target, nodeTarget) {
+			debug.Log("target: %v %v", target, nodeTarget)
+			debug.Log("node %q has invalid target path %q", node.Name, nodeTarget)
+			err := res.Error(nodeLocation, errors.New("node has invalid path"))
+			if err != nil {
+				return err
+			}
+			continue
+		}
+
+		// sockets cannot be restored
+		if node.Type == "socket" {
+			continue
+		}
+
+		selectedForRestore, childMayBeSelected := res.SelectFilter(nodeLocation, nodeTarget, node)
+		debug.Log("SelectFilter returned %v %v", selectedForRestore, childMayBeSelected)
+
+		sanitizeError := func(err error) error {
+			if err != nil {
+				err = res.Error(nodeLocation, err)
+			}
+			return err
+		}
+
+		if node.Type == "dir" {
+			if node.Subtree == nil {
+				return errors.Errorf("Dir without subtree in tree %v", treeID.Str())
+			}
+
+			if selectedForRestore {
+				err = sanitizeError(visitor.enterDir(node, nodeTarget, nodeLocation))
+				if err != nil {
+					return err
+				}
+			}
+
+			if childMayBeSelected {
+				err = sanitizeError(res.traverseTree(ctx, nodeTarget, nodeLocation, *node.Subtree, visitor))
+				if err != nil {
+					return err
+				}
+			}
+
+			if selectedForRestore {
+				err = sanitizeError(visitor.leaveDir(node, nodeTarget, nodeLocation))
+				if err != nil {
+					return err
+				}
+			}
+
+			continue
+		}
+
+		if selectedForRestore {
+			err = sanitizeError(visitor.visitNode(node, nodeTarget, nodeLocation))
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+func (res *Restorer) restoreNodeTo(ctx context.Context, node *restic.Node, target, location string) error {
+	debug.Log("restoreNode %v %v %v", node.Name, target, location)
+
+	err := node.CreateAt(ctx, target, res.repo)
+	if err != nil {
+		debug.Log("node.CreateAt(%s) error %v", target, err)
+	}
+	if err == nil {
+		err = res.restoreNodeMetadataTo(node, target, location)
+	}
+
+	return err
+}
+
+func (res *Restorer) restoreNodeMetadataTo(node *restic.Node, target, location string) error {
+	debug.Log("restoreNodeMetadata %v %v %v", node.Name, target, location)
+	err := node.RestoreMetadata(target)
+	if err != nil {
+		debug.Log("node.RestoreMetadata(%s) error %v", target, err)
+	}
+	return err
+}
+
+func (res *Restorer) restoreHardlinkAt(node *restic.Node, target, path, location string) error {
+	if err := fs.Remove(path); !os.IsNotExist(err) {
+		return errors.Wrap(err, "RemoveCreateHardlink")
+	}
+	err := fs.Link(target, path)
+	if err != nil {
+		return errors.Wrap(err, "CreateHardlink")
+	}
+	// TODO investigate if hardlinks have separate metadata on any supported system
+	return res.restoreNodeMetadataTo(node, path, location)
+}
+
+func (res *Restorer) restoreEmptyFileAt(node *restic.Node, target, location string) error {
+	wr, err := os.OpenFile(target, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0600)
+	if err != nil {
+		return err
+	}
+	err = wr.Close()
+	if err != nil {
+		return err
+	}
+
+	return res.restoreNodeMetadataTo(node, target, location)
+}
+
+// RestoreTo creates the directories and files in the snapshot below dst.
+// Before an item is created, res.SelectFilter is called.
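+// The restore happens in two passes over the snapshot tree: the first pass
+// creates directories and downloads file contents, the second pass restores
+// special files, hardlinks, empty files and filesystem metadata.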
+func (res *Restorer) RestoreTo(ctx context.Context, dst string) error { + var err error + if !filepath.IsAbs(dst) { + dst, err = filepath.Abs(dst) + if err != nil { + return errors.Wrap(err, "Abs") + } + } + + restoreNodeMetadata := func(node *restic.Node, target, location string) error { + return res.restoreNodeMetadataTo(node, target, location) + } + noop := func(node *restic.Node, target, location string) error { return nil } + + idx := restic.NewHardlinkIndex() + + filerestorer := newFileRestorer(dst, res.repo.Backend().Load, res.repo.Key(), filePackTraverser{lookup: res.repo.Index().Lookup}) + + // first tree pass: create directories and collect all files to restore + err = res.traverseTree(ctx, dst, string(filepath.Separator), *res.sn.Tree, treeVisitor{ + enterDir: func(node *restic.Node, target, location string) error { + // create dir with default permissions + // #leaveDir restores dir metadata after visiting all children + return fs.MkdirAll(target, 0700) + }, + + visitNode: func(node *restic.Node, target, location string) error { + // create parent dir with default permissions + // second pass #leaveDir restores dir metadata after visiting/restoring all children + err := fs.MkdirAll(filepath.Dir(target), 0700) + if err != nil { + return err + } + + if node.Type != "file" { + return nil + } + + if node.Size == 0 { + return nil // deal with empty files later + } + + if node.Links > 1 { + if idx.Has(node.Inode, node.DeviceID) { + return nil + } + idx.Add(node.Inode, node.DeviceID, location) + } + + filerestorer.addFile(location, node.Content) + + return nil + }, + leaveDir: noop, + }) + if err != nil { + return err + } + + err = filerestorer.restoreFiles(ctx, func(location string, err error) { res.Error(location, err) }) + if err != nil { + return err + } + + // second tree pass: restore special files and filesystem metadata + return res.traverseTree(ctx, dst, string(filepath.Separator), *res.sn.Tree, treeVisitor{ + enterDir: noop, + visitNode: func(node *restic.Node, target, location string) error { + if node.Type != "file" { + return res.restoreNodeTo(ctx, node, target, location) + } + + // create empty files, but not hardlinks to empty files + if node.Size == 0 && (node.Links < 2 || !idx.Has(node.Inode, node.DeviceID)) { + if node.Links > 1 { + idx.Add(node.Inode, node.DeviceID, location) + } + return res.restoreEmptyFileAt(node, target, location) + } + + if idx.Has(node.Inode, node.DeviceID) && idx.GetFilename(node.Inode, node.DeviceID) != location { + return res.restoreHardlinkAt(node, filerestorer.targetPath(idx.GetFilename(node.Inode, node.DeviceID)), target, location) + } + + return res.restoreNodeMetadataTo(node, target, location) + }, + leaveDir: restoreNodeMetadata, + }) +} + +// Snapshot returns the snapshot this restorer is configured to use. +func (res *Restorer) Snapshot() *restic.Snapshot { + return res.sn +} + +// VerifyFiles reads all snapshot files and verifies their contents +func (res *Restorer) VerifyFiles(ctx context.Context, dst string) (int, error) { + // TODO multithreaded? 
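+	// walks the snapshot tree and checks each restored file's size and
+	// content hashes against the blobs recorded in the repository index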
+ + count := 0 + err := res.traverseTree(ctx, dst, string(filepath.Separator), *res.sn.Tree, treeVisitor{ + enterDir: func(node *restic.Node, target, location string) error { return nil }, + visitNode: func(node *restic.Node, target, location string) error { + if node.Type != "file" { + return nil + } + + count++ + stat, err := os.Stat(target) + if err != nil { + return err + } + if int64(node.Size) != stat.Size() { + return errors.Errorf("Invalid file size: expected %d got %d", node.Size, stat.Size()) + } + + file, err := os.Open(target) + if err != nil { + return err + } + + offset := int64(0) + for _, blobID := range node.Content { + blobs, _ := res.repo.Index().Lookup(blobID, restic.DataBlob) + length := blobs[0].Length - uint(crypto.Extension) + buf := make([]byte, length) // TODO do I want to reuse the buffer somehow? + _, err = file.ReadAt(buf, offset) + if err != nil { + _ = file.Close() + return err + } + if !blobID.Equal(restic.Hash(buf)) { + _ = file.Close() + return errors.Errorf("Unexpected contents starting at offset %d", offset) + } + offset += int64(length) + } + + err = file.Close() + if err != nil { + return err + } + + return nil + }, + leaveDir: func(node *restic.Node, target, location string) error { return nil }, + }) + + return count, err +} diff --git a/internal/restorer/restorer_test.go b/internal/restorer/restorer_test.go new file mode 100644 index 000000000..70136bfe3 --- /dev/null +++ b/internal/restorer/restorer_test.go @@ -0,0 +1,696 @@ +package restorer + +import ( + "bytes" + "context" + "io/ioutil" + "os" + "path/filepath" + "strings" + "testing" + "time" + + "github.com/restic/restic/internal/fs" + "github.com/restic/restic/internal/repository" + "github.com/restic/restic/internal/restic" + rtest "github.com/restic/restic/internal/test" +) + +type Node interface{} + +type Snapshot struct { + Nodes map[string]Node + treeID restic.ID +} + +type File struct { + Data string + Links uint64 + Inode uint64 +} + +type Dir struct { + Nodes map[string]Node + Mode os.FileMode +} + +func saveFile(t testing.TB, repo restic.Repository, node File) restic.ID { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + id, err := repo.SaveBlob(ctx, restic.DataBlob, []byte(node.Data), restic.ID{}) + if err != nil { + t.Fatal(err) + } + + return id +} + +func saveDir(t testing.TB, repo restic.Repository, nodes map[string]Node, inode uint64) restic.ID { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + tree := &restic.Tree{} + for name, n := range nodes { + inode++ + switch node := n.(type) { + case File: + fi := n.(File).Inode + if fi == 0 { + fi = inode + } + lc := n.(File).Links + if lc == 0 { + lc = 1 + } + fc := []restic.ID{} + if len(n.(File).Data) > 0 { + fc = append(fc, saveFile(t, repo, node)) + } + tree.Insert(&restic.Node{ + Type: "file", + Mode: 0644, + Name: name, + UID: uint32(os.Getuid()), + GID: uint32(os.Getgid()), + Content: fc, + Size: uint64(len(n.(File).Data)), + Inode: fi, + Links: lc, + }) + case Dir: + id := saveDir(t, repo, node.Nodes, inode) + + mode := node.Mode + if mode == 0 { + mode = 0755 + } + + tree.Insert(&restic.Node{ + Type: "dir", + Mode: mode, + Name: name, + UID: uint32(os.Getuid()), + GID: uint32(os.Getgid()), + Subtree: &id, + }) + default: + t.Fatalf("unknown node type %T", node) + } + } + + id, err := repo.SaveTree(ctx, tree) + if err != nil { + t.Fatal(err) + } + + return id +} + +func saveSnapshot(t testing.TB, repo restic.Repository, snapshot Snapshot) (*restic.Snapshot, restic.ID) 
{ + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + treeID := saveDir(t, repo, snapshot.Nodes, 1000) + + err := repo.Flush(ctx) + if err != nil { + t.Fatal(err) + } + + err = repo.SaveIndex(ctx) + if err != nil { + t.Fatal(err) + } + + sn, err := restic.NewSnapshot([]string{"test"}, nil, "", time.Now()) + if err != nil { + t.Fatal(err) + } + + sn.Tree = &treeID + id, err := repo.SaveJSONUnpacked(ctx, restic.SnapshotFile, sn) + if err != nil { + t.Fatal(err) + } + + return sn, id +} + +// toSlash converts the OS specific path dir to a slash-separated path. +func toSlash(dir string) string { + data := strings.Split(dir, string(filepath.Separator)) + return strings.Join(data, "/") +} + +func TestRestorer(t *testing.T) { + var tests = []struct { + Snapshot + Files map[string]string + ErrorsMust map[string]map[string]struct{} + ErrorsMay map[string]map[string]struct{} + Select func(item string, dstpath string, node *restic.Node) (selectForRestore bool, childMayBeSelected bool) + }{ + // valid test cases + { + Snapshot: Snapshot{ + Nodes: map[string]Node{ + "foo": File{Data: "content: foo\n"}, + "dirtest": Dir{ + Nodes: map[string]Node{ + "file": File{Data: "content: file\n"}, + }, + }, + }, + }, + Files: map[string]string{ + "foo": "content: foo\n", + "dirtest/file": "content: file\n", + }, + }, + { + Snapshot: Snapshot{ + Nodes: map[string]Node{ + "top": File{Data: "toplevel file"}, + "dir": Dir{ + Nodes: map[string]Node{ + "file": File{Data: "file in dir"}, + "subdir": Dir{ + Nodes: map[string]Node{ + "file": File{Data: "file in subdir"}, + }, + }, + }, + }, + }, + }, + Files: map[string]string{ + "top": "toplevel file", + "dir/file": "file in dir", + "dir/subdir/file": "file in subdir", + }, + }, + { + Snapshot: Snapshot{ + Nodes: map[string]Node{ + "dir": Dir{ + Mode: 0444, + }, + "file": File{Data: "top-level file"}, + }, + }, + Files: map[string]string{ + "file": "top-level file", + }, + }, + { + Snapshot: Snapshot{ + Nodes: map[string]Node{ + "dir": Dir{ + Mode: 0555, + Nodes: map[string]Node{ + "file": File{Data: "file in dir"}, + }, + }, + }, + }, + Files: map[string]string{ + "dir/file": "file in dir", + }, + }, + { + Snapshot: Snapshot{ + Nodes: map[string]Node{ + "topfile": File{Data: "top-level file"}, + }, + }, + Files: map[string]string{ + "topfile": "top-level file", + }, + }, + { + Snapshot: Snapshot{ + Nodes: map[string]Node{ + "dir": Dir{ + Nodes: map[string]Node{ + "file": File{Data: "content: file\n"}, + }, + }, + }, + }, + Files: map[string]string{ + "dir/file": "content: file\n", + }, + Select: func(item, dstpath string, node *restic.Node) (selectedForRestore bool, childMayBeSelected bool) { + switch item { + case filepath.FromSlash("/dir"): + childMayBeSelected = true + case filepath.FromSlash("/dir/file"): + selectedForRestore = true + childMayBeSelected = true + } + + return selectedForRestore, childMayBeSelected + }, + }, + + // test cases with invalid/constructed names + { + Snapshot: Snapshot{ + Nodes: map[string]Node{ + `..\test`: File{Data: "foo\n"}, + `..\..\foo\..\bar\..\xx\test2`: File{Data: "test2\n"}, + }, + }, + ErrorsMay: map[string]map[string]struct{}{ + `/`: { + `invalid child node name ..\test`: struct{}{}, + `invalid child node name ..\..\foo\..\bar\..\xx\test2`: struct{}{}, + }, + }, + }, + { + Snapshot: Snapshot{ + Nodes: map[string]Node{ + `../test`: File{Data: "foo\n"}, + `../../foo/../bar/../xx/test2`: File{Data: "test2\n"}, + }, + }, + ErrorsMay: map[string]map[string]struct{}{ + `/`: { + `invalid child node name 
../test`: struct{}{}, + `invalid child node name ../../foo/../bar/../xx/test2`: struct{}{}, + }, + }, + }, + { + Snapshot: Snapshot{ + Nodes: map[string]Node{ + "top": File{Data: "toplevel file"}, + "x": Dir{ + Nodes: map[string]Node{ + "file1": File{Data: "file1"}, + "..": Dir{ + Nodes: map[string]Node{ + "file2": File{Data: "file2"}, + "..": Dir{ + Nodes: map[string]Node{ + "file2": File{Data: "file2"}, + }, + }, + }, + }, + }, + }, + }, + }, + Files: map[string]string{ + "top": "toplevel file", + }, + ErrorsMust: map[string]map[string]struct{}{ + `/x`: { + `invalid child node name ..`: struct{}{}, + }, + }, + }, + } + + for _, test := range tests { + t.Run("", func(t *testing.T) { + repo, cleanup := repository.TestRepository(t) + defer cleanup() + _, id := saveSnapshot(t, repo, test.Snapshot) + t.Logf("snapshot saved as %v", id.Str()) + + res, err := NewRestorer(repo, id) + if err != nil { + t.Fatal(err) + } + + tempdir, cleanup := rtest.TempDir(t) + defer cleanup() + + // make sure we're creating a new subdir of the tempdir + tempdir = filepath.Join(tempdir, "target") + + res.SelectFilter = func(item, dstpath string, node *restic.Node) (selectedForRestore bool, childMayBeSelected bool) { + t.Logf("restore %v to %v", item, dstpath) + if !fs.HasPathPrefix(tempdir, dstpath) { + t.Errorf("would restore %v to %v, which is not within the target dir %v", + item, dstpath, tempdir) + return false, false + } + + if test.Select != nil { + return test.Select(item, dstpath, node) + } + + return true, true + } + + errors := make(map[string]map[string]struct{}) + res.Error = func(location string, err error) error { + location = toSlash(location) + t.Logf("restore returned error for %q: %v", location, err) + if errors[location] == nil { + errors[location] = make(map[string]struct{}) + } + errors[location][err.Error()] = struct{}{} + return nil + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + err = res.RestoreTo(ctx, tempdir) + if err != nil { + t.Fatal(err) + } + + for location, expectedErrors := range test.ErrorsMust { + actualErrors, ok := errors[location] + if !ok { + t.Errorf("expected error(s) for %v, found none", location) + continue + } + + rtest.Equals(t, expectedErrors, actualErrors) + + delete(errors, location) + } + + for location, expectedErrors := range test.ErrorsMay { + actualErrors, ok := errors[location] + if !ok { + continue + } + + rtest.Equals(t, expectedErrors, actualErrors) + + delete(errors, location) + } + + for filename, err := range errors { + t.Errorf("unexpected error for %v found: %v", filename, err) + } + + for filename, content := range test.Files { + data, err := ioutil.ReadFile(filepath.Join(tempdir, filepath.FromSlash(filename))) + if err != nil { + t.Errorf("unable to read file %v: %v", filename, err) + continue + } + + if !bytes.Equal(data, []byte(content)) { + t.Errorf("file %v has wrong content: want %q, got %q", filename, content, data) + } + } + }) + } +} + +func TestRestorerRelative(t *testing.T) { + var tests = []struct { + Snapshot + Files map[string]string + }{ + { + Snapshot: Snapshot{ + Nodes: map[string]Node{ + "foo": File{Data: "content: foo\n"}, + "dirtest": Dir{ + Nodes: map[string]Node{ + "file": File{Data: "content: file\n"}, + }, + }, + }, + }, + Files: map[string]string{ + "foo": "content: foo\n", + "dirtest/file": "content: file\n", + }, + }, + } + + for _, test := range tests { + t.Run("", func(t *testing.T) { + repo, cleanup := repository.TestRepository(t) + defer cleanup() + + _, id := saveSnapshot(t, repo, 
test.Snapshot) + t.Logf("snapshot saved as %v", id.Str()) + + res, err := NewRestorer(repo, id) + if err != nil { + t.Fatal(err) + } + + tempdir, cleanup := rtest.TempDir(t) + defer cleanup() + + cleanup = fs.TestChdir(t, tempdir) + defer cleanup() + + errors := make(map[string]string) + res.Error = func(location string, err error) error { + t.Logf("restore returned error for %q: %v", location, err) + errors[location] = err.Error() + return nil + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + err = res.RestoreTo(ctx, "restore") + if err != nil { + t.Fatal(err) + } + + for filename, err := range errors { + t.Errorf("unexpected error for %v found: %v", filename, err) + } + + for filename, content := range test.Files { + data, err := ioutil.ReadFile(filepath.Join(tempdir, "restore", filepath.FromSlash(filename))) + if err != nil { + t.Errorf("unable to read file %v: %v", filename, err) + continue + } + + if !bytes.Equal(data, []byte(content)) { + t.Errorf("file %v has wrong content: want %q, got %q", filename, content, data) + } + } + }) + } +} + +type TraverseTreeCheck func(testing.TB) treeVisitor + +type TreeVisit struct { + funcName string // name of the function + location string // location passed to the function +} + +func checkVisitOrder(list []TreeVisit) TraverseTreeCheck { + var pos int + + return func(t testing.TB) treeVisitor { + check := func(funcName string) func(*restic.Node, string, string) error { + return func(node *restic.Node, target, location string) error { + if pos >= len(list) { + t.Errorf("step %v, %v(%v): expected no more than %d function calls", pos, funcName, location, len(list)) + pos++ + return nil + } + + v := list[pos] + + if v.funcName != funcName { + t.Errorf("step %v, location %v: want function %v, but %v was called", + pos, location, v.funcName, funcName) + } + + if location != filepath.FromSlash(v.location) { + t.Errorf("step %v: want location %v, got %v", pos, list[pos].location, location) + } + + pos++ + return nil + } + } + + return treeVisitor{ + enterDir: check("enterDir"), + visitNode: check("visitNode"), + leaveDir: check("leaveDir"), + } + } +} + +func TestRestorerTraverseTree(t *testing.T) { + var tests = []struct { + Snapshot + Select func(item string, dstpath string, node *restic.Node) (selectForRestore bool, childMayBeSelected bool) + Visitor TraverseTreeCheck + }{ + { + // select everything + Snapshot: Snapshot{ + Nodes: map[string]Node{ + "dir": Dir{Nodes: map[string]Node{ + "otherfile": File{Data: "x"}, + "subdir": Dir{Nodes: map[string]Node{ + "file": File{Data: "content: file\n"}, + }}, + }}, + "foo": File{Data: "content: foo\n"}, + }, + }, + Select: func(item string, dstpath string, node *restic.Node) (selectForRestore bool, childMayBeSelected bool) { + return true, true + }, + Visitor: checkVisitOrder([]TreeVisit{ + {"enterDir", "/dir"}, + {"visitNode", "/dir/otherfile"}, + {"enterDir", "/dir/subdir"}, + {"visitNode", "/dir/subdir/file"}, + {"leaveDir", "/dir/subdir"}, + {"leaveDir", "/dir"}, + {"visitNode", "/foo"}, + }), + }, + + // select only the top-level file + { + Snapshot: Snapshot{ + Nodes: map[string]Node{ + "dir": Dir{Nodes: map[string]Node{ + "otherfile": File{Data: "x"}, + "subdir": Dir{Nodes: map[string]Node{ + "file": File{Data: "content: file\n"}, + }}, + }}, + "foo": File{Data: "content: foo\n"}, + }, + }, + Select: func(item string, dstpath string, node *restic.Node) (selectForRestore bool, childMayBeSelected bool) { + if item == "/foo" { + return true, false + } + return false, false 
+			},
+			Visitor: checkVisitOrder([]TreeVisit{
+				{"visitNode", "/foo"},
+			}),
+		},
+		{
+			Snapshot: Snapshot{
+				Nodes: map[string]Node{
+					"aaa": File{Data: "content: foo\n"},
+					"dir": Dir{Nodes: map[string]Node{
+						"otherfile": File{Data: "x"},
+						"subdir": Dir{Nodes: map[string]Node{
+							"file": File{Data: "content: file\n"},
+						}},
+					}},
+				},
+			},
+			Select: func(item string, dstpath string, node *restic.Node) (selectForRestore bool, childMayBeSelected bool) {
+				if item == "/aaa" {
+					return true, false
+				}
+				return false, false
+			},
+			Visitor: checkVisitOrder([]TreeVisit{
+				{"visitNode", "/aaa"},
+			}),
+		},
+
+		// select dir/
+		{
+			Snapshot: Snapshot{
+				Nodes: map[string]Node{
+					"dir": Dir{Nodes: map[string]Node{
+						"otherfile": File{Data: "x"},
+						"subdir": Dir{Nodes: map[string]Node{
+							"file": File{Data: "content: file\n"},
+						}},
+					}},
+					"foo": File{Data: "content: foo\n"},
+				},
+			},
+			Select: func(item string, dstpath string, node *restic.Node) (selectForRestore bool, childMayBeSelected bool) {
+				if strings.HasPrefix(item, "/dir") {
+					return true, true
+				}
+				return false, false
+			},
+			Visitor: checkVisitOrder([]TreeVisit{
+				{"enterDir", "/dir"},
+				{"visitNode", "/dir/otherfile"},
+				{"enterDir", "/dir/subdir"},
+				{"visitNode", "/dir/subdir/file"},
+				{"leaveDir", "/dir/subdir"},
+				{"leaveDir", "/dir"},
+			}),
+		},
+
+		// select only dir/otherfile
+		{
+			Snapshot: Snapshot{
+				Nodes: map[string]Node{
+					"dir": Dir{Nodes: map[string]Node{
+						"otherfile": File{Data: "x"},
+						"subdir": Dir{Nodes: map[string]Node{
+							"file": File{Data: "content: file\n"},
+						}},
+					}},
+					"foo": File{Data: "content: foo\n"},
+				},
+			},
+			Select: func(item string, dstpath string, node *restic.Node) (selectForRestore bool, childMayBeSelected bool) {
+				switch item {
+				case "/dir":
+					return false, true
+				case "/dir/otherfile":
+					return true, false
+				default:
+					return false, false
+				}
+			},
+			Visitor: checkVisitOrder([]TreeVisit{
+				{"visitNode", "/dir/otherfile"},
+			}),
+		},
+	}
+
+	for _, test := range tests {
+		t.Run("", func(t *testing.T) {
+			repo, cleanup := repository.TestRepository(t)
+			defer cleanup()
+			sn, id := saveSnapshot(t, repo, test.Snapshot)
+
+			res, err := NewRestorer(repo, id)
+			if err != nil {
+				t.Fatal(err)
+			}
+
+			res.SelectFilter = test.Select
+
+			tempdir, cleanup := rtest.TempDir(t)
+			defer cleanup()
+
+			ctx, cancel := context.WithCancel(context.Background())
+			defer cancel()
+
+			// make sure we're creating a new subdir of the tempdir
+			target := filepath.Join(tempdir, "target")
+
+			err = res.traverseTree(ctx, target, string(filepath.Separator), *sn.Tree, test.Visitor(t))
+			if err != nil {
+				t.Fatal(err)
+			}
+		})
+	}
+}
diff --git a/internal/restorer/restorer_unix_test.go b/internal/restorer/restorer_unix_test.go
new file mode 100644
index 000000000..fc80015c1
--- /dev/null
+++ b/internal/restorer/restorer_unix_test.go
@@ -0,0 +1,61 @@
+//+build !windows
+
+package restorer
+
+import (
+	"context"
+	"os"
+	"path/filepath"
+	"syscall"
+	"testing"
+
+	"github.com/restic/restic/internal/repository"
+	"github.com/restic/restic/internal/restic"
+	rtest "github.com/restic/restic/internal/test"
+)
+
+func TestRestorerRestoreEmptyHardlinkedFiles(t *testing.T) {
+	repo, cleanup := repository.TestRepository(t)
+	defer cleanup()
+
+	_, id := saveSnapshot(t, repo, Snapshot{
+		Nodes: map[string]Node{
+			"dirtest": Dir{
+				Nodes: map[string]Node{
+					"file1": File{Links: 2, Inode: 1},
+					"file2": File{Links: 2, Inode: 1},
+				},
+			},
+		},
+	})
+
+	res, err := NewRestorer(repo, id)
+	rtest.OK(t, err)
+
+	res.SelectFilter = func(item
string, dstpath string, node *restic.Node) (selectedForRestore bool, childMayBeSelected bool) { + return true, true + } + + tempdir, cleanup := rtest.TempDir(t) + defer cleanup() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + err = res.RestoreTo(ctx, tempdir) + rtest.OK(t, err) + + f1, err := os.Stat(filepath.Join(tempdir, "dirtest/file1")) + rtest.OK(t, err) + rtest.Equals(t, int64(0), f1.Size()) + s1, ok1 := f1.Sys().(*syscall.Stat_t) + + f2, err := os.Stat(filepath.Join(tempdir, "dirtest/file2")) + rtest.OK(t, err) + rtest.Equals(t, int64(0), f2.Size()) + s2, ok2 := f2.Sys().(*syscall.Stat_t) + + if ok1 && ok2 { + rtest.Equals(t, s1.Ino, s2.Ino) + } +} diff --git a/internal/selfupdate/download.go b/internal/selfupdate/download.go new file mode 100644 index 000000000..888007c4c --- /dev/null +++ b/internal/selfupdate/download.go @@ -0,0 +1,178 @@ +package selfupdate + +import ( + "archive/zip" + "bufio" + "bytes" + "compress/bzip2" + "context" + "crypto/sha256" + "encoding/hex" + "fmt" + "io" + "os" + "path/filepath" + "runtime" + "strings" + + "github.com/pkg/errors" +) + +func findHash(buf []byte, filename string) (hash []byte, err error) { + sc := bufio.NewScanner(bytes.NewReader(buf)) + for sc.Scan() { + data := strings.Split(sc.Text(), " ") + if len(data) != 2 { + continue + } + + if data[1] == filename { + h, err := hex.DecodeString(data[0]) + if err != nil { + return nil, err + } + + return h, nil + } + } + + return nil, fmt.Errorf("hash for file %v not found", filename) +} + +func extractToFile(buf []byte, filename, target string, printf func(string, ...interface{})) error { + var mode = os.FileMode(0755) + + // get information about the target file + fi, err := os.Lstat(target) + if err == nil { + mode = fi.Mode() + } + + var rd io.Reader = bytes.NewReader(buf) + switch filepath.Ext(filename) { + case ".bz2": + rd = bzip2.NewReader(rd) + case ".zip": + zrd, err := zip.NewReader(bytes.NewReader(buf), int64(len(buf))) + if err != nil { + return err + } + + if len(zrd.File) != 1 { + return errors.New("ZIP archive contains more than one file") + } + + file, err := zrd.File[0].Open() + if err != nil { + return err + } + + defer func() { + _ = file.Close() + }() + + rd = file + } + + err = os.Remove(target) + if os.IsNotExist(err) { + err = nil + } + if err != nil { + return fmt.Errorf("unable to remove target file: %v", err) + } + + dest, err := os.OpenFile(target, os.O_CREATE|os.O_EXCL|os.O_WRONLY, mode) + if err != nil { + return err + } + + n, err := io.Copy(dest, rd) + if err != nil { + _ = dest.Close() + _ = os.Remove(dest.Name()) + return err + } + + err = dest.Close() + if err != nil { + return err + } + + printf("saved %d bytes in %v\n", n, dest.Name()) + return nil +} + +// DownloadLatestStableRelease downloads the latest stable released version of +// restic and saves it to target. It returns the version string for the newest +// version. The function printf is used to print progress information. 
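+//
+// A minimal usage sketch (the target path and current version here are
+// hypothetical; passing nil for printf suppresses progress output):
+//
+//	version, err := DownloadLatestStableRelease(ctx, "/usr/local/bin/restic", "0.9.4", nil)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	log.Printf("restic updated to version %v", version)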
+func DownloadLatestStableRelease(ctx context.Context, target, currentVersion string, printf func(string, ...interface{})) (version string, err error) { + if printf == nil { + printf = func(string, ...interface{}) {} + } + + printf("find latest release of restic at GitHub\n") + + rel, err := GitHubLatestRelease(ctx, "restic", "restic") + if err != nil { + return "", err + } + + if rel.Version == currentVersion { + printf("restic is up to date\n") + return currentVersion, nil + } + + printf("latest version is %v\n", rel.Version) + + _, sha256sums, err := getGithubDataFile(ctx, rel.Assets, "SHA256SUMS", printf) + if err != nil { + return "", err + } + + _, sig, err := getGithubDataFile(ctx, rel.Assets, "SHA256SUMS.asc", printf) + if err != nil { + return "", err + } + + ok, err := GPGVerify(sha256sums, sig) + if err != nil { + return "", err + } + + if !ok { + return "", errors.New("GPG signature verification of the file SHA256SUMS failed") + } + + printf("GPG signature verification succeeded\n") + + ext := "bz2" + if runtime.GOOS == "windows" { + ext = "zip" + } + + suffix := fmt.Sprintf("%s_%s.%s", runtime.GOOS, runtime.GOARCH, ext) + downloadFilename, buf, err := getGithubDataFile(ctx, rel.Assets, suffix, printf) + if err != nil { + return "", err + } + + printf("downloaded %v\n", downloadFilename) + + wantHash, err := findHash(sha256sums, downloadFilename) + if err != nil { + return "", err + } + + gotHash := sha256.Sum256(buf) + if !bytes.Equal(wantHash, gotHash[:]) { + return "", fmt.Errorf("SHA256 hash mismatch, want hash %02x, got %02x", wantHash, gotHash) + } + + err = extractToFile(buf, downloadFilename, target, printf) + if err != nil { + return "", err + } + + return rel.Version, nil +} diff --git a/internal/selfupdate/github.go b/internal/selfupdate/github.go new file mode 100644 index 000000000..541112abc --- /dev/null +++ b/internal/selfupdate/github.go @@ -0,0 +1,170 @@ +package selfupdate + +import ( + "context" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "time" + + "github.com/pkg/errors" + "golang.org/x/net/context/ctxhttp" +) + +// Release collects data about a single release on GitHub. +type Release struct { + Name string `json:"name"` + TagName string `json:"tag_name"` + Draft bool `json:"draft"` + PreRelease bool `json:"prerelease"` + PublishedAt time.Time `json:"published_at"` + Assets []Asset `json:"assets"` + + Version string `json:"-"` // set manually in the code +} + +// Asset is a file uploaded and attached to a release. +type Asset struct { + ID int `json:"id"` + Name string `json:"name"` + URL string `json:"url"` +} + +func (r Release) String() string { + return fmt.Sprintf("%v %v, %d assets", + r.TagName, + r.PublishedAt.Local().Format("2006-01-02 15:04:05"), + len(r.Assets)) +} + +const githubAPITimeout = 30 * time.Second + +// githubError is returned by the GitHub API, e.g. for rate-limiting. +type githubError struct { + Message string +} + +// GitHubLatestRelease uses the GitHub API to get information about the latest +// release of a repository. 
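+//
+// Sketch of a call for the restic repository itself (the fmt output line is
+// illustrative; Release and Asset are defined below):
+//
+//	rel, err := GitHubLatestRelease(ctx, "restic", "restic")
+//	if err != nil {
+//		return err
+//	}
+//	fmt.Printf("latest release: %v with %d assets\n", rel.Version, len(rel.Assets))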
+func GitHubLatestRelease(ctx context.Context, owner, repo string) (Release, error) { + ctx, cancel := context.WithTimeout(ctx, githubAPITimeout) + defer cancel() + + url := fmt.Sprintf("https://api.github.com/repos/%s/%s/releases/latest", owner, repo) + req, err := http.NewRequest(http.MethodGet, url, nil) + if err != nil { + return Release{}, err + } + + // pin API version 3 + req.Header.Set("Accept", "application/vnd.github.v3+json") + + res, err := ctxhttp.Do(ctx, http.DefaultClient, req) + if err != nil { + return Release{}, err + } + + if res.StatusCode != http.StatusOK { + content := res.Header.Get("Content-Type") + if strings.Contains(content, "application/json") { + // try to decode error message + var msg githubError + jerr := json.NewDecoder(res.Body).Decode(&msg) + if jerr == nil { + return Release{}, fmt.Errorf("unexpected status %v (%v) returned, message:\n %v", res.StatusCode, res.Status, msg.Message) + } + } + + _ = res.Body.Close() + return Release{}, fmt.Errorf("unexpected status %v (%v) returned", res.StatusCode, res.Status) + } + + buf, err := ioutil.ReadAll(res.Body) + if err != nil { + _ = res.Body.Close() + return Release{}, err + } + + err = res.Body.Close() + if err != nil { + return Release{}, err + } + + var release Release + err = json.Unmarshal(buf, &release) + if err != nil { + return Release{}, err + } + + if release.TagName == "" { + return Release{}, errors.New("tag name for latest release is empty") + } + + if !strings.HasPrefix(release.TagName, "v") { + return Release{}, errors.Errorf("tag name %q is invalid, does not start with 'v'", release.TagName) + } + + release.Version = release.TagName[1:] + + return release, nil +} + +func getGithubData(ctx context.Context, url string) ([]byte, error) { + ctx, cancel := context.WithTimeout(ctx, githubAPITimeout) + defer cancel() + + req, err := http.NewRequest(http.MethodGet, url, nil) + if err != nil { + return nil, err + } + + // request binary data + req.Header.Set("Accept", "application/octet-stream") + + res, err := ctxhttp.Do(ctx, http.DefaultClient, req) + if err != nil { + return nil, err + } + + if res.StatusCode != http.StatusOK { + return nil, fmt.Errorf("unexpected status %v (%v) returned", res.StatusCode, res.Status) + } + + buf, err := ioutil.ReadAll(res.Body) + if err != nil { + _ = res.Body.Close() + return nil, err + } + + err = res.Body.Close() + if err != nil { + return nil, err + } + + return buf, nil +} + +func getGithubDataFile(ctx context.Context, assets []Asset, suffix string, printf func(string, ...interface{})) (filename string, data []byte, err error) { + var url string + for _, a := range assets { + if strings.HasSuffix(a.Name, suffix) { + url = a.URL + filename = a.Name + break + } + } + + if url == "" { + return "", nil, fmt.Errorf("unable to find file with suffix %v", suffix) + } + + printf("download %v\n", filename) + data, err = getGithubData(ctx, url) + if err != nil { + return "", nil, err + } + + return filename, data, nil +} diff --git a/internal/selfupdate/verify.go b/internal/selfupdate/verify.go new file mode 100644 index 000000000..8db93fe8b --- /dev/null +++ b/internal/selfupdate/verify.go @@ -0,0 +1,187 @@ +package selfupdate + +import ( + "bytes" + "fmt" + + "golang.org/x/crypto/openpgp" +) + +var key = []byte(` +-----BEGIN PGP PUBLIC KEY BLOCK----- + +mQINBFRVIb8BEADUex/4rH/aeR3CN044zqFD45SKUh/8pC44Bw85iRSSE9xEZsLB +LUF6ZtT3HNXfxh7TRpTeHnXABnr8EtNwsmMjItDaSClf5jM0qKVfRIHBZ2N539oF +lHiCEsg+Q6kJEXHSbqder21goihfcjJBVKFX6ULgCbymOu03fzbhe/m5R57gDU2H 
++gcgoI6a5ib11oq2pRdbC9NkEg7YXHbMlZ5s6fIAgklyDQqAlH8QNiRYcyC/4NrG +WXLwUTDssFn3hoJlAxZwj+dRZAit6Hgj2US05Ra/gJqZWzKyE2ywglO9sc2wD3sE +0Ti1tS9VJr7WNcZzVMXj1qBIlBkl4/E5tIiNEZ5BrAhmdSYbZvP2cb6RFn5clKh9 +i+XpeBIGiuAUgXTcV/+OBHjLq+Aeastktk7zaZ9QQoRMHksG02hPI7Z7iIRrhhgD +xsM2XAkwZXp21lpZtkEGYc2qo5ddu+qdZ1tHf5HqJ4JHj2hoRdr4nL6cwA8TlCSc +9PIifkKWVhMSEnkF2PXi+FZqkPnt1sO27Xt5i3BuaWmWig6gB0qh+7sW4o371MpZ +8SPKZgoFA5kJlqkOoSwZyY4M7TRR+GbZuZARUS+BTLsAeJ5Gik9Lhe1saE5UGncf +wYmh+sOi4vRDyoSkPthnBvvlmHp7yo7MiNAUPWHiuv2FWU0rPwB05NOinQARAQAB +tChBbGV4YW5kZXIgTmV1bWFubiA8YWxleGFuZGVyQGJ1bXBlcm4uZGU+iQI6BBMB +CAAkAhsDBQsJCAcDBRUKCQgLBRYCAwEAAh4BAheABQJUVSmNAhkBAAoJEJGmhovT +96kHQUcQALfi9KohoE0JFkKfSXl5jbBJkTt38srMnZ6xKP45F0e/ir1duFVCSyhZ ++YS/n6aBMQl/qRWbzF+93RnGsTLvMi/8Oa72czlEPuYYfFPuJAatxvA/TFZHuI++ +u6xAF4Oxlq0FAbEJfpw0uLSDuU9f9TlLYNP3hLudjFFd9sJGLLs+SCeomPRKFxRR +LL7/1EzdtmvvFhZZcuPsTamBb4oi+1usCO5RW1AQA5A4Qo4gHitBaSaBgolFZLN7 +6UFBwBs/t0hDZPAAZa1T8EpjQrlmINFIeBYFdvjhMChGQc6NcfOOQofW5BDVn6Gs +BHYTvAgSK5G0eaB+bOAtv9LW9hDt05iEJaE5ojPT7ThicHoU65WL4yGAGCGcfNm+ +EpuNGt1IgAFGGxX6wMZy59WqtMBZANjWQdrDbCPQa3pIID96iN0A1HZJg7eAl0y3 +NM6nU7faAuW4QOoQRgxOTj0+fM6khaFmYCp5nuer3d5pkaH6SQG4ZDVOOSaak7Ql +T/EFSfz2B5FZN1OIdqw5/Aw7HugOehHund5QfgRuDLSqZKnuGrIo9OwJIirT/TDD +nsNWBTN3Pxf1h8Iut+R9Zt7LwsVjVN9+JPL8yEk4zzCxHEL4/2c6jANQdtbQCZiH +bU85JWe1NKFo/NNPpM2ysZMpKHe5RB3FLMare0IBs5BO06nyGpTmiEYEEBEIAAYF +AlRVKToACgkQFBE43aPkXWafmgCfcR+LfsAn6aZxjX46Km5tmWpDVrAAoJfHpfBG +5MEki2MOoKvEsWDZqhHSiQIcBBABCAAGBQJUZRF9AAoJEGo6ELMcEJUXrtIP/iJh +0i7VaQG2F+QqGW8Evqng33HeLzW9BvYxt3X1QNb4ylFIc51Hl33yh5ZkPmZk/I3m +BaDd23IE2rhzETxDGrAMnE6zeaQ+iTu6iySBxqHjtK0HwKObuBA4Sw803Hn31Owa +Z8a3TEUkyiHPh8NBuxbvXNuOrxsglATE4KCuUGjGdmNs1raG9mqSUXgZCh1q1kAI +NN6O9DFFS1RsAvNK0qmTZZMfHWZeu10O55MHsxTsfPY/v1Jphg+vHc2NItw0s23R +j6SJN5fgNSLhcKBdCRpw33YFy+EWA8lE2FRd5DStn2sNWvAOoWLrIHZo0UgrgFV2 +gi4QpaN+b/T+QDiq7IcwLaMSWU3ODYIFN2C/TBKZRIC7LWQPG0cjFJd/kWDQWB+i +/MdYMOOuDo6ohh3vfkC7xNEo3lJArC3Zgf4SmO6OBMnIdYjdchDV8dn5lSbKq1A3 +20FIUWIxdkfx4L88J3KOGMAmuZxmnWkKN6iEg+Pb/axuX0cHSx/j7vV01YY2Z6j6 +98tKhP2XObH990Eqfr8zJcj0tCuKEHww7Pn8aH9BHvig5KeEAIbggW34jR9TUKXd +1ao5HX0pKSa/37OqlG4r+XUORCV7awNSuUHU8BR0emDCsgRdrQ4szRgJLOA8sP14 +b9fz+xO3KKNrDXGYZLXFSVwGzrSC5zbh6QkC0qeYiQIcBBABCgAGBQJUamlAAAoJ +EFKZtJkEN6xByy4QAMQJ45eOJtVyGtCKtIr2mPZQ0VI60hfMB6PVFlb7DOpWIHoh +Nl6KWzZunENelXp+VNQcj6St5iZrdOiyiMOY/FdN0rhPAYWERchABd9WDS9ycBr8 +n5kWmB36Wa2r/aTOlDYJ/botigS6To6bR6Gc9FEj4QuVnmqzMlawSz/O0HNS0Hej +DgUwgR3hCDAAp/Hw9PR1CRcHw2bo/B5+GEcl+w6iAkheGXuV2zSWXf6LRLRSEQ70 +f6n4hs6vsuQQ35yd4UXy/t/q3l7/xeJ5TBWWiXviQK1tIOsUJ+/cCpWzms+IFvt+ +UsTQBMMuKBFqjkl4oDgtv8vf1i2NZsNo/XbzPB4hua5HyBuhn0/ej9zMfmMvfqZG +6ZzaAGpZYRCVRcw3nc0yNnoW+g7pAJs1M3sL1BXpUGfROG/T3yFzL+sk62moG2tD +G2G7QsNVzOxRDi8bax5f3U7QW1i33o3qRbr9BfePyWtfVuWHisTw1rBdwwEAfYQs +YUSDMXcTB9LhUWAhJAtEeLz0GOaA+IlafVwqmIdLxTsUoNYfpgRi2Wic33oRI1UG +yENtzKUu1iaHvSCEd/obvrhpx353oI4Yq8Gkr8mWRptX9IGXa+qASZ9gMxJ1PoPA +dLG5/oECVC/ORaNAL3zY9SbmGWamcWgSAeIB3iJxQlyMYikLDzb390y+5AFXiQI3 +BBMBCAAhBQJUVSG/AhsDBQsJCAcDBRUKCQgLBRYCAwEAAh4BAheAAAoJEJGmhovT +96kH8pAP/3PtPkxkYNaS+PXiHBDpDnUsBiwvZt3FsebexepAepzL8dP00oNyI7dP +F2XKm4e/PHJ0nnY+tD4KKRdBeiQOL5ZywHmxZX04P1/Z6uCbVpGCSovcWavBkP8A +k+/CjzJUA6Z/s17D6LIpDDntn6v0abRoTy3asexG277udP+iO+1q/mnxSiSZzNas +rh973gXSeqL3oV+oY6DCSPpOSJlbI85UMU5/WnCxPIVHvaDG8Fv5RF74d3+FVJKd +7TRnUsqZ4MLI++JNXwK0O8dCQ8NsB3NF2rDnND+zhzDlisvdiyGsQUNMnn1Czi4D +/MK2/2xkdoVzKNA0v3IHlnxMWhZVLaHqUiGYUGF6NsB+OgXEEJRGIpukYnd/+WkL +Qqfzyy6Y6uUDhkwz0G5aDGyNUg7+gfDMr5dy+HxtgEzkcoZJWuNBzO0Yp198QNNE 
++QBxu9OkYw9A8wT58cHVuzFU0V+bTBrZtpbME8OWLy6+eDXn6CbVEu2Fc92ckQoz +EpRdZMdiWVtbQDY8L9qAiC+BOVqBgv5PoB8IVHrV1GmRZwxRdlplnzviWa5Qvke5 +dcUy+DXmrCN+dWUql8fFt2g6EIncFYotwxz7r3+KdjCFKzG6zmMLHBCUz0exAxrH +4vXXB1LdEByddgjXcol+N73D4jUyYvs12ziecGitAU8z9nYK347XiQIcBBABAgAG +BQJWBrfPAAoJEB5F+Mqd4jsWKCoQAIgC4I+rQ1UivkF1sk0A6IUvWAP4yfNBM6UP +K5bxFLd/kjL9LiiWD74NVmQ8WAa1Y3cv/gVrmekcVhxXGf3bZcGKBnYmmpR0xWli +XxQJ9CGPwtoaN7ZDKPbOKFzOHX3DiCZ9ePrxwpgym5iu541X2aGH/cqh9bPS4vxv +Nez+5fwhSdDe6iJZ09/oiJuMkyir3SKx3SJRf3Yd2G2k2bPJae2NjiolKIJrgNMe +SSYahaMHF8i+UpUQMqXK4vAWPFML9DzwjJVbnJuJ8s9T/5wy3Lw33Fpe33a8bTon +bEk60+NwhlnRAho0J5At9LVpTUuA0+5LAT2kwAzk2dPzYJl9SVYOQWP8Zj4tEDGF +hQPfdnYMhQB2ECTrtLBLJbqnEbCgRA7VTlGnsb+PU+Ut1GLhglPFPRjfAhWRKBLe +9sDYn8icrhJqvEyc8YMjeBSMEuQUm65b0fjUUl9eBSdRxy2RkQiPTg+o8kLOOnt6 ++ar3S+JXIcN4GpLfBt5cpBiU53TkuTJYHqIHqKyLgEfIIfNRrKTbK6sCfA5STKTf +JSmewY2vGM7D4njQ2Iz8a4SU/XFOWQP0zlehDe1jhLXqYBlYMyXoULkXLkMfmIZX +AHoVn7z1POa94NcKePpW2BFm4Q0OjrwY2/ufPF/RlB4qNiFsrVuWpL7eMzaMZ+JV +oZXxEPMRtChBbGV4YW5kZXIgTmV1bWFubiA8YWxleGFuZGVyQGRlYmlhbi5vcmc+ +iQIfBDABCAAJBQJW9SIDAh0gAAoJEJGmhovT96kHrP0P/24pnzm7zUyMFjUuZbsc +JxNk31K/gSWQ6S5AMPeKB/ar5OMRMkmpZZmOX8c1Q1MxdGdRGPFzA++uWPiizc3Q +LQIrzI1Q2oarkjcb3FMOMpn4M5xZp/+dmuWSrgEEF3iPom/DjpE+U/DC6/YaeJJO +WLuiU799c8b9Qg+ZZcf5L1vUMT489kDL8FgwiThoAXQ4LgSylblguVNkSiyZAQ7g +0snYD93jdBvY2KSIQ1Y9mIZPZYcZacj+CVMMAQOAP6WmrOw6hREaYFo/0Z9tMC0Q +Fba2hwAISS/hrBPFCFalq9E0tqClryitXdJp0/k8QgU979pANJXmZCvmFhjcCIKg +9ok7+lykFmbo+UCmRRoYoLlaw4wNfuN3TIlDyWx7cfAVww+AwQD8E1k6jXJpqT5s +Y+NSbJ2bPRR+AQk3qkvU2dJqOIJxF02jp4a4QxypTAN+byCkJcnrl7XMcykAeCAf +XIA5xRoZu44WJhHmTIAMf5SLzk889MggQrGVKckOpvSaFDElqW54DY/erkwFiZKd +t0rOmvqY4/63Btw6x7Y63THp4xf5IvFf0REc/Eh5aC0gPilHPS9ZbuIh0tX4hrQY +J2SPQ5bU63XC+ucJrHde25dDEa9oQ/xny3Dd233j8ofdLuBKejXXjhD/Dv3nlAEZ +D9VQgaF4kQcpqkz+dsgzEA3IiQI3BBMBCAAhBQJUVSlqAhsDBQsJCAcDBRUKCQgL +BRYCAwEAAh4BAheAAAoJEJGmhovT96kH/JMQAIQLk13LPm2K7YvLrO0iq8MbM/YK +pUh97sms5ItVMZJm3tGmbc4bgOJ2zAfeRRoumMqIyv2KLuXNKdysoGIowKvukOEK +v3IFv1pIXYwQ7KrRa+Pn1kfpjgoOePN/gm5fbGZTgRe65a9XhkBPKB4emv3hrX2b +WFMxtkbzDyP03oshTO/tpBFMNV+XA/Zlz3fLzvICUzD1SHTzzTFACyFkiB68Yx4y +UXXAln7LCXzHsdiM/3EuloiDZtao5t1Ax0+GEbo7WL3bIoR8e1q4d/PgbKPQvASH +jw/s0S1gCwOnFeoShrn2ijp2/5XLVjx4hQ/CCv+RJxv1lNzmmZtBBMGHbl1U5rcE +Ys/Fpe1If1RyC83mimmMfGS5TTXOMWbqjNlRT9bLRM/+OLvS6FWuAuFogQkCc+pa +VKHSEQJiN2+2XbjfrrozubB6Icegp2RwKyi9BRre9V6NPfEm1C8d6hmloqsK5RHX +z62a25sH4mEufTxgYn7TCxx+wckBWOlDe3p2i1lJDv1SKQXN0ZANfmObdfUMYyhZ +/j/ariRx1uhSrgPzQoRBaDMa/klqGyWQ9Yh1nJdeKwPZo4zriZvK5LAgnWh2IRcG +iPtSdtdk5KaEAouUR8XNGEpL79+Awi3kjd5uEmnu+ZqDDM/hq3NgzbQ51PuwBeuH +pe+6S4onnc5Sh5yoiEYEEBEIAAYFAlRVKXwACgkQFBE43aPkXWYUtQCfW61UqGPh +e0atXSnkzEevKm6y99QAn1CZ4rCVg4u/Zp6nvKncdd3cs0/NiQIcBBABCAAGBQJU +ZRF9AAoJEGo6ELMcEJUXk0gP/RJg5pLpPNibz09kjwsOJAlpORyd6OBxE4tOn/bE +CT0mE6vBg0TdY+MO1IC0o5RkiAc9f5YizzYyLBVpfgwdrfCk+eD9mFKhn7szYI23 +2AdXIO5ziC4pND+zdkSj37fxAcM4BIfeyHWKna/cmkM1tsmB1YYpxNpM56Y+ulAG +6YJBkU0hPoUjI7eNMsV/+V/IOSuP4Z/Lhw1fw4bKow0zVc20C+dbgrBz9uKGUrmT +jLMLoEiaxn7yrYug8kQeeKaEoczyQhivQrKFZsfRMkiGaRz7qeOWrw10MCPNa3LX +wswXpxM9FGLnflOwhUiYSgD1OdmBaEEntDPX75Dp0n8pdm5GNFCuD9RpJrGIiPm5 +dsU8kRMeFbUQFNOkJE5Npxv7DrmTIjFd90U3kwcwEL0Y++W+q/lbrmxgOuYmc/c6 +K8WVGjsOTpEuqFvmLmhIwxzH4QCtSUTb/O2bg7PIbAk2LVMbXi4H9Fxl8YCWb532 +Tu2RQ73Odvmluhj+QTFnxglEd4xiOlttwIOwqQAgLBk/GSioFfgLaGla5iabGaWu +MB6zFiDp30IDHfIchUp/jaBvWJf33UaemryRrppVv1mgs6qvKxbGmYSOn7I8KBas +iyV2IXaUCHDdreFcCLJrl/Cso9qQcHroI127IQAB5upyN3TuS0RS/ZnAZc9yd0Jx +kFNHiQIcBBABCgAGBQJUammYAAoJEFKZtJkEN6xBoC8P/2ipNFdW6rMuISzGUcGs 
+CQVNcil/9mZ+iOqe+7DS356vJmENvof31r2/tTHUcJcRoh7ANkR0YuvZylD8MFXk +jrAj+X2ODSCsaugyjxWEg5XEYLnHipX7eFxzT39UJrgP/4wNu8tWDO6t/xhblHUi +chE1tvWZkUnWzhQrBKIiYGZnu0mxIEHR33PZauc4vFL2U0K8deKpo01jtbz9f8+n +grcTplCfCJ8H0SoR/8t4qyg2FNgcnJW7F/VVa3j6ctDBkB+NcPYjeRL8cybHV0VU +xbbG64D+WbqsspWDRj7799SELQ5emUnyok0j/e+3ffFkiKP0mpMX3RW2MfMxd1Bs +AoY3IqvIdlAjrtAY2tQ3sXAyPgmcp6kKRKixLTbBjGfrptNLO2nzADvHk3/4OnKl +tlYj406A7ZgeKDWku+yQ8VSPCeFh86KwiQBgoZOJiQSRWYGT2hy8xLb4im4W1cor +bfL+2+iVMu+EwQ6QFlyzaLgNPNWBXUWvK8vok55LNuHYKHxCJD3b53vBctmB08/U +piRMDT5JTolyfwc4zbFrgb4d5lvTP0bM2qIHoEde5GyDKaUZkHvbBKokkR7nMKhK +mfWs091mG5AP03NXdmt/mllv3bRPsbJJTP0m8BMliQjvPIhKk7ngbNHefzGCdv06 +psDi8Q8apCdR2bLDaLp2HC4TiQIcBBABAgAGBQJWBrfPAAoJEB5F+Mqd4jsWMmYP +/0izNHAqY8HvpyM7SlWixvXI2tjWSlhiC8dLv77rDLTjW/XbKh8P+6abxaPBg9dF +xHGDBJli0U9J+Mp5AodB+b/xgA8ro/U5sGGvTVI02AE9ohPwR2W6xePOapmkyWxO +P4kfEP8bK2V/JnBdk8Rq6tce5onBWTrFQCcqs2OprlfPpbKZgQ6b/K7nNP9uv2ku +twhSxmw4NpKJmvGzf1HnWVQKaE4yqCoH9pJGyQmn+v8JBytkRIsAhlV/HKC8Toz5 +x933qAHVMW/Xf7hYWu9a15rS8NSBMOEFmvKubxYbqEPYM5uXENDCa+oGSbx52crf +IoOfiWWSiDyV502kZ4WB0jOdwITCDp6D/Hq4HZMnoquZ9Kk7mRuOP3FGXtAmp3rY +9TGBcjH2hjA1xen0N59uaE+zNeU0wEhrSMbwUvsFt6Z/p8aS8TMZIayVbmGi/0Vf +UonjIx3OPsd08CfXTk9cY+R3RKoFaJBBa2ZK2XnXj7VgbdcYx7IV4G41cxOHCgCl +BAMyzKI+UHuN4H39sQixFJW7tF0QJrYaXamuzIyh99qy5b0NvWxCiStZOKglmq2P +61ryqzyPcbwEn+1OeXUXayZslU8M2SMRfA3qXaBUuH5fQgVzCm8DndwdGRbh0ceo +E4BtZkZ3hmWvpPgt177eLcg7plL176bnjV0llspg+ji5uQINBFRVIb8BEADo4td9 +MrPJd0wt2Q0OPgdAOyxpwGgu2vh8TTL5sUsMpJEKRQdc5AyEI1/mrTINDVgTSjTd +VPQE8fb4w3GHAUg4iBPucyGLUpQd+pxYya/aqVurKjynVZPHpZzCylsdVv8WR1Bb +bVIbmPiJxmRi3irjNzsmCeUV1V8JPpMxWBdV14NTcRkeJA2JpRXp8ZHhO9WryZV9 +uxxMiDS8NIlAI6Ljt1swrJQOv2sHk9Gbrgmpd1zTYjJzORXZHsQdQ6XAy/4yWwt8 +Gl+eg5ZRSyAE80TEIH0FFJcQ/9YZK/j9bxN+wGiuW4goNdBl84NJ8aq1G0NXDjyH +9WWypWfgURUoNBVmSek2ibRxSriqdFH8Tt+98w1a8EdLJKbPb0A5sV6PqqKUP59a +1AZ1kA0tLjh89Wz6+qjg9YhiCN7SO6eikdPWT/0r3SHtiztgDjgcqTFDNoFZdmZc +jb6eD0nuoRRfWXVZ57aX8WwD37xljKt7e06W7gsq4fXyRYZvQpNHga+83YCkVbxu +pPgPjgq4F/JquIUVfOx3CMmLsvE5p2U0zLGzG1WYgW5AShDfo2LXtjOz4wmRFnfY +pFO+CreWiG3OElwae77JiHXSc7+8pCOE3Buh9SRI8ioJPhb4uxV3paFH9uDTQjpC +nVMI5uOHg0tmWZgTShB/tzDV1KFVTZCw3fABxwARAQABiQIfBBgBCAAJBQJUVSG/ +AhsMAAoJEJGmhovT96kHb/0P/0LXAOXeyTDWOHEoPFKIXS4y9tl2PA6hq1HOgthP +1B2P6eIjpB7UGTSieWpKeqkkv7SZFTol2H0JlhZlhh1IkxS/aHHAl2Km6TLkk6QL +GGkKOFFAiU51iVkJQumbTKMlx11DXA0Jy6mVsUWoz3Ua9cFwrhuCRpKxW61xTEaX +dksgOUBKWH+mF8MtJtRedwHXjmNxaKTAKEsjmPFPn8i75D48JIbq9L+rHLxFTeSR +LShj7lZR1I24+UofA2Tllh4V14rSsUkfIYsKuwCGenJ+sPhpwqHohfJzTewXk+TK +wkilwVgTg7AYCeywP7XqkhA4om9aJRc1cqPcrknsXJLz4Vp7JX8bCtRqF2JT7wsM +wtHMNAtItLa+WYnkvt9/ng9Zt5i0fHZBwfVazWP+/4LAkb9fE4vO2IusV0jK00Sk +7Gt65A32qY75Lze6NRUk2gwizMLIdMvag9AuIUH52RScNVoVXIkmw1q57KshBL1M +VWRd7DUpFGpw8HKkqNlJKPAv+UsJAp7rSkfH9CAYwFzjbs7BST5Cuynac0CgZGQO +F0793mKAsbMePuEIzkR0ZdA/F0Mar9/tQLAtU3pXRrThkLUNmr8Qm9rPGTjrNv7k +ANWsgd4bu0PW5SVm+eFjoTRpNI9P/xrCF8fgLcZ2JPO/wKqyIDcKxEZq978lxWDm +CwGc +=AV20 +-----END PGP PUBLIC KEY BLOCK----- +`) + +// GPGVerify checks the authenticity of data by verifying the signature sig, +// which must be ASCII armored (base64). When the signature matches, GPGVerify +// returns true and a nil error. 
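+//
+// Hedged usage sketch, assuming sums holds the SHA256SUMS contents and sig
+// the matching detached ASCII-armored signature:
+//
+//	ok, err := GPGVerify(sums, sig)
+//	if err != nil || !ok {
+//		// reject the download, the checksum file cannot be trusted
+//	}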
+func GPGVerify(data, sig []byte) (ok bool, err error) {
+	keyring, err := openpgp.ReadArmoredKeyRing(bytes.NewReader(key))
+	if err != nil {
+		return false, fmt.Errorf("reading keyring failed: %v", err)
+	}
+
+	_, err = openpgp.CheckArmoredDetachedSignature(keyring, bytes.NewReader(data), bytes.NewReader(sig))
+	if err != nil {
+		return false, err
+	}
+
+	return true, nil
+}
diff --git a/internal/test/doc.go b/internal/test/doc.go
new file mode 100644
index 000000000..060bad354
--- /dev/null
+++ b/internal/test/doc.go
@@ -0,0 +1,2 @@
+// Package test provides helper functions for writing tests for restic.
+package test
diff --git a/internal/test/helper.go b/internal/test/helper.go
new file mode 100644
index 000000000..f0fc1f61b
--- /dev/null
+++ b/internal/test/helper.go
@@ -0,0 +1,15 @@
+// +build go1.9
+
+package test
+
+import "testing"
+
+// Helperer marks the current function as a test helper.
+type Helperer interface {
+	Helper()
+}
+
+// Helper returns t as a Helperer; on Go 1.9 and later, calling its Helper
+// method marks the calling function as a test helper.
+func Helper(t testing.TB) Helperer {
+	return t
+}
diff --git a/internal/test/helper_go18.go b/internal/test/helper_go18.go
new file mode 100644
index 000000000..d4f8b8de6
--- /dev/null
+++ b/internal/test/helper_go18.go
@@ -0,0 +1,19 @@
+// +build !go1.9
+
+package test
+
+import "testing"
+
+// Helperer marks the current function as a test helper.
+type Helperer interface {
+	Helper()
+}
+
+type fakeHelper struct{}
+
+func (fakeHelper) Helper() {}
+
+// Helper returns a no-op Helperer on Go versions before 1.9, which lack
+// testing.TB's Helper method.
+func Helper(t testing.TB) Helperer {
+	return fakeHelper{}
+}
diff --git a/internal/test/helpers.go b/internal/test/helpers.go
new file mode 100644
index 000000000..785189598
--- /dev/null
+++ b/internal/test/helpers.go
@@ -0,0 +1,204 @@
+package test
+
+import (
+	"compress/bzip2"
+	"compress/gzip"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"reflect"
+	"runtime"
+	"testing"
+
+	"github.com/restic/restic/internal/errors"
+
+	mrand "math/rand"
+)
+
+// Assert fails the test if the condition is false.
+func Assert(tb testing.TB, condition bool, msg string, v ...interface{}) {
+	if !condition {
+		_, file, line, _ := runtime.Caller(1)
+		fmt.Printf("\033[31m%s:%d: "+msg+"\033[39m\n\n", append([]interface{}{filepath.Base(file), line}, v...)...)
+		tb.FailNow()
+	}
+}
+
+// OK fails the test if err is not nil.
+func OK(tb testing.TB, err error) {
+	if err != nil {
+		_, file, line, _ := runtime.Caller(1)
+		fmt.Printf("\033[31m%s:%d: unexpected error: %+v\033[39m\n\n", filepath.Base(file), line, err)
+		tb.FailNow()
+	}
+}
+
+// OKs fails the test if any error from errs is not nil.
+func OKs(tb testing.TB, errs []error) {
+	errFound := false
+	for _, err := range errs {
+		if err != nil {
+			errFound = true
+			_, file, line, _ := runtime.Caller(1)
+			fmt.Printf("\033[31m%s:%d: unexpected error: %+v\033[39m\n\n", filepath.Base(file), line, err.Error())
+		}
+	}
+	if errFound {
+		tb.FailNow()
+	}
+}
+
+// Equals fails the test if exp is not equal to act.
+func Equals(tb testing.TB, exp, act interface{}) {
+	if !reflect.DeepEqual(exp, act) {
+		_, file, line, _ := runtime.Caller(1)
+		fmt.Printf("\033[31m%s:%d:\n\n\texp: %#v\n\n\tgot: %#v\033[39m\n\n", filepath.Base(file), line, exp, act)
+		tb.FailNow()
+	}
+}
+
+// Random returns count bytes of pseudo-random data derived from the seed.
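+//
+// Because the source is seeded, the output is reproducible across runs, e.g.:
+//
+//	buf := Random(23, 16) // always yields the same 16 bytes for seed 23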
+func Random(seed, count int) []byte {
+	p := make([]byte, count)
+
+	rnd := mrand.New(mrand.NewSource(int64(seed)))
+
+	for i := 0; i < len(p); i += 8 {
+		val := rnd.Int63()
+		var data = []byte{
+			byte((val >> 0) & 0xff),
+			byte((val >> 8) & 0xff),
+			byte((val >> 16) & 0xff),
+			byte((val >> 24) & 0xff),
+			byte((val >> 32) & 0xff),
+			byte((val >> 40) & 0xff),
+			byte((val >> 48) & 0xff),
+			byte((val >> 56) & 0xff),
+		}
+
+		for j := range data {
+			cur := i + j
+			if cur >= len(p) {
+				break
+			}
+			p[cur] = data[j]
+		}
+	}
+
+	return p
+}
+
+// SetupTarTestFixture extracts the tarFile to outputDir.
+func SetupTarTestFixture(t testing.TB, outputDir, tarFile string) {
+	input, err := os.Open(tarFile)
+	OK(t, err)
+	defer input.Close()
+
+	var rd io.Reader
+	switch filepath.Ext(tarFile) {
+	case ".gz":
+		r, err := gzip.NewReader(input)
+		OK(t, err)
+
+		defer r.Close()
+		rd = r
+	case ".bzip2":
+		rd = bzip2.NewReader(input)
+	default:
+		rd = input
+	}
+
+	cmd := exec.Command("tar", "xf", "-")
+	cmd.Dir = outputDir
+
+	cmd.Stdin = rd
+	cmd.Stdout = os.Stdout
+	cmd.Stderr = os.Stderr
+
+	OK(t, cmd.Run())
+}
+
+// Env creates a test environment and extracts the repository fixture.
+// It returns the repo path and a cleanup function.
+func Env(t testing.TB, repoFixture string) (repodir string, cleanup func()) {
+	tempdir, err := ioutil.TempDir(TestTempDir, "restic-test-env-")
+	OK(t, err)
+
+	fd, err := os.Open(repoFixture)
+	if err != nil {
+		t.Fatal(err)
+	}
+	OK(t, fd.Close())
+
+	SetupTarTestFixture(t, tempdir, repoFixture)
+
+	return filepath.Join(tempdir, "repo"), func() {
+		if !TestCleanupTempDirs {
+			t.Logf("leaving temporary directory %v used for test", tempdir)
+			return
+		}
+
+		RemoveAll(t, tempdir)
+	}
+}
+
+func isFile(fi os.FileInfo) bool {
+	return fi.Mode()&(os.ModeType|os.ModeCharDevice) == 0
+}
+
+// ResetReadOnly recursively resets the read-only flag for dir. This is
+// mainly used for tests on Windows, where a file marked read-only cannot be
+// deleted.
+func ResetReadOnly(t testing.TB, dir string) {
+	err := filepath.Walk(dir, func(path string, fi os.FileInfo, err error) error {
+		if fi == nil {
+			return err
+		}
+
+		if fi.IsDir() {
+			return os.Chmod(path, 0777)
+		}
+
+		if isFile(fi) {
+			return os.Chmod(path, 0666)
+		}
+
+		return nil
+	})
+	if os.IsNotExist(errors.Cause(err)) {
+		err = nil
+	}
+	OK(t, err)
+}
+
+// RemoveAll recursively resets the read-only flag of all files and dirs and
+// afterwards uses os.RemoveAll() to remove the path.
+func RemoveAll(t testing.TB, path string) {
+	ResetReadOnly(t, path)
+	err := os.RemoveAll(path)
+	if os.IsNotExist(errors.Cause(err)) {
+		err = nil
+	}
+	OK(t, err)
+}
+
+// TempDir returns a temporary directory that is removed when cleanup is
+// called, except if TestCleanupTempDirs is set to false.
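+//
+// Typical use in a test (sketch):
+//
+//	tempdir, cleanup := TempDir(t)
+//	defer cleanup()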
+func TempDir(t testing.TB) (path string, cleanup func()) { + tempdir, err := ioutil.TempDir(TestTempDir, "restic-test-") + if err != nil { + t.Fatal(err) + } + + return tempdir, func() { + if !TestCleanupTempDirs { + t.Logf("leaving temporary directory %v used for test", tempdir) + return + } + + RemoveAll(t, tempdir) + } +} diff --git a/internal/test/vars.go b/internal/test/vars.go new file mode 100644 index 000000000..616f969f6 --- /dev/null +++ b/internal/test/vars.go @@ -0,0 +1,57 @@ +package test + +import ( + "fmt" + "os" + "strings" + "testing" +) + +var ( + TestPassword = getStringVar("RESTIC_TEST_PASSWORD", "geheim") + TestCleanupTempDirs = getBoolVar("RESTIC_TEST_CLEANUP", true) + TestTempDir = getStringVar("RESTIC_TEST_TMPDIR", "") + RunIntegrationTest = getBoolVar("RESTIC_TEST_INTEGRATION", true) + RunFuseTest = getBoolVar("RESTIC_TEST_FUSE", true) + TestSFTPPath = getStringVar("RESTIC_TEST_SFTPPATH", "/usr/lib/ssh:/usr/lib/openssh:/usr/libexec") + TestWalkerPath = getStringVar("RESTIC_TEST_PATH", ".") + BenchArchiveDirectory = getStringVar("RESTIC_BENCH_DIR", ".") + TestS3Server = getStringVar("RESTIC_TEST_S3_SERVER", "") + TestRESTServer = getStringVar("RESTIC_TEST_REST_SERVER", "") + TestIntegrationDisallowSkip = getStringVar("RESTIC_TEST_DISALLOW_SKIP", "") +) + +func getStringVar(name, defaultValue string) string { + if e := os.Getenv(name); e != "" { + return e + } + + return defaultValue +} + +func getBoolVar(name string, defaultValue bool) bool { + if e := os.Getenv(name); e != "" { + switch e { + case "1": + return true + case "0": + return false + default: + fmt.Fprintf(os.Stderr, "invalid value for variable %q, using default\n", name) + } + } + + return defaultValue +} + +// SkipDisallowed fails the test if it needs to run. The environment +// variable RESTIC_TEST_DISALLOW_SKIP contains a comma-separated list of test +// names that must be run. If name is in this list, the test is marked as +// failed. +func SkipDisallowed(t testing.TB, name string) { + for _, s := range strings.Split(TestIntegrationDisallowSkip, ",") { + if s == name { + t.Fatalf("test %v is in list of tests that need to run ($RESTIC_TEST_DISALLOW_SKIP)", name) + } + } +} diff --git a/internal/textfile/read.go b/internal/textfile/read.go new file mode 100644 index 000000000..3129ba8fe --- /dev/null +++ b/internal/textfile/read.go @@ -0,0 +1,43 @@ +// Package textfile allows reading files that contain text. It automatically +// detects and converts several encodings and removes Byte Order Marks (BOM). +package textfile + +import ( + "bytes" + "io/ioutil" + + "golang.org/x/text/encoding/unicode" +) + +// All supported BOMs (Byte Order Marks) +var ( + bomUTF8 = []byte{0xef, 0xbb, 0xbf} + bomUTF16BigEndian = []byte{0xfe, 0xff} + bomUTF16LittleEndian = []byte{0xff, 0xfe} +) + +// Decode removes a byte order mark and converts the bytes to UTF-8. +func Decode(data []byte) ([]byte, error) { + if bytes.HasPrefix(data, bomUTF8) { + return data[len(bomUTF8):], nil + } + + if !bytes.HasPrefix(data, bomUTF16BigEndian) && !bytes.HasPrefix(data, bomUTF16LittleEndian) { + // no encoding specified, let's assume UTF-8 + return data, nil + } + + // UseBom means automatic endianness selection + e := unicode.UTF16(unicode.BigEndian, unicode.UseBOM) + return e.NewDecoder().Bytes(data) +} + +// Read returns the contents of the file, converted to UTF-8, stripped of any BOM. 
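+//
+// Sketch (the path is hypothetical); the result is plain UTF-8 regardless of
+// whether the file was UTF-8, or UTF-16 with a BOM:
+//
+//	data, err := Read("/home/user/excludes.txt")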
+func Read(filename string) ([]byte, error) { + data, err := ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + + return Decode(data) +} diff --git a/internal/textfile/read_test.go b/internal/textfile/read_test.go new file mode 100644 index 000000000..572a33ebe --- /dev/null +++ b/internal/textfile/read_test.go @@ -0,0 +1,76 @@ +package textfile + +import ( + "bytes" + "encoding/hex" + "testing" + + "github.com/restic/restic/internal/fs" +) + +func writeTempfile(t testing.TB, data []byte) (fs.File, func()) { + f, removeTempfile := fs.TestTempFile(t, "restic-test-textfile-read-") + + _, err := f.Write(data) + if err != nil { + t.Fatal(err) + } + + err = f.Close() + if err != nil { + t.Fatal(err) + } + + return f, removeTempfile +} + +func dec(s string) []byte { + data, err := hex.DecodeString(s) + if err != nil { + panic(err) + } + return data +} + +func TestRead(t *testing.T) { + var tests = []struct { + data []byte + want []byte + }{ + {data: []byte("foo bar baz")}, + {data: []byte("Ööbär")}, + { + data: []byte("\xef\xbb\xbffööbär"), + want: []byte("fööbär"), + }, + { + data: dec("feff006600f600f6006200e40072"), + want: []byte("fööbär"), + }, + { + data: dec("fffe6600f600f6006200e4007200"), + want: []byte("fööbär"), + }, + } + + for _, test := range tests { + t.Run("", func(t *testing.T) { + want := test.want + if want == nil { + want = test.data + } + + f, cleanup := writeTempfile(t, test.data) + defer cleanup() + + data, err := Read(f.Name()) + if err != nil { + t.Fatal(err) + } + + if !bytes.Equal(want, data) { + t.Errorf("invalid data returned, want:\n %q\ngot:\n %q", want, data) + } + }) + } +} diff --git a/internal/ui/backup.go b/internal/ui/backup.go new file mode 100644 index 000000000..3a950d9ad --- /dev/null +++ b/internal/ui/backup.go @@ -0,0 +1,367 @@ +package ui + +import ( + "context" + "fmt" + "os" + "sort" + "sync" + "time" + + "github.com/restic/restic/internal/archiver" + "github.com/restic/restic/internal/restic" + "github.com/restic/restic/internal/ui/termstatus" +) + +type counter struct { + Files, Dirs uint + Bytes uint64 +} + +type fileWorkerMessage struct { + filename string + done bool +} + +// Backup reports progress for the `backup` command. +type Backup struct { + *Message + *StdioWrapper + + MinUpdatePause time.Duration + + term *termstatus.Terminal + v uint + start time.Time + + totalBytes uint64 + + totalCh chan counter + processedCh chan counter + errCh chan struct{} + workerCh chan fileWorkerMessage + finished chan struct{} + + summary struct { + sync.Mutex + Files, Dirs struct { + New uint + Changed uint + Unchanged uint + } + archiver.ItemStats + } +} + +// NewBackup returns a new backup progress reporter. +func NewBackup(term *termstatus.Terminal, verbosity uint) *Backup { + return &Backup{ + Message: NewMessage(term, verbosity), + StdioWrapper: NewStdioWrapper(term), + term: term, + v: verbosity, + start: time.Now(), + + // limit to 60fps by default + MinUpdatePause: time.Second / 60, + + totalCh: make(chan counter), + processedCh: make(chan counter), + errCh: make(chan struct{}), + workerCh: make(chan fileWorkerMessage), + finished: make(chan struct{}), + } +} + +// Run regularly updates the status lines. It should be called in a separate +// goroutine. 
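+//
+// Sketch of the intended wiring (the termstatus.Terminal construction is
+// shown for context; verbosity 2 is an arbitrary example):
+//
+//	term := termstatus.New(os.Stdout, os.Stderr, false)
+//	b := NewBackup(term, 2)
+//	go b.Run(ctx)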
+func (b *Backup) Run(ctx context.Context) error { + var ( + lastUpdate time.Time + total, processed counter + errors uint + started bool + currentFiles = make(map[string]struct{}) + secondsRemaining uint64 + ) + + t := time.NewTicker(time.Second) + defer t.Stop() + + for { + select { + case <-ctx.Done(): + return nil + case <-b.finished: + started = false + b.term.SetStatus([]string{""}) + case t, ok := <-b.totalCh: + if ok { + total = t + started = true + } else { + // scan has finished + b.totalCh = nil + b.totalBytes = total.Bytes + } + case s := <-b.processedCh: + processed.Files += s.Files + processed.Dirs += s.Dirs + processed.Bytes += s.Bytes + started = true + case <-b.errCh: + errors++ + started = true + case m := <-b.workerCh: + if m.done { + delete(currentFiles, m.filename) + } else { + currentFiles[m.filename] = struct{}{} + } + case <-t.C: + if !started { + continue + } + + if b.totalCh == nil { + secs := float64(time.Since(b.start) / time.Second) + todo := float64(total.Bytes - processed.Bytes) + secondsRemaining = uint64(secs / float64(processed.Bytes) * todo) + } + } + + // limit update frequency + if time.Since(lastUpdate) < b.MinUpdatePause { + continue + } + lastUpdate = time.Now() + + b.update(total, processed, errors, currentFiles, secondsRemaining) + } +} + +// update updates the status lines. +func (b *Backup) update(total, processed counter, errors uint, currentFiles map[string]struct{}, secs uint64) { + var status string + if total.Files == 0 && total.Dirs == 0 { + // no total count available yet + status = fmt.Sprintf("[%s] %v files, %s, %d errors", + formatDuration(time.Since(b.start)), + processed.Files, formatBytes(processed.Bytes), errors, + ) + } else { + var eta, percent string + + if secs > 0 && processed.Bytes < total.Bytes { + eta = fmt.Sprintf(" ETA %s", formatSeconds(secs)) + percent = formatPercent(processed.Bytes, total.Bytes) + percent += " " + } + + // include totals + status = fmt.Sprintf("[%s] %s%v files %s, total %v files %v, %d errors%s", + formatDuration(time.Since(b.start)), + percent, + processed.Files, + formatBytes(processed.Bytes), + total.Files, + formatBytes(total.Bytes), + errors, + eta, + ) + } + + lines := make([]string, 0, len(currentFiles)+1) + for filename := range currentFiles { + lines = append(lines, filename) + } + sort.Sort(sort.StringSlice(lines)) + lines = append([]string{status}, lines...) + + b.term.SetStatus(lines) +} + +// ScannerError is the error callback function for the scanner, it prints the +// error in verbose mode and returns nil. +func (b *Backup) ScannerError(item string, fi os.FileInfo, err error) error { + b.V("scan: %v\n", err) + return nil +} + +// Error is the error callback function for the archiver, it prints the error and returns nil. +func (b *Backup) Error(item string, fi os.FileInfo, err error) error { + b.E("error: %v\n", err) + b.errCh <- struct{}{} + return nil +} + +// StartFile is called when a file is being processed by a worker. +func (b *Backup) StartFile(filename string) { + b.workerCh <- fileWorkerMessage{ + filename: filename, + } +} + +// CompleteBlob is called for all saved blobs for files. 
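+//
+// It feeds the byte counter behind the percent/ETA display; a hypothetical
+// call as data is written:
+//
+//	b.CompleteBlob("/home/user/file.txt", 4096)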
+func (b *Backup) CompleteBlob(filename string, bytes uint64) { + b.processedCh <- counter{Bytes: bytes} +} + +func formatPercent(numerator uint64, denominator uint64) string { + if denominator == 0 { + return "" + } + + percent := 100.0 * float64(numerator) / float64(denominator) + + if percent > 100 { + percent = 100 + } + + return fmt.Sprintf("%3.2f%%", percent) +} + +func formatSeconds(sec uint64) string { + hours := sec / 3600 + sec -= hours * 3600 + min := sec / 60 + sec -= min * 60 + if hours > 0 { + return fmt.Sprintf("%d:%02d:%02d", hours, min, sec) + } + + return fmt.Sprintf("%d:%02d", min, sec) +} + +func formatDuration(d time.Duration) string { + sec := uint64(d / time.Second) + return formatSeconds(sec) +} + +func formatBytes(c uint64) string { + b := float64(c) + switch { + case c > 1<<40: + return fmt.Sprintf("%.3f TiB", b/(1<<40)) + case c > 1<<30: + return fmt.Sprintf("%.3f GiB", b/(1<<30)) + case c > 1<<20: + return fmt.Sprintf("%.3f MiB", b/(1<<20)) + case c > 1<<10: + return fmt.Sprintf("%.3f KiB", b/(1<<10)) + default: + return fmt.Sprintf("%d B", c) + } +} + +// CompleteItemFn is the status callback function for the archiver when a +// file/dir has been saved successfully. +func (b *Backup) CompleteItemFn(item string, previous, current *restic.Node, s archiver.ItemStats, d time.Duration) { + b.summary.Lock() + b.summary.ItemStats.Add(s) + b.summary.Unlock() + + if current == nil { + // error occurred, tell the status display to remove the line + b.workerCh <- fileWorkerMessage{ + filename: item, + done: true, + } + return + } + + switch current.Type { + case "file": + b.processedCh <- counter{Files: 1} + b.workerCh <- fileWorkerMessage{ + filename: item, + done: true, + } + case "dir": + b.processedCh <- counter{Dirs: 1} + } + + if current.Type == "dir" { + if previous == nil { + b.VV("new %v, saved in %.3fs (%v added, %v metadata)", item, d.Seconds(), formatBytes(s.DataSize), formatBytes(s.TreeSize)) + b.summary.Lock() + b.summary.Dirs.New++ + b.summary.Unlock() + return + } + + if previous.Equals(*current) { + b.VV("unchanged %v", item) + b.summary.Lock() + b.summary.Dirs.Unchanged++ + b.summary.Unlock() + } else { + b.VV("modified %v, saved in %.3fs (%v added, %v metadata)", item, d.Seconds(), formatBytes(s.DataSize), formatBytes(s.TreeSize)) + b.summary.Lock() + b.summary.Dirs.Changed++ + b.summary.Unlock() + } + + } else if current.Type == "file" { + + b.workerCh <- fileWorkerMessage{ + done: true, + filename: item, + } + + if previous == nil { + b.VV("new %v, saved in %.3fs (%v added)", item, d.Seconds(), formatBytes(s.DataSize)) + b.summary.Lock() + b.summary.Files.New++ + b.summary.Unlock() + return + } + + if previous.Equals(*current) { + b.VV("unchanged %v", item) + b.summary.Lock() + b.summary.Files.Unchanged++ + b.summary.Unlock() + } else { + b.VV("modified %v, saved in %.3fs (%v added)", item, d.Seconds(), formatBytes(s.DataSize)) + b.summary.Lock() + b.summary.Files.Changed++ + b.summary.Unlock() + } + } +} + +// ReportTotal sets the total stats up to now +func (b *Backup) ReportTotal(item string, s archiver.ScanStats) { + select { + case b.totalCh <- counter{Files: s.Files, Dirs: s.Dirs, Bytes: s.Bytes}: + case <-b.finished: + } + + if item == "" { + b.V("scan finished in %.3fs: %v files, %s", + time.Since(b.start).Seconds(), + s.Files, formatBytes(s.Bytes), + ) + close(b.totalCh) + return + } +} + +// Finish prints the finishing messages. 
+func (b *Backup) Finish() { + close(b.finished) + + b.P("\n") + b.P("Files: %5d new, %5d changed, %5d unmodified\n", b.summary.Files.New, b.summary.Files.Changed, b.summary.Files.Unchanged) + b.P("Dirs: %5d new, %5d changed, %5d unmodified\n", b.summary.Dirs.New, b.summary.Dirs.Changed, b.summary.Dirs.Unchanged) + b.V("Data Blobs: %5d new\n", b.summary.ItemStats.DataBlobs) + b.V("Tree Blobs: %5d new\n", b.summary.ItemStats.TreeBlobs) + b.P("Added to the repo: %-5s\n", formatBytes(b.summary.ItemStats.DataSize+b.summary.ItemStats.TreeSize)) + b.P("\n") + b.P("processed %v files, %v in %s", + b.summary.Files.New+b.summary.Files.Changed+b.summary.Files.Unchanged, + formatBytes(b.totalBytes), + formatDuration(time.Since(b.start)), + ) +} diff --git a/internal/ui/message.go b/internal/ui/message.go new file mode 100644 index 000000000..75e54b019 --- /dev/null +++ b/internal/ui/message.go @@ -0,0 +1,45 @@ +package ui + +import "github.com/restic/restic/internal/ui/termstatus" + +// Message reports progress with messages of different verbosity. +type Message struct { + term *termstatus.Terminal + v uint +} + +// NewMessage returns a message progress reporter with underlying terminal +// term. +func NewMessage(term *termstatus.Terminal, verbosity uint) *Message { + return &Message{ + term: term, + v: verbosity, + } +} + +// E reports an error +func (m *Message) E(msg string, args ...interface{}) { + m.term.Errorf(msg, args...) +} + +// P prints a message if verbosity >= 1, this is used for normal messages which +// are not errors. +func (m *Message) P(msg string, args ...interface{}) { + if m.v >= 1 { + m.term.Printf(msg, args...) + } +} + +// V prints a message if verbosity >= 2, this is used for verbose messages. +func (m *Message) V(msg string, args ...interface{}) { + if m.v >= 2 { + m.term.Printf(msg, args...) + } +} + +// VV prints a message if verbosity >= 3, this is used for debug messages. +func (m *Message) VV(msg string, args ...interface{}) { + if m.v >= 3 { + m.term.Printf(msg, args...) + } +} diff --git a/internal/ui/stdio_wrapper.go b/internal/ui/stdio_wrapper.go new file mode 100644 index 000000000..eccaefb7b --- /dev/null +++ b/internal/ui/stdio_wrapper.go @@ -0,0 +1,86 @@ +package ui + +import ( + "bytes" + "io" + + "github.com/restic/restic/internal/ui/termstatus" +) + +// StdioWrapper provides stdout and stderr integration with termstatus. +type StdioWrapper struct { + stdout *lineWriter + stderr *lineWriter +} + +// NewStdioWrapper initializes a new stdio wrapper that can be used in place of +// os.Stdout or os.Stderr. +func NewStdioWrapper(term *termstatus.Terminal) *StdioWrapper { + return &StdioWrapper{ + stdout: newLineWriter(term.Print), + stderr: newLineWriter(term.Error), + } +} + +// Stdout returns a writer that is line buffered and can be used in place of +// os.Stdout. On Close(), the remaining bytes are written, followed by a line +// break. +func (w *StdioWrapper) Stdout() io.WriteCloser { + return w.stdout +} + +// Stderr returns a writer that is line buffered and can be used in place of +// os.Stderr. On Close(), the remaining bytes are written, followed by a line +// break. 
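+//
+// Sketch of routing output through the status display (the printed text is
+// illustrative); Close flushes a trailing partial line:
+//
+//	w := NewStdioWrapper(term)
+//	fmt.Fprint(w.Stdout(), "partial")
+//	_ = w.Stdout().Close() // prints "partial\n" via the terminal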
+func (w *StdioWrapper) Stderr() io.WriteCloser { + return w.stderr +} + +type lineWriter struct { + buf *bytes.Buffer + print func(string) +} + +var _ io.WriteCloser = &lineWriter{} + +func newLineWriter(print func(string)) *lineWriter { + return &lineWriter{buf: bytes.NewBuffer(nil), print: print} +} + +func (w *lineWriter) Write(data []byte) (n int, err error) { + n, err = w.buf.Write(data) + if err != nil { + return n, err + } + + // look for line breaks + buf := w.buf.Bytes() + skip := 0 + for i := 0; i < len(buf); { + if buf[i] == '\n' { + // found line + w.print(string(buf[:i+1])) + buf = buf[i+1:] + skip += i + 1 + i = 0 + continue + } + + i++ + } + + _ = w.buf.Next(skip) + + return n, err +} + +func (w *lineWriter) Flush() error { + if w.buf.Len() > 0 { + w.print(string(append(w.buf.Bytes(), '\n'))) + } + return nil +} + +func (w *lineWriter) Close() error { + return w.Flush() +} diff --git a/internal/ui/stdio_wrapper_test.go b/internal/ui/stdio_wrapper_test.go new file mode 100644 index 000000000..fc071f992 --- /dev/null +++ b/internal/ui/stdio_wrapper_test.go @@ -0,0 +1,95 @@ +package ui + +import ( + "testing" + + "github.com/google/go-cmp/cmp" +) + +func TestStdioWrapper(t *testing.T) { + var tests = []struct { + inputs [][]byte + outputs []string + }{ + { + inputs: [][]byte{ + []byte("foo"), + }, + outputs: []string{ + "foo\n", + }, + }, + { + inputs: [][]byte{ + []byte("foo"), + []byte("bar"), + []byte("\n"), + []byte("baz"), + }, + outputs: []string{ + "foobar\n", + "baz\n", + }, + }, + { + inputs: [][]byte{ + []byte("foo"), + []byte("bar\nbaz\n"), + []byte("bump\n"), + }, + outputs: []string{ + "foobar\n", + "baz\n", + "bump\n", + }, + }, + { + inputs: [][]byte{ + []byte("foo"), + []byte("bar\nbaz\n"), + []byte("bum"), + []byte("p\nx"), + []byte("x"), + []byte("x"), + []byte("z"), + }, + outputs: []string{ + "foobar\n", + "baz\n", + "bump\n", + "xxxz\n", + }, + }, + } + + for _, test := range tests { + t.Run("", func(t *testing.T) { + var lines []string + print := func(s string) { + lines = append(lines, s) + } + + w := newLineWriter(print) + + for _, data := range test.inputs { + n, err := w.Write(data) + if err != nil { + t.Fatal(err) + } + + if n != len(data) { + t.Errorf("invalid length returned by Write, want %d, got %d", len(data), n) + } + } + + err := w.Close() + if err != nil { + t.Fatal(err) + } + + if !cmp.Equal(test.outputs, lines) { + t.Error(cmp.Diff(test.outputs, lines)) + } + }) + } +} diff --git a/internal/ui/table/table.go b/internal/ui/table/table.go new file mode 100644 index 000000000..8939f2ac9 --- /dev/null +++ b/internal/ui/table/table.go @@ -0,0 +1,206 @@ +package table + +import ( + "bytes" + "io" + "strings" + + "text/template" +) + +// Table contains data for a table to be printed. 
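+//
+// Sketch of typical use (column name and row type invented for the example):
+//
+//	tab := New()
+//	tab.AddColumn("host", "{{.Host}}")
+//	tab.AddRow(struct{ Host string }{"foo"})
+//	err := tab.Write(os.Stdout)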
+type Table struct { + columns []string + templates []*template.Template + data []interface{} + footer []string + + CellSeparator string + PrintHeader func(io.Writer, string) error + PrintSeparator func(io.Writer, string) error + PrintData func(io.Writer, int, string) error + PrintFooter func(io.Writer, string) error +} + +var funcmap = template.FuncMap{ + "join": strings.Join, +} + +// New initializes a new Table +func New() *Table { + p := func(w io.Writer, s string) error { + _, err := w.Write(append([]byte(s), '\n')) + return err + } + return &Table{ + CellSeparator: " ", + PrintHeader: p, + PrintSeparator: p, + PrintData: func(w io.Writer, _ int, s string) error { + return p(w, s) + }, + PrintFooter: p, + } +} + +// AddColumn adds a new header field with the header and format, which is +// expected to be template string compatible with text/template. When compiling +// the format fails, AddColumn panics. +func (t *Table) AddColumn(header, format string) { + t.columns = append(t.columns, header) + tmpl, err := template.New("template for " + header).Funcs(funcmap).Parse(format) + if err != nil { + panic(err) + } + + t.templates = append(t.templates, tmpl) +} + +// AddRow adds a new row to the table, which is filled with data. +func (t *Table) AddRow(data interface{}) { + t.data = append(t.data, data) +} + +// AddFooter prints line after the table +func (t *Table) AddFooter(line string) { + t.footer = append(t.footer, line) +} + +func printLine(w io.Writer, print func(io.Writer, string) error, sep string, data []string, widths []int) error { + var fields [][]string + + maxLines := 1 + for _, d := range data { + lines := strings.Split(d, "\n") + if len(lines) > maxLines { + maxLines = len(lines) + } + fields = append(fields, lines) + } + + for i := 0; i < maxLines; i++ { + var s string + + for fieldNum, lines := range fields { + var v string + + if i < len(lines) { + v += lines[i] + } + + // apply padding + pad := widths[fieldNum] - len(v) + if pad > 0 { + v += strings.Repeat(" ", pad) + } + + if fieldNum > 0 { + v = sep + v + } + + s += v + } + + err := print(w, strings.TrimRight(s, " ")) + if err != nil { + return err + } + } + + return nil +} + +// Write prints the table to w. 
+func (t *Table) Write(w io.Writer) error {
+	columns := len(t.templates)
+	if columns == 0 {
+		return nil
+	}
+
+	// collect all data fields from all columns
+	lines := make([][]string, 0, len(t.data))
+	buf := bytes.NewBuffer(nil)
+
+	for _, data := range t.data {
+		row := make([]string, 0, len(t.templates))
+		for _, tmpl := range t.templates {
+			err := tmpl.Execute(buf, data)
+			if err != nil {
+				return err
+			}
+
+			row = append(row, buf.String())
+			buf.Reset()
+		}
+		lines = append(lines, row)
+	}
+
+	// find max width for each cell
+	columnWidths := make([]int, columns)
+	for i, desc := range t.columns {
+		for _, line := range strings.Split(desc, "\n") {
+			if columnWidths[i] < len(line) {
+				columnWidths[i] = len(line)
+			}
+		}
+	}
+	for _, line := range lines {
+		for i, content := range line {
+			for _, l := range strings.Split(content, "\n") {
+				if columnWidths[i] < len(l) {
+					columnWidths[i] = len(l)
+				}
+			}
+		}
+	}
+
+	// calculate the total width of the table
+	totalWidth := 0
+	for _, width := range columnWidths {
+		totalWidth += width
+	}
+	totalWidth += (columns - 1) * len(t.CellSeparator)
+
+	// write header
+	if len(t.columns) > 0 {
+		err := printLine(w, t.PrintHeader, t.CellSeparator, t.columns, columnWidths)
+		if err != nil {
+			return err
+		}
+
+		// draw separation line
+		err = t.PrintSeparator(w, strings.Repeat("-", totalWidth))
+		if err != nil {
+			return err
+		}
+	}
+
+	// write all the lines
+	for i, line := range lines {
+		print := func(w io.Writer, s string) error {
+			return t.PrintData(w, i, s)
+		}
+		err := printLine(w, print, t.CellSeparator, line, columnWidths)
+		if err != nil {
+			return err
+		}
+	}
+
+	// draw separation line
+	err := t.PrintSeparator(w, strings.Repeat("-", totalWidth))
+	if err != nil {
+		return err
+	}
+
+	if len(t.footer) > 0 {
+		// write the footer
+		for _, line := range t.footer {
+			err := t.PrintFooter(w, line)
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
diff --git a/internal/ui/table/table_test.go b/internal/ui/table/table_test.go
new file mode 100644
index 000000000..47b180a91
--- /dev/null
+++ b/internal/ui/table/table_test.go
@@ -0,0 +1,162 @@
+package table
+
+import (
+	"bytes"
+	"strings"
+	"testing"
+)
+
+func TestTable(t *testing.T) {
+	var tests = []struct {
+		create func(t testing.TB) *Table
+		output string
+	}{
+		{
+			func(t testing.TB) *Table {
+				return New()
+			},
+			"",
+		},
+		{
+			func(t testing.TB) *Table {
+				table := New()
+				table.AddColumn("first column", "data: {{.First}}")
+				table.AddRow(struct{ First string }{"first data field"})
+				return table
+			},
+			`
+first column
+----------------------
+data: first data field
+----------------------
+`,
+		},
+		{
+			func(t testing.TB) *Table {
+				table := New()
+				table.AddColumn("  first column  ", "data: {{.First}}")
+				table.AddRow(struct{ First string }{"d"})
+				return table
+			},
+			`
+  first column
+----------------
+data: d
+----------------
+`,
+		},
+		{
+			func(t testing.TB) *Table {
+				table := New()
+				table.AddColumn("first column", "data: {{.First}}")
+				table.AddRow(struct{ First string }{"first data field"})
+				table.AddRow(struct{ First string }{"second data field"})
+				table.AddFooter("footer1")
+				table.AddFooter("footer2")
+				return table
+			},
+			`
+first column
+-----------------------
+data: first data field
+data: second data field
+-----------------------
+footer1
+footer2
+`,
+		},
+		{
+			func(t testing.TB) *Table {
+				table := New()
+				table.AddColumn("  first name", `{{printf "%12s" .FirstName}}`)
+				table.AddColumn("last name", "{{.LastName}}")
+				table.AddRow(struct{ FirstName, LastName string }{"firstname", "lastname"})
+				table.AddRow(struct{ FirstName, LastName string }{"John", "Doe"})
+				table.AddRow(struct{ FirstName, LastName string }{"Johann", "van den Berjen"})
+				return table
+			},
+			`
+  first name  last name
+----------------------------
+   firstname  lastname
+        John  Doe
+      Johann  van den Berjen
+----------------------------
+`,
+		},
+		{
+			func(t testing.TB) *Table {
+				table := New()
+				table.AddColumn("host name", `{{.Host}}`)
+				table.AddColumn("time", `{{.Time}}`)
+				table.AddColumn("zz", "xxx")
+				table.AddColumn("tags", `{{join .Tags ","}}`)
+				table.AddColumn("dirs", `{{join .Dirs ","}}`)
+
+				type data struct {
+					Host       string
+					Time       string
+					Tags, Dirs []string
+				}
+				table.AddRow(data{"foo", "2018-08-19 22:22:22", []string{"work"}, []string{"/home/user/work"}})
+				table.AddRow(data{"foo", "2018-08-19 22:22:22", []string{"other"}, []string{"/home/user/other"}})
+				table.AddRow(data{"foo", "2018-08-19 22:22:22", []string{"other"}, []string{"/home/user/other"}})
+				return table
+			},
+			`
+host name  time                 zz   tags   dirs
+------------------------------------------------------------
+foo        2018-08-19 22:22:22  xxx  work   /home/user/work
+foo        2018-08-19 22:22:22  xxx  other  /home/user/other
+foo        2018-08-19 22:22:22  xxx  other  /home/user/other
+------------------------------------------------------------
+`,
+		},
+		{
+			func(t testing.TB) *Table {
+				table := New()
+				table.AddColumn("host name", `{{.Host}}`)
+				table.AddColumn("time", `{{.Time}}`)
+				table.AddColumn("zz", "xxx")
+				table.AddColumn("tags", `{{join .Tags "\n"}}`)
+				table.AddColumn("dirs", `{{join .Dirs "\n"}}`)
+
+				type data struct {
+					Host       string
+					Time       string
+					Tags, Dirs []string
+				}
+				table.AddRow(data{"foo", "2018-08-19 22:22:22", []string{"work", "go"}, []string{"/home/user/work", "/home/user/go"}})
+				table.AddRow(data{"foo", "2018-08-19 22:22:22", []string{"other"}, []string{"/home/user/other"}})
+				table.AddRow(data{"foo", "2018-08-19 22:22:22", []string{"other", "bar"}, []string{"/home/user/other"}})
+				return table
+			},
+			`
+host name  time                 zz   tags   dirs
+------------------------------------------------------------
+foo        2018-08-19 22:22:22  xxx  work   /home/user/work
+                                     go     /home/user/go
+foo        2018-08-19 22:22:22  xxx  other  /home/user/other
+foo        2018-08-19 22:22:22  xxx  other  /home/user/other
+                                     bar
+------------------------------------------------------------
+`,
+		},
+	}
+
+	for _, test := range tests {
+		t.Run("", func(t *testing.T) {
+			table := test.create(t)
+			buf := bytes.NewBuffer(nil)
+			err := table.Write(buf)
+			if err != nil {
+				t.Fatal(err)
+			}
+
+			want := strings.TrimLeft(test.output, "\n")
+			if buf.String() != want {
+				t.Errorf("wrong output\n---- want ---\n%s\n---- got ---\n%s\n-------\n", want, buf.Bytes())
+			}
+		})
+	}
+}
diff --git a/internal/ui/termstatus/background.go b/internal/ui/termstatus/background.go
new file mode 100644
index 000000000..e371c18df
--- /dev/null
+++ b/internal/ui/termstatus/background.go
@@ -0,0 +1,9 @@
+// +build !linux
+
+package termstatus
+
+// IsProcessBackground reports whether the current process is running in the
+// background. Not implemented for this platform.
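+//
+// Callers guard terminal updates with it, along the lines of:
+//
+//	if IsProcessBackground() {
+//		continue // skip screen updates while backgrounded
+//	}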
+func IsProcessBackground() bool { + return false +} diff --git a/internal/ui/termstatus/background_linux.go b/internal/ui/termstatus/background_linux.go new file mode 100644 index 000000000..f99091128 --- /dev/null +++ b/internal/ui/termstatus/background_linux.go @@ -0,0 +1,21 @@ +package termstatus + +import ( + "syscall" + "unsafe" + + "github.com/restic/restic/internal/debug" +) + +// IsProcessBackground reports whether the current process is running in the background. +func IsProcessBackground() bool { + var pid int + _, _, err := syscall.Syscall(syscall.SYS_IOCTL, uintptr(syscall.Stdin), syscall.TIOCGPGRP, uintptr(unsafe.Pointer(&pid))) + + if err != 0 { + debug.Log("Can't check if we are in the background. Using default behaviour. Error: %s\n", err.Error()) + return false + } + + return pid != syscall.Getpgrp() +} diff --git a/internal/ui/termstatus/status.go b/internal/ui/termstatus/status.go new file mode 100644 index 000000000..d997d6bb1 --- /dev/null +++ b/internal/ui/termstatus/status.go @@ -0,0 +1,334 @@ +package termstatus + +import ( + "bufio" + "bytes" + "context" + "fmt" + "io" + "os" + "strings" +) + +// Terminal is used to write messages and display status lines which can be +// updated. When the output is redirected to a file, the status lines are not +// printed. +type Terminal struct { + wr *bufio.Writer + fd uintptr + errWriter io.Writer + buf *bytes.Buffer + msg chan message + status chan status + canUpdateStatus bool + + // will be closed when the goroutine which runs Run() terminates, so it'll + // yield a default value immediately + closed chan struct{} + + clearCurrentLine func(io.Writer, uintptr) + moveCursorUp func(io.Writer, uintptr, int) +} + +type message struct { + line string + err bool +} + +type status struct { + lines []string +} + +type fder interface { + Fd() uintptr +} + +// New returns a new Terminal for wr. A goroutine is started to update the +// terminal. It is terminated when ctx is cancelled. When wr is redirected to +// a file (e.g. via shell output redirection) or is just an io.Writer (not the +// open *os.File for stdout), no status lines are printed. The status lines and +// normal output (via Print/Printf) are written to wr, error messages are +// written to errWriter. If disableStatus is set to true, no status messages +// are printed even if the terminal supports it. +func New(wr io.Writer, errWriter io.Writer, disableStatus bool) *Terminal { + t := &Terminal{ + wr: bufio.NewWriter(wr), + errWriter: errWriter, + buf: bytes.NewBuffer(nil), + msg: make(chan message), + status: make(chan status), + closed: make(chan struct{}), + } + + if disableStatus { + return t + } + + if d, ok := wr.(fder); ok && canUpdateStatus(d.Fd()) { + // only use the fancy status code when we're running on a real terminal. + t.canUpdateStatus = true + t.fd = d.Fd() + t.clearCurrentLine = clearCurrentLine(wr, t.fd) + t.moveCursorUp = moveCursorUp(wr, t.fd) + } + + return t +} + +// Run updates the screen. It should be run in a separate goroutine. When +// ctx is cancelled, the status lines are cleanly removed. +func (t *Terminal) Run(ctx context.Context) { + defer close(t.closed) + if t.canUpdateStatus { + t.run(ctx) + return + } + + t.runWithoutStatus(ctx) +} + +type stringWriter interface { + WriteString(string) (int, error) +} + +// run listens on the channels and updates the terminal screen. 
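+//
+// The exported surface of Terminal is used roughly like this (sketch; see
+// the New doc comment above):
+//
+//	term := New(os.Stdout, os.Stderr, false)
+//	go term.Run(ctx)
+//	term.SetStatus([]string{"processing..."})
+//	term.Print("a regular message")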
+func (t *Terminal) run(ctx context.Context) { + var status []string + for { + select { + case <-ctx.Done(): + if IsProcessBackground() { + // ignore all messages, do nothing, we are in the background process group + continue + } + t.undoStatus(len(status)) + + return + + case msg := <-t.msg: + if IsProcessBackground() { + // ignore all messages, do nothing, we are in the background process group + continue + } + t.clearCurrentLine(t.wr, t.fd) + + var dst io.Writer + if msg.err { + dst = t.errWriter + + // assume t.wr and t.errWriter are different, so we need to + // flush clearing the current line + err := t.wr.Flush() + if err != nil { + fmt.Fprintf(os.Stderr, "flush failed: %v\n", err) + } + } else { + dst = t.wr + } + + var err error + if w, ok := dst.(stringWriter); ok { + _, err = w.WriteString(msg.line) + } else { + _, err = dst.Write([]byte(msg.line)) + } + + if err != nil { + fmt.Fprintf(os.Stderr, "write failed: %v\n", err) + continue + } + + t.writeStatus(status) + + err = t.wr.Flush() + if err != nil { + fmt.Fprintf(os.Stderr, "flush failed: %v\n", err) + } + + case stat := <-t.status: + if IsProcessBackground() { + // ignore all messages, do nothing, we are in the background process group + continue + } + + status = status[:0] + status = append(status, stat.lines...) + t.writeStatus(status) + } + } +} + +func (t *Terminal) writeStatus(status []string) { + for _, line := range status { + t.clearCurrentLine(t.wr, t.fd) + + _, err := t.wr.WriteString(line) + if err != nil { + fmt.Fprintf(os.Stderr, "write failed: %v\n", err) + } + + // flush is needed so that the current line is updated + err = t.wr.Flush() + if err != nil { + fmt.Fprintf(os.Stderr, "flush failed: %v\n", err) + } + } + + if len(status) > 0 { + t.moveCursorUp(t.wr, t.fd, len(status)-1) + } + + err := t.wr.Flush() + if err != nil { + fmt.Fprintf(os.Stderr, "flush failed: %v\n", err) + } +} + +// runWithoutStatus listens on the channels and just prints out the messages, +// without status lines. +func (t *Terminal) runWithoutStatus(ctx context.Context) { + for { + select { + case <-ctx.Done(): + return + case msg := <-t.msg: + var err error + var flush func() error + + var dst io.Writer + if msg.err { + dst = t.errWriter + } else { + dst = t.wr + flush = t.wr.Flush + } + + if w, ok := dst.(stringWriter); ok { + _, err = w.WriteString(msg.line) + } else { + _, err = dst.Write([]byte(msg.line)) + } + + if err != nil { + fmt.Fprintf(os.Stderr, "write failed: %v\n", err) + } + + if flush == nil { + continue + } + + err = flush() + if err != nil { + fmt.Fprintf(os.Stderr, "flush failed: %v\n", err) + } + + case _ = <-t.status: + // discard status lines + } + } +} + +func (t *Terminal) undoStatus(lines int) { + for i := 0; i < lines; i++ { + t.clearCurrentLine(t.wr, t.fd) + + _, err := t.wr.WriteRune('\n') + if err != nil { + fmt.Fprintf(os.Stderr, "write failed: %v\n", err) + } + + // flush is needed so that the current line is updated + err = t.wr.Flush() + if err != nil { + fmt.Fprintf(os.Stderr, "flush failed: %v\n", err) + } + } + + t.moveCursorUp(t.wr, t.fd, lines) + + err := t.wr.Flush() + if err != nil { + fmt.Fprintf(os.Stderr, "flush failed: %v\n", err) + } +} + +// Print writes a line to the terminal. +func (t *Terminal) Print(line string) { + // make sure the line ends with a line break + if line[len(line)-1] != '\n' { + line += "\n" + } + + select { + case t.msg <- message{line: line}: + case <-t.closed: + } +} + +// Printf uses fmt.Sprintf to write a line to the terminal. 
+func (t *Terminal) Printf(msg string, args ...interface{}) { + s := fmt.Sprintf(msg, args...) + t.Print(s) +} + +// Error writes an error to the terminal. +func (t *Terminal) Error(line string) { + // make sure the line ends with a line break + if line[len(line)-1] != '\n' { + line += "\n" + } + + select { + case t.msg <- message{line: line, err: true}: + case <-t.closed: + } +} + +// Errorf uses fmt.Sprintf to write an error line to the terminal. +func (t *Terminal) Errorf(msg string, args ...interface{}) { + s := fmt.Sprintf(msg, args...) + t.Error(s) +} + +// truncate returns a string that has at most maxlen characters. If maxlen is +// negative, the empty string is returned. +func truncate(s string, maxlen int) string { + if maxlen < 0 { + return "" + } + + if len(s) < maxlen { + return s + } + + return s[:maxlen] +} + +// SetStatus updates the status lines. +func (t *Terminal) SetStatus(lines []string) { + if len(lines) == 0 { + return + } + + width, _, err := getTermSize(t.fd) + if err != nil || width <= 0 { + // use 80 columns by default + width = 80 + } + + // make sure that all lines have a line break and are not too long + for i, line := range lines { + line = strings.TrimRight(line, "\n") + line = truncate(line, width-2) + "\n" + lines[i] = line + } + + // make sure the last line does not have a line break + last := len(lines) - 1 + lines[last] = strings.TrimRight(lines[last], "\n") + + select { + case t.status <- status{lines: lines}: + case <-t.closed: + } +} diff --git a/internal/ui/termstatus/status_test.go b/internal/ui/termstatus/status_test.go new file mode 100644 index 000000000..6238d0532 --- /dev/null +++ b/internal/ui/termstatus/status_test.go @@ -0,0 +1,32 @@ +package termstatus + +import "testing" + +func TestTruncate(t *testing.T) { + var tests = []struct { + input string + maxlen int + output string + }{ + {"", 80, ""}, + {"", 0, ""}, + {"", -1, ""}, + {"foo", 80, "foo"}, + {"foo", 4, "foo"}, + {"foo", 3, "foo"}, + {"foo", 2, "fo"}, + {"foo", 1, "f"}, + {"foo", 0, ""}, + {"foo", -1, ""}, + } + + for _, test := range tests { + t.Run("", func(t *testing.T) { + out := truncate(test.input, test.maxlen) + if out != test.output { + t.Fatalf("wrong output for input %v, maxlen %d: want %q, got %q", + test.input, test.maxlen, test.output, out) + } + }) + } +} diff --git a/internal/ui/termstatus/terminal_posix.go b/internal/ui/termstatus/terminal_posix.go new file mode 100644 index 000000000..c16a2d989 --- /dev/null +++ b/internal/ui/termstatus/terminal_posix.go @@ -0,0 +1,36 @@ +package termstatus + +import ( + "bytes" + "fmt" + "io" + "os" +) + +const ( + posixControlMoveCursorHome = "\r" + posixControlMoveCursorUp = "\x1b[1A" + posixControlClearLine = "\x1b[2K" +) + +// posixClearCurrentLine removes all characters from the current line and resets the +// cursor position to the first column. +func posixClearCurrentLine(wr io.Writer, fd uintptr) { + // clear current line + _, err := wr.Write([]byte(posixControlMoveCursorHome + posixControlClearLine)) + if err != nil { + fmt.Fprintf(os.Stderr, "write failed: %v\n", err) + return + } +} + +// posixMoveCursorUp moves the cursor to the line n lines above the current one. +func posixMoveCursorUp(wr io.Writer, fd uintptr, n int) { + data := []byte(posixControlMoveCursorHome) + data = append(data, bytes.Repeat([]byte(posixControlMoveCursorUp), n)...) 
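+ // the resulting sequence is "\r" followed by n copies of "\x1b[1A":
+ // move the cursor to the first column, then up one line, n times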
+ _, err := wr.Write(data) + if err != nil { + fmt.Fprintf(os.Stderr, "write failed: %v\n", err) + return + } +} diff --git a/internal/ui/termstatus/terminal_unix.go b/internal/ui/termstatus/terminal_unix.go new file mode 100644 index 000000000..d82c032d9 --- /dev/null +++ b/internal/ui/termstatus/terminal_unix.go @@ -0,0 +1,47 @@ +// +build !windows + +package termstatus + +import ( + "io" + "os" + + "golang.org/x/sys/unix" + + isatty "github.com/mattn/go-isatty" +) + +// clearCurrentLine removes all characters from the current line and resets the +// cursor position to the first column. +func clearCurrentLine(wr io.Writer, fd uintptr) func(io.Writer, uintptr) { + return posixClearCurrentLine +} + +// moveCursorUp moves the cursor to the line n lines above the current one. +func moveCursorUp(wr io.Writer, fd uintptr) func(io.Writer, uintptr, int) { + return posixMoveCursorUp +} + +// canUpdateStatus returns true if status lines can be printed, the process +// output is not redirected to a file or pipe. +func canUpdateStatus(fd uintptr) bool { + if !isatty.IsTerminal(fd) { + return false + } + term := os.Getenv("TERM") + if term == "" { + return false + } + // TODO actually read termcap db and detect if terminal supports what we need + return term != "dumb" +} + +// getTermSize returns the dimensions of the given terminal. +// the code is taken from "golang.org/x/crypto/ssh/terminal" +func getTermSize(fd uintptr) (width, height int, err error) { + ws, err := unix.IoctlGetWinsize(int(fd), unix.TIOCGWINSZ) + if err != nil { + return -1, -1, err + } + return int(ws.Col), int(ws.Row), nil +} diff --git a/internal/ui/termstatus/terminal_windows.go b/internal/ui/termstatus/terminal_windows.go new file mode 100644 index 000000000..5a46169c9 --- /dev/null +++ b/internal/ui/termstatus/terminal_windows.go @@ -0,0 +1,154 @@ +// +build windows + +package termstatus + +import ( + "io" + "syscall" + "unsafe" +) + +// clearCurrentLine removes all characters from the current line and resets the +// cursor position to the first column. +func clearCurrentLine(wr io.Writer, fd uintptr) func(io.Writer, uintptr) { + // easy case, the terminal is cmd or psh, without redirection + if isWindowsTerminal(fd) { + return windowsClearCurrentLine + } + + // check if the output file type is a pipe (0x0003) + if getFileType(fd) != fileTypePipe { + // return empty func, update state is not possible on this terminal + return func(io.Writer, uintptr) {} + } + + // assume we're running in mintty/cygwin + return posixClearCurrentLine +} + +// moveCursorUp moves the cursor to the line n lines above the current one. 
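+// As with clearCurrentLine, the returned implementation depends on the kind
+// of console behind fd: the Windows console API for cmd/psh, POSIX escape
+// sequences when the output looks like a mintty/cygwin pipe, and a no-op
+// otherwise.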
+func moveCursorUp(wr io.Writer, fd uintptr) func(io.Writer, uintptr, int) { + // easy case, the terminal is cmd or psh, without redirection + if isWindowsTerminal(fd) { + return windowsMoveCursorUp + } + + // check if the output file type is a pipe (0x0003) + if getFileType(fd) != fileTypePipe { + // return empty func, update state is not possible on this terminal + return func(io.Writer, uintptr, int) {} + } + + // assume we're running in mintty/cygwin + return posixMoveCursorUp +} + +var kernel32 = syscall.NewLazyDLL("kernel32.dll") + +var ( + procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo") + procSetConsoleCursorPosition = kernel32.NewProc("SetConsoleCursorPosition") + procFillConsoleOutputCharacter = kernel32.NewProc("FillConsoleOutputCharacterW") + procFillConsoleOutputAttribute = kernel32.NewProc("FillConsoleOutputAttribute") + procGetConsoleMode = kernel32.NewProc("GetConsoleMode") + procGetFileType = kernel32.NewProc("GetFileType") +) + +type ( + short int16 + word uint16 + dword uint32 + + coord struct { + x short + y short + } + smallRect struct { + left short + top short + right short + bottom short + } + consoleScreenBufferInfo struct { + size coord + cursorPosition coord + attributes word + window smallRect + maximumWindowSize coord + } +) + +// windowsClearCurrentLine removes all characters from the current line and +// resets the cursor position to the first column. +func windowsClearCurrentLine(wr io.Writer, fd uintptr) { + var info consoleScreenBufferInfo + procGetConsoleScreenBufferInfo.Call(fd, uintptr(unsafe.Pointer(&info))) + + // clear the line + cursor := coord{ + x: info.window.left, + y: info.cursorPosition.y, + } + var count, w dword + count = dword(info.size.x) + procFillConsoleOutputAttribute.Call(fd, uintptr(info.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&w))) + procFillConsoleOutputCharacter.Call(fd, uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&w))) +} + +// windowsMoveCursorUp moves the cursor to the line n lines above the current one. +func windowsMoveCursorUp(wr io.Writer, fd uintptr, n int) { + var info consoleScreenBufferInfo + procGetConsoleScreenBufferInfo.Call(fd, uintptr(unsafe.Pointer(&info))) + + // move cursor up by n lines and to the first column + info.cursorPosition.y -= short(n) + info.cursorPosition.x = 0 + procSetConsoleCursorPosition.Call(fd, uintptr(*(*int32)(unsafe.Pointer(&info.cursorPosition)))) +} + +// getTermSize returns the dimensions of the given terminal. +// the code is taken from "golang.org/x/crypto/ssh/terminal" +func getTermSize(fd uintptr) (width, height int, err error) { + var info consoleScreenBufferInfo + _, _, e := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, fd, uintptr(unsafe.Pointer(&info)), 0) + if e != 0 { + return 0, 0, error(e) + } + return int(info.size.x), int(info.size.y), nil +} + +// isWindowsTerminal return true if the file descriptor is a windows terminal (cmd, psh). +func isWindowsTerminal(fd uintptr) bool { + var st uint32 + r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, fd, uintptr(unsafe.Pointer(&st)), 0) + return r != 0 && e == 0 +} + +const fileTypePipe = 0x0003 + +// getFileType returns the file type for the given fd. 
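+// It wraps GetFileType from kernel32.dll; for the possible return values, see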
+// https://msdn.microsoft.com/de-de/library/windows/desktop/aa364960(v=vs.85).aspx
+func getFileType(fd uintptr) int {
+ r, _, e := syscall.Syscall(procGetFileType.Addr(), 1, fd, 0, 0)
+ if e != 0 {
+ return 0
+ }
+ return int(r)
+}
+
+// canUpdateStatus returns true if status lines can be printed, i.e. the
+// process output is not redirected to a file or pipe.
+func canUpdateStatus(fd uintptr) bool {
+ // easy case, the terminal is cmd or psh, without redirection
+ if isWindowsTerminal(fd) {
+ return true
+ }
+
+ // check if the output file type is a pipe (0x0003)
+ if getFileType(fd) != fileTypePipe {
+ return false
+ }
+
+ // assume we're running in mintty/cygwin
+ return true
+}
diff --git a/internal/walker/testing.go b/internal/walker/testing.go
new file mode 100644
index 000000000..c06778242
--- /dev/null
+++ b/internal/walker/testing.go
@@ -0,0 +1 @@
+package walker
diff --git a/internal/walker/walker.go b/internal/walker/walker.go
new file mode 100644
index 000000000..7d6db3abe
--- /dev/null
+++ b/internal/walker/walker.go
@@ -0,0 +1,142 @@
+package walker
+
+import (
+ "context"
+ "path"
+ "sort"
+
+ "github.com/pkg/errors"
+
+ "github.com/restic/restic/internal/restic"
+)
+
+// TreeLoader loads a tree from a repository.
+type TreeLoader interface {
+ LoadTree(context.Context, restic.ID) (*restic.Tree, error)
+}
+
+// SkipNode is returned by WalkFunc when a dir node should not be walked.
+var SkipNode = errors.New("skip this node")
+
+// WalkFunc is the type of the function called for each node visited by Walk.
+// Path is the slash-separated path from the root node. If there was a problem
+// loading a node, err is set to a non-nil error. WalkFunc can choose to ignore
+// it by returning nil.
+//
+// When the special value SkipNode is returned and node is a dir node, it is
+// not walked. When the node is not a dir node, the remaining items in this
+// tree are skipped.
+//
+// Setting ignore to true tells Walk that it should not visit the node again.
+// For tree nodes, this means that the function is not called for the
+// referenced tree. If the node is not a tree, and all nodes in the current
+// tree have ignore set to true, the current tree will not be visited again.
+// When err is not nil and different from SkipNode, the value returned for
+// ignore is ignored.
+type WalkFunc func(parentTreeID restic.ID, path string, node *restic.Node, nodeErr error) (ignore bool, err error)
+
+// Walk calls walkFn recursively for each node in root. If walkFn returns an
+// error, it is passed up the call stack. The trees in ignoreTrees are not
+// walked. If walkFn ignores trees, these are added to the set.
+func Walk(ctx context.Context, repo TreeLoader, root restic.ID, ignoreTrees restic.IDSet, walkFn WalkFunc) error {
+ tree, err := repo.LoadTree(ctx, root)
+ _, err = walkFn(root, "/", nil, err)
+
+ if err != nil {
+ if err == SkipNode {
+ err = nil
+ }
+ return err
+ }
+
+ if ignoreTrees == nil {
+ ignoreTrees = restic.NewIDSet()
+ }
+
+ _, err = walk(ctx, repo, "/", root, tree, ignoreTrees, walkFn)
+ return err
+}
+
+// walk recursively traverses the tree, ignoring subtrees when the ID of the
+// subtree is in ignoreTrees. If err is nil and ignore is true, the subtree ID
+// will be added to ignoreTrees by walk.
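+//
+// The returned ignore value reports whether all nodes below this tree were
+// ignored; callers use it to decide whether the subtree ID is added to
+// ignoreTrees, pruning repeated visits to the same subtree.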
+func walk(ctx context.Context, repo TreeLoader, prefix string, parentTreeID restic.ID, tree *restic.Tree, ignoreTrees restic.IDSet, walkFn WalkFunc) (ignore bool, err error) {
+ var allNodesIgnored = true
+
+ if len(tree.Nodes) == 0 {
+ allNodesIgnored = false
+ }
+
+ sort.Slice(tree.Nodes, func(i, j int) bool {
+ return tree.Nodes[i].Name < tree.Nodes[j].Name
+ })
+
+ for _, node := range tree.Nodes {
+ p := path.Join(prefix, node.Name)
+
+ if node.Type == "" {
+ return false, errors.Errorf("node type is empty for node %q", node.Name)
+ }
+
+ if node.Type != "dir" {
+ ignore, err := walkFn(parentTreeID, p, node, nil)
+ if err != nil {
+ if err == SkipNode {
+ // skip the remaining entries in this tree
+ return allNodesIgnored, nil
+ }
+
+ return false, err
+ }
+
+ if !ignore {
+ allNodesIgnored = false
+ }
+
+ continue
+ }
+
+ if node.Subtree == nil {
+ return false, errors.Errorf("subtree for node %v in tree %v is nil", node.Name, p)
+ }
+
+ if ignoreTrees.Has(*node.Subtree) {
+ continue
+ }
+
+ subtree, err := repo.LoadTree(ctx, *node.Subtree)
+ ignore, err := walkFn(parentTreeID, p, node, err)
+ if err != nil {
+ if err == SkipNode {
+ if ignore {
+ ignoreTrees.Insert(*node.Subtree)
+ }
+ continue
+ }
+ return false, err
+ }
+
+ if ignore {
+ ignoreTrees.Insert(*node.Subtree)
+ }
+
+ if !ignore {
+ allNodesIgnored = false
+ }
+
+ ignore, err = walk(ctx, repo, p, *node.Subtree, subtree, ignoreTrees, walkFn)
+ if err != nil {
+ return false, err
+ }
+
+ if ignore {
+ ignoreTrees.Insert(*node.Subtree)
+ }
+
+ if !ignore {
+ allNodesIgnored = false
+ }
+ }
+
+ return allNodesIgnored, nil
+}
diff --git a/internal/walker/walker_test.go b/internal/walker/walker_test.go
new file mode 100644
index 000000000..af5c25f42
--- /dev/null
+++ b/internal/walker/walker_test.go
@@ -0,0 +1,544 @@
+package walker
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "testing"
+
+ "github.com/pkg/errors"
+ "github.com/restic/restic/internal/restic"
+)
+
+// TestTree is used to construct a list of trees for testing the walker.
+type TestTree map[string]interface{}
+
+// TestFile is used to test the walker.
+type TestFile struct{}
+
+func BuildTreeMap(tree TestTree) (m TreeMap, root restic.ID) {
+ m = TreeMap{}
+ id := buildTreeMap(tree, m)
+ return m, id
+}
+
+func buildTreeMap(tree TestTree, m TreeMap) restic.ID {
+ res := restic.NewTree()
+
+ for name, item := range tree {
+ switch elem := item.(type) {
+ case TestFile:
+ res.Insert(&restic.Node{
+ Name: name,
+ Type: "file",
+ })
+ case TestTree:
+ id := buildTreeMap(elem, m)
+ res.Insert(&restic.Node{
+ Name: name,
+ Subtree: &id,
+ Type: "dir",
+ })
+ default:
+ panic(fmt.Sprintf("invalid type %T", elem))
+ }
+ }
+
+ buf, err := json.Marshal(res)
+ if err != nil {
+ panic(err)
+ }
+
+ id := restic.Hash(buf)
+
+ if _, ok := m[id]; !ok {
+ m[id] = res
+ }
+
+ return id
+}
+
+// TreeMap returns the trees from the map on LoadTree.
+type TreeMap map[restic.ID]*restic.Tree
+
+func (t TreeMap) LoadTree(ctx context.Context, id restic.ID) (*restic.Tree, error) {
+ tree, ok := t[id]
+ if !ok {
+ return nil, errors.New("tree not found")
+ }
+
+ return tree, nil
+}
+
+// checkFunc returns a function suitable for walking the tree to check
+// something, and a function which will check the final result.
+type checkFunc func(t testing.TB) (walker WalkFunc, final func(testing.TB))
+
+// checkItemOrder ensures that the order of the 'path' arguments is the one passed in as 'want'.
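+// It fails the test if a path arrives out of order, if extra paths show up,
+// or if the walk ends before all expected paths were seen.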
+func checkItemOrder(want []string) checkFunc { + pos := 0 + return func(t testing.TB) (walker WalkFunc, final func(testing.TB)) { + walker = func(treeID restic.ID, path string, node *restic.Node, err error) (bool, error) { + if err != nil { + t.Errorf("error walking %v: %v", path, err) + return false, err + } + + if pos >= len(want) { + t.Errorf("additional unexpected path found: %v", path) + return false, nil + } + + if path != want[pos] { + t.Errorf("wrong path found, want %q, got %q", want[pos], path) + } + pos++ + return false, nil + } + + final = func(t testing.TB) { + if pos != len(want) { + t.Errorf("not enough items returned, want %d, got %d", len(want), pos) + } + } + + return walker, final + } +} + +// checkParentTreeOrder ensures that the order of the 'parentID' arguments is the one passed in as 'want'. +func checkParentTreeOrder(want []string) checkFunc { + pos := 0 + return func(t testing.TB) (walker WalkFunc, final func(testing.TB)) { + walker = func(treeID restic.ID, path string, node *restic.Node, err error) (bool, error) { + if err != nil { + t.Errorf("error walking %v: %v", path, err) + return false, err + } + + if pos >= len(want) { + t.Errorf("additional unexpected parent tree ID found: %v", treeID) + return false, nil + } + + if treeID.String() != want[pos] { + t.Errorf("wrong parent tree ID found, want %q, got %q", want[pos], treeID.String()) + } + pos++ + return false, nil + } + + final = func(t testing.TB) { + if pos != len(want) { + t.Errorf("not enough items returned, want %d, got %d", len(want), pos) + } + } + + return walker, final + } +} + +// checkSkipFor returns SkipNode if path is in skipFor, it checks that the +// paths the walk func is called for are exactly the ones in wantPaths. +func checkSkipFor(skipFor map[string]struct{}, wantPaths []string) checkFunc { + var pos int + + return func(t testing.TB) (walker WalkFunc, final func(testing.TB)) { + walker = func(treeID restic.ID, path string, node *restic.Node, err error) (bool, error) { + if err != nil { + t.Errorf("error walking %v: %v", path, err) + return false, err + } + + if pos >= len(wantPaths) { + t.Errorf("additional unexpected path found: %v", path) + return false, nil + } + + if path != wantPaths[pos] { + t.Errorf("wrong path found, want %q, got %q", wantPaths[pos], path) + } + pos++ + + if _, ok := skipFor[path]; ok { + return false, SkipNode + } + + return false, nil + } + + final = func(t testing.TB) { + if pos != len(wantPaths) { + t.Errorf("wrong number of paths returned, want %d, got %d", len(wantPaths), pos) + } + } + + return walker, final + } +} + +// checkIgnore returns SkipNode if path is in skipFor and sets ignore according +// to ignoreFor. It checks that the paths the walk func is called for are exactly +// the ones in wantPaths. 
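+// Paths missing from ignoreFor default to false, i.e. they are not ignored.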
+func checkIgnore(skipFor map[string]struct{}, ignoreFor map[string]bool, wantPaths []string) checkFunc { + var pos int + + return func(t testing.TB) (walker WalkFunc, final func(testing.TB)) { + walker = func(treeID restic.ID, path string, node *restic.Node, err error) (bool, error) { + if err != nil { + t.Errorf("error walking %v: %v", path, err) + return false, err + } + + if pos >= len(wantPaths) { + t.Errorf("additional unexpected path found: %v", path) + return ignoreFor[path], nil + } + + if path != wantPaths[pos] { + t.Errorf("wrong path found, want %q, got %q", wantPaths[pos], path) + } + pos++ + + if _, ok := skipFor[path]; ok { + return ignoreFor[path], SkipNode + } + + return ignoreFor[path], nil + } + + final = func(t testing.TB) { + if pos != len(wantPaths) { + t.Errorf("wrong number of paths returned, want %d, got %d", len(wantPaths), pos) + } + } + + return walker, final + } +} + +func TestWalker(t *testing.T) { + var tests = []struct { + tree TestTree + checks []checkFunc + }{ + { + tree: TestTree{ + "foo": TestFile{}, + "subdir": TestTree{ + "subfile": TestFile{}, + }, + }, + checks: []checkFunc{ + checkItemOrder([]string{ + "/", + "/foo", + "/subdir", + "/subdir/subfile", + }), + checkParentTreeOrder([]string{ + "2593e9dba52232c043d68c40d0f9c236b4448e37224941298ea6e223ca1e3a1b", // tree / + "2593e9dba52232c043d68c40d0f9c236b4448e37224941298ea6e223ca1e3a1b", // tree / + "2593e9dba52232c043d68c40d0f9c236b4448e37224941298ea6e223ca1e3a1b", // tree / + "a7f5be55bdd94db9df706a428e0726a4044720c9c94b9ebeb81000debe032087", // tree /subdir + }), + checkSkipFor( + map[string]struct{}{ + "/subdir": struct{}{}, + }, []string{ + "/", + "/foo", + "/subdir", + }, + ), + checkIgnore( + map[string]struct{}{}, map[string]bool{ + "/subdir": true, + }, []string{ + "/", + "/foo", + "/subdir", + "/subdir/subfile", + }, + ), + }, + }, + { + tree: TestTree{ + "foo": TestFile{}, + "subdir1": TestTree{ + "subfile1": TestFile{}, + }, + "subdir2": TestTree{ + "subfile2": TestFile{}, + "subsubdir2": TestTree{ + "subsubfile3": TestFile{}, + }, + }, + }, + checks: []checkFunc{ + checkItemOrder([]string{ + "/", + "/foo", + "/subdir1", + "/subdir1/subfile1", + "/subdir2", + "/subdir2/subfile2", + "/subdir2/subsubdir2", + "/subdir2/subsubdir2/subsubfile3", + }), + checkParentTreeOrder([]string{ + "31c86f0bc298086b787b5d24e9e33ea566c224be2939ed66a817f7fb6fdba700", // tree / + "31c86f0bc298086b787b5d24e9e33ea566c224be2939ed66a817f7fb6fdba700", // tree / + "31c86f0bc298086b787b5d24e9e33ea566c224be2939ed66a817f7fb6fdba700", // tree / + "af838dc7a83d353f0273c33d93fcdba3220d4517576f09694a971dd23b8e94dc", // tree /subdir1 + "31c86f0bc298086b787b5d24e9e33ea566c224be2939ed66a817f7fb6fdba700", // tree / + "fb749ba6ae01a3814bed9b59d74af8d7593d3074a681d4112c4983d461089e5b", // tree /subdir2 + "fb749ba6ae01a3814bed9b59d74af8d7593d3074a681d4112c4983d461089e5b", // tree /subdir2 + "eb8dd587a9c5e6be87b69d2c5264a19622f75bf6704927aaebaee78d0992531d", // tree /subdir2/subsubdir2 + }), + checkSkipFor( + map[string]struct{}{ + "/subdir1": struct{}{}, + }, []string{ + "/", + "/foo", + "/subdir1", + "/subdir2", + "/subdir2/subfile2", + "/subdir2/subsubdir2", + "/subdir2/subsubdir2/subsubfile3", + }, + ), + checkSkipFor( + map[string]struct{}{ + "/subdir1": struct{}{}, + "/subdir2/subsubdir2": struct{}{}, + }, []string{ + "/", + "/foo", + "/subdir1", + "/subdir2", + "/subdir2/subfile2", + "/subdir2/subsubdir2", + }, + ), + checkSkipFor( + map[string]struct{}{ + "/foo": struct{}{}, + }, []string{ + "/", + "/foo", + }, + ), + }, + 
}, + { + tree: TestTree{ + "foo": TestFile{}, + "subdir1": TestTree{ + "subfile1": TestFile{}, + "subfile2": TestFile{}, + "subfile3": TestFile{}, + }, + "subdir2": TestTree{ + "subfile1": TestFile{}, + "subfile2": TestFile{}, + "subfile3": TestFile{}, + }, + "subdir3": TestTree{ + "subfile1": TestFile{}, + "subfile2": TestFile{}, + "subfile3": TestFile{}, + }, + "zzz other": TestFile{}, + }, + checks: []checkFunc{ + checkItemOrder([]string{ + "/", + "/foo", + "/subdir1", + "/subdir1/subfile1", + "/subdir1/subfile2", + "/subdir1/subfile3", + "/subdir2", + "/subdir2/subfile1", + "/subdir2/subfile2", + "/subdir2/subfile3", + "/subdir3", + "/subdir3/subfile1", + "/subdir3/subfile2", + "/subdir3/subfile3", + "/zzz other", + }), + checkParentTreeOrder([]string{ + "b37368f62fdd6f8f3d19f9ef23c6534988e26db4e5dddc21d206b16b6a17a58f", // tree / + "b37368f62fdd6f8f3d19f9ef23c6534988e26db4e5dddc21d206b16b6a17a58f", // tree / + "b37368f62fdd6f8f3d19f9ef23c6534988e26db4e5dddc21d206b16b6a17a58f", // tree / + "787b9260d4f0f8298f5cf58945681961982eb6aa1c526845206c5b353aeb4351", // tree /subdir1 + "787b9260d4f0f8298f5cf58945681961982eb6aa1c526845206c5b353aeb4351", // tree /subdir1 + "787b9260d4f0f8298f5cf58945681961982eb6aa1c526845206c5b353aeb4351", // tree /subdir1 + "b37368f62fdd6f8f3d19f9ef23c6534988e26db4e5dddc21d206b16b6a17a58f", // tree / + "787b9260d4f0f8298f5cf58945681961982eb6aa1c526845206c5b353aeb4351", // tree /subdir2 + "787b9260d4f0f8298f5cf58945681961982eb6aa1c526845206c5b353aeb4351", // tree /subdir2 + "787b9260d4f0f8298f5cf58945681961982eb6aa1c526845206c5b353aeb4351", // tree /subdir2 + "b37368f62fdd6f8f3d19f9ef23c6534988e26db4e5dddc21d206b16b6a17a58f", // tree / + "787b9260d4f0f8298f5cf58945681961982eb6aa1c526845206c5b353aeb4351", // tree /subdir3 + "787b9260d4f0f8298f5cf58945681961982eb6aa1c526845206c5b353aeb4351", // tree /subdir3 + "787b9260d4f0f8298f5cf58945681961982eb6aa1c526845206c5b353aeb4351", // tree /subdir3 + "b37368f62fdd6f8f3d19f9ef23c6534988e26db4e5dddc21d206b16b6a17a58f", // tree / + }), + checkIgnore( + map[string]struct{}{ + "/subdir1": struct{}{}, + }, map[string]bool{ + "/subdir1": true, + }, []string{ + "/", + "/foo", + "/subdir1", + "/zzz other", + }, + ), + checkIgnore( + map[string]struct{}{}, map[string]bool{ + "/subdir1": true, + }, []string{ + "/", + "/foo", + "/subdir1", + "/subdir1/subfile1", + "/subdir1/subfile2", + "/subdir1/subfile3", + "/zzz other", + }, + ), + checkIgnore( + map[string]struct{}{ + "/subdir2": struct{}{}, + }, map[string]bool{ + "/subdir2": true, + }, []string{ + "/", + "/foo", + "/subdir1", + "/subdir1/subfile1", + "/subdir1/subfile2", + "/subdir1/subfile3", + "/subdir2", + "/zzz other", + }, + ), + checkIgnore( + map[string]struct{}{}, map[string]bool{ + "/subdir1/subfile1": true, + "/subdir1/subfile2": true, + "/subdir1/subfile3": true, + }, []string{ + "/", + "/foo", + "/subdir1", + "/subdir1/subfile1", + "/subdir1/subfile2", + "/subdir1/subfile3", + "/zzz other", + }, + ), + checkIgnore( + map[string]struct{}{}, map[string]bool{ + "/subdir2/subfile1": true, + "/subdir2/subfile2": true, + "/subdir2/subfile3": true, + }, []string{ + "/", + "/foo", + "/subdir1", + "/subdir1/subfile1", + "/subdir1/subfile2", + "/subdir1/subfile3", + "/subdir2", + "/subdir2/subfile1", + "/subdir2/subfile2", + "/subdir2/subfile3", + "/zzz other", + }, + ), + }, + }, + { + tree: TestTree{ + "subdir1": TestTree{}, + "subdir2": TestTree{}, + "subdir3": TestTree{ + "file": TestFile{}, + }, + "subdir4": TestTree{ + "file": TestFile{}, + }, + "subdir5": TestTree{}, + 
"subdir6": TestTree{}, + }, + checks: []checkFunc{ + checkItemOrder([]string{ + "/", + "/subdir1", + "/subdir2", + "/subdir3", + "/subdir3/file", + "/subdir4", + "/subdir4/file", + "/subdir5", + "/subdir6", + }), + }, + }, + { + tree: TestTree{ + "subdir1": TestTree{}, + "subdir2": TestTree{}, + "subdir3": TestTree{ + "file": TestFile{}, + }, + "subdir4": TestTree{}, + "subdir5": TestTree{ + "file": TestFile{}, + }, + "subdir6": TestTree{}, + }, + checks: []checkFunc{ + checkIgnore( + map[string]struct{}{}, map[string]bool{ + "/subdir2": true, + }, []string{ + "/", + "/subdir1", + "/subdir2", + "/subdir3", + "/subdir3/file", + "/subdir5", + "/subdir5/file", + }, + ), + }, + }, + } + + for _, test := range tests { + t.Run("", func(t *testing.T) { + repo, root := BuildTreeMap(test.tree) + for _, check := range test.checks { + t.Run("", func(t *testing.T) { + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + + fn, last := check(t) + err := Walk(ctx, repo, root, restic.NewIDSet(), fn) + if err != nil { + t.Error(err) + } + last(t) + }) + } + }) + } +} diff --git a/run_integration_tests.go b/run_integration_tests.go new file mode 100644 index 000000000..8d55055b3 --- /dev/null +++ b/run_integration_tests.go @@ -0,0 +1,673 @@ +// +build ignore + +package main + +import ( + "bufio" + "bytes" + "encoding/base64" + "errors" + "flag" + "fmt" + "io" + "io/ioutil" + "net/http" + "os" + "os/exec" + "path/filepath" + "runtime" + "strconv" + "strings" +) + +// ForbiddenImports are the packages from the stdlib that should not be used in +// our code. +var ForbiddenImports = map[string]bool{ + "errors": true, +} + +// Use a specific version of gofmt (the latest stable, usually) to guarantee +// deterministic formatting. This is used with the GoVersion.AtLeast() +// function (so that we don't forget to update it). +var GofmtVersion = ParseGoVersion("go1.11") + +// GoVersion is the version of Go used to compile the project. +type GoVersion struct { + Major int + Minor int + Patch int +} + +// ParseGoVersion parses the Go version s. If s cannot be parsed, the returned GoVersion is null. +func ParseGoVersion(s string) (v GoVersion) { + if !strings.HasPrefix(s, "go") { + return + } + + s = s[2:] + data := strings.Split(s, ".") + if len(data) < 2 || len(data) > 3 { + // invalid version + return GoVersion{} + } + + var err error + + v.Major, err = strconv.Atoi(data[0]) + if err != nil { + return GoVersion{} + } + + // try to parse the minor version while removing an eventual suffix (like + // "rc2" or so) + for s := data[1]; s != ""; s = s[:len(s)-1] { + v.Minor, err = strconv.Atoi(s) + if err == nil { + break + } + } + + if v.Minor == 0 { + // no minor version found + return GoVersion{} + } + + if len(data) >= 3 { + v.Patch, err = strconv.Atoi(data[2]) + if err != nil { + return GoVersion{} + } + } + + return +} + +// AtLeast returns true if v is at least as new as other. If v is empty, true is returned. +func (v GoVersion) AtLeast(other GoVersion) bool { + var empty GoVersion + + // the empty version satisfies all versions + if v == empty { + return true + } + + if v.Major < other.Major { + return false + } + + if v.Minor < other.Minor { + return false + } + + if v.Patch < other.Patch { + return false + } + + return true +} + +func (v GoVersion) String() string { + return fmt.Sprintf("Go %d.%d.%d", v.Major, v.Minor, v.Patch) +} + +// CloudBackends contains a map of backend tests for cloud services to one +// of the essential environment variables which must be present in order to +// test it. 
+var CloudBackends = map[string]string{ + "restic/backend/s3.TestBackendS3": "RESTIC_TEST_S3_REPOSITORY", + "restic/backend/swift.TestBackendSwift": "RESTIC_TEST_SWIFT", + "restic/backend/b2.TestBackendB2": "RESTIC_TEST_B2_REPOSITORY", + "restic/backend/gs.TestBackendGS": "RESTIC_TEST_GS_REPOSITORY", + "restic/backend/azure.TestBackendAzure": "RESTIC_TEST_AZURE_REPOSITORY", +} + +var runCrossCompile = flag.Bool("cross-compile", true, "run cross compilation tests") + +func init() { + flag.Parse() +} + +// CIEnvironment is implemented by environments where tests can be run. +type CIEnvironment interface { + Prepare() error + RunTests() error + Teardown() error +} + +// TravisEnvironment is the environment in which Travis tests run. +type TravisEnvironment struct { + goxOSArch []string + env map[string]string + gcsCredentialsFile string +} + +func (env *TravisEnvironment) getMinio() error { + tempfile, err := os.Create(filepath.Join(os.Getenv("GOPATH"), "bin", "minio")) + if err != nil { + return fmt.Errorf("create tempfile for minio download failed: %v", err) + } + + url := fmt.Sprintf("https://dl.minio.io/server/minio/release/%s-%s/minio", + runtime.GOOS, runtime.GOARCH) + msg("downloading %v\n", url) + res, err := http.Get(url) + if err != nil { + return fmt.Errorf("error downloading minio server: %v", err) + } + + _, err = io.Copy(tempfile, res.Body) + if err != nil { + return fmt.Errorf("error saving minio server to file: %v", err) + } + + err = res.Body.Close() + if err != nil { + return fmt.Errorf("error closing HTTP download: %v", err) + } + + err = tempfile.Close() + if err != nil { + msg("closing tempfile failed: %v\n", err) + return fmt.Errorf("error closing minio server file: %v", err) + } + + err = os.Chmod(tempfile.Name(), 0755) + if err != nil { + return fmt.Errorf("chmod(minio-server) failed: %v", err) + } + + msg("downloaded minio server to %v\n", tempfile.Name()) + return nil +} + +// Prepare installs dependencies and starts services in order to run the tests. 
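+// For Travis this means fetching helper tools with "go get", downloading a
+// minio server binary, and (if configured) extracting GCS credentials from
+// the environment.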
+func (env *TravisEnvironment) Prepare() error {
+ env.env = make(map[string]string)
+
+ msg("preparing environment for Travis CI\n")
+
+ pkgs := []string{
+ "github.com/NebulousLabs/glyphcheck",
+ "github.com/restic/rest-server/cmd/rest-server",
+ "github.com/restic/calens",
+ "github.com/ncw/rclone",
+ }
+
+ for _, pkg := range pkgs {
+ err := run("go", "get", pkg)
+ if err != nil {
+ return err
+ }
+ }
+
+ if err := env.getMinio(); err != nil {
+ return err
+ }
+
+ if *runCrossCompile {
+ // only test cross compilation on linux with Travis
+ if err := run("go", "get", "github.com/mitchellh/gox"); err != nil {
+ return err
+ }
+ if runtime.GOOS == "linux" {
+ env.goxOSArch = []string{
+ "linux/386", "linux/amd64",
+ "windows/386", "windows/amd64",
+ "darwin/386", "darwin/amd64",
+ "freebsd/386", "freebsd/amd64",
+ "openbsd/386", "openbsd/amd64",
+ "netbsd/386", "netbsd/amd64",
+ "linux/arm", "freebsd/arm",
+ }
+
+ if os.Getenv("RESTIC_BUILD_SOLARIS") == "0" {
+ msg("Skipping Solaris build\n")
+ } else {
+ env.goxOSArch = append(env.goxOSArch, "solaris/amd64")
+ }
+ } else {
+ env.goxOSArch = []string{runtime.GOOS + "/" + runtime.GOARCH}
+ }
+
+ msg("gox: OS/ARCH %v\n", env.goxOSArch)
+ }
+
+ // skip all cloud backend tests when requested via the environment
+ if os.Getenv("RESTIC_TEST_CLOUD_BACKENDS") == "0" {
+ msg("skipping cloud backend tests\n")
+
+ for _, name := range CloudBackends {
+ err := os.Unsetenv(name)
+ if err != nil {
+ msg(" error unsetting %v: %v\n", name, err)
+ }
+ }
+ }
+
+ // extract credentials file for GCS tests
+ if b64data := os.Getenv("RESTIC_TEST_GS_APPLICATION_CREDENTIALS_B64"); b64data != "" {
+ buf, err := base64.StdEncoding.DecodeString(b64data)
+ if err != nil {
+ return err
+ }
+
+ f, err := ioutil.TempFile("", "gcs-credentials-")
+ if err != nil {
+ return err
+ }
+
+ msg("saving GCS credentials to %v\n", f.Name())
+
+ _, err = f.Write(buf)
+ if err != nil {
+ f.Close()
+ return err
+ }
+
+ env.gcsCredentialsFile = f.Name()
+
+ if err = f.Close(); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// Teardown stops backend services and cleans the environment again.
+func (env *TravisEnvironment) Teardown() error {
+ msg("run travis teardown\n")
+
+ if env.gcsCredentialsFile != "" {
+ msg("remove gcs credentials file %v\n", env.gcsCredentialsFile)
+ return os.Remove(env.gcsCredentialsFile)
+ }
+
+ return nil
+}
+
+// RunTests starts the tests for Travis.
+func (env *TravisEnvironment) RunTests() error {
+ env.env["GOPATH"] = os.Getenv("GOPATH")
+ if env.gcsCredentialsFile != "" {
+ env.env["GOOGLE_APPLICATION_CREDENTIALS"] = env.gcsCredentialsFile
+ }
+
+ // ensure that the following tests cannot be silently skipped on Travis
+ ensureTests := []string{
+ "restic/backend/rest.TestBackendREST",
+ "restic/backend/sftp.TestBackendSFTP",
+ "restic/backend/s3.TestBackendMinio",
+ "restic/backend/rclone.TestBackendRclone",
+ }
+
+ // make sure that cloud backends for which we have credentials are not
+ // silently skipped.
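+ // (they are added to RESTIC_TEST_DISALLOW_SKIP below)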
+ for pkg, env := range CloudBackends {
+ if _, ok := os.LookupEnv(env); ok {
+ ensureTests = append(ensureTests, pkg)
+ } else {
+ msg("credentials for %v are not available, skipping\n", pkg)
+ }
+ }
+
+ env.env["RESTIC_TEST_DISALLOW_SKIP"] = strings.Join(ensureTests, ",")
+
+ if *runCrossCompile {
+ // compile for all target architectures with tags
+ for _, tags := range []string{"", "debug"} {
+ err := runWithEnv(env.env, "gox", "-verbose",
+ "-osarch", strings.Join(env.goxOSArch, " "),
+ "-tags", tags,
+ "-output", "/tmp/{{.Dir}}_{{.OS}}_{{.Arch}}",
+ "./cmd/restic")
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ args := []string{"go", "run", "build.go"}
+ v := ParseGoVersion(runtime.Version())
+ msg("Detected Go version %v\n", v)
+ if v.AtLeast(GoVersion{1, 11, 0}) {
+ args = []string{"go", "run", "-mod=vendor", "build.go"}
+ env.env["GOPROXY"] = "off"
+ delete(env.env, "GOPATH")
+ os.Unsetenv("GOPATH")
+ }
+
+ // run the build script
+ err := run(args[0], args[1:]...)
+ if err != nil {
+ return err
+ }
+
+ // run the tests and gather coverage information (for Go >= 1.10)
+ switch {
+ case v.AtLeast(GoVersion{1, 11, 0}):
+ err = runWithEnv(env.env, "go", "test", "-count", "1", "-mod=vendor", "-coverprofile", "all.cov", "./...")
+ case v.AtLeast(GoVersion{1, 10, 0}):
+ err = runWithEnv(env.env, "go", "test", "-count", "1", "-coverprofile", "all.cov", "./...")
+ default:
+ err = runWithEnv(env.env, "go", "test", "-count", "1", "./...")
+ }
+ if err != nil {
+ return err
+ }
+
+ // only run gofmt on a specific version of Go.
+ if v.AtLeast(GofmtVersion) {
+ if err = runGofmt(); err != nil {
+ return err
+ }
+
+ msg("run go mod vendor\n")
+ if err := runGoModVendor(); err != nil {
+ return err
+ }
+
+ msg("run go mod tidy\n")
+ if err := runGoModTidy(); err != nil {
+ return err
+ }
+ } else {
+ msg("Skipping gofmt and module vendor check for %v\n", v)
+ }
+
+ if err = runGlyphcheck(); err != nil {
+ return err
+ }
+
+ // check for forbidden imports
+ deps, err := env.findImports()
+ if err != nil {
+ return err
+ }
+
+ foundForbiddenImports := false
+ for name, imports := range deps {
+ for _, pkg := range imports {
+ if _, ok := ForbiddenImports[pkg]; ok {
+ fmt.Fprintf(os.Stderr, "========== package %v imports forbidden package %v\n", name, pkg)
+ foundForbiddenImports = true
+ }
+ }
+ }
+
+ if foundForbiddenImports {
+ return errors.New("CI: forbidden imports found")
+ }
+
+ // check that the entries in changelog/ are valid
+ if err := run("calens"); err != nil {
+ return errors.New("calens failed, files in changelog/ are not valid")
+ }
+
+ return nil
+}
+
+// AppveyorEnvironment is the environment on Windows.
+type AppveyorEnvironment struct{}
+
+// Prepare does not need to do anything on AppVeyor.
+func (env *AppveyorEnvironment) Prepare() error {
+ return nil
+}
+
+// RunTests starts the tests.
+func (env *AppveyorEnvironment) RunTests() error {
+ e := map[string]string{
+ "GOPROXY": "off",
+ }
+ return runWithEnv(e, "go", "run", "-mod=vendor", "build.go", "-v", "-T")
+}
+
+// Teardown is a no-op.
+func (env *AppveyorEnvironment) Teardown() error {
+ return nil
+}
+
+// findGoFiles returns a list of go source code file names below dir.
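+// The vendor/ and pkg/ directories are skipped.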
+func findGoFiles(dir string) (list []string, err error) { + err = filepath.Walk(dir, func(name string, fi os.FileInfo, err error) error { + relpath, err := filepath.Rel(dir, name) + if err != nil { + return err + } + + if relpath == "vendor" || relpath == "pkg" { + return filepath.SkipDir + } + + if filepath.Ext(relpath) == ".go" { + list = append(list, relpath) + } + + return err + }) + + return list, err +} + +func msg(format string, args ...interface{}) { + fmt.Printf("CI: "+format, args...) +} + +func updateEnv(env []string, override map[string]string) []string { + var newEnv []string + for _, s := range env { + d := strings.SplitN(s, "=", 2) + key := d[0] + + if _, ok := override[key]; ok { + continue + } + + newEnv = append(newEnv, s) + } + + for k, v := range override { + newEnv = append(newEnv, k+"="+v) + } + + return newEnv +} + +func (env *TravisEnvironment) findImports() (map[string][]string, error) { + res := make(map[string][]string) + + cmd := exec.Command("go", "list", "-f", `{{.ImportPath}} {{join .Imports " "}}`, "./internal/...", "./cmd/...") + cmd.Env = updateEnv(os.Environ(), env.env) + cmd.Stderr = os.Stderr + + output, err := cmd.Output() + if err != nil { + return nil, err + } + + sc := bufio.NewScanner(bytes.NewReader(output)) + for sc.Scan() { + wordScanner := bufio.NewScanner(strings.NewReader(sc.Text())) + wordScanner.Split(bufio.ScanWords) + + if !wordScanner.Scan() { + return nil, fmt.Errorf("package name not found in line: %s", output) + } + name := wordScanner.Text() + var deps []string + + for wordScanner.Scan() { + deps = append(deps, wordScanner.Text()) + } + + res[name] = deps + } + + return res, nil +} + +func runGofmt() error { + dir, err := os.Getwd() + if err != nil { + return fmt.Errorf("Getwd(): %v", err) + } + + files, err := findGoFiles(dir) + if err != nil { + return fmt.Errorf("error finding Go files: %v", err) + } + + msg("runGofmt() with %d files\n", len(files)) + args := append([]string{"-l"}, files...) + cmd := exec.Command("gofmt", args...) 
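+ // "gofmt -l" lists the files whose formatting differs, so any output
+ // means at least one file is not properly formatted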
+ cmd.Stderr = os.Stderr
+
+ buf, err := cmd.Output()
+ if err != nil {
+ return fmt.Errorf("error running gofmt: %v\noutput: %s", err, buf)
+ }
+
+ if len(buf) > 0 {
+ return fmt.Errorf("not formatted with `gofmt`:\n%s", buf)
+ }
+
+ return nil
+}
+
+func runGoModVendor() error {
+ cmd := exec.Command("go", "mod", "vendor")
+ cmd.Stderr = os.Stderr
+ cmd.Stdout = os.Stdout
+ cmd.Env = updateEnv(os.Environ(), map[string]string{
+ "GO111MODULE": "on",
+ })
+
+ err := cmd.Run()
+ if err != nil {
+ return fmt.Errorf("error running 'go mod vendor': %v", err)
+ }
+
+ // check that "git diff" does not return any output
+ cmd = exec.Command("git", "diff", "vendor")
+ cmd.Stderr = os.Stderr
+
+ buf, err := cmd.Output()
+ if err != nil {
+ return fmt.Errorf("error running 'git diff vendor': %v\noutput: %s", err, buf)
+ }
+
+ if len(buf) > 0 {
+ return fmt.Errorf("vendor/ directory was modified:\n%s", buf)
+ }
+
+ return nil
+}
+
+// run "go mod tidy" so that go.sum and go.mod are updated to reflect all
+// dependencies for all OS/Arch combinations, see
+// https://github.com/golang/go/wiki/Modules#why-does-go-mod-tidy-put-so-many-indirect-dependencies-in-my-gomod
+func runGoModTidy() error {
+ cmd := exec.Command("go", "mod", "tidy")
+ cmd.Stderr = os.Stderr
+ cmd.Stdout = os.Stdout
+ cmd.Env = updateEnv(os.Environ(), map[string]string{
+ "GO111MODULE": "on",
+ })
+
+ err := cmd.Run()
+ if err != nil {
+ return fmt.Errorf("error running 'go mod tidy': %v", err)
+ }
+
+ // check that "git diff" does not return any output
+ cmd = exec.Command("git", "diff", "go.sum", "go.mod")
+ cmd.Stderr = os.Stderr
+
+ buf, err := cmd.Output()
+ if err != nil {
+ return fmt.Errorf("error running 'git diff go.sum go.mod': %v\noutput: %s", err, buf)
+ }
+
+ if len(buf) > 0 {
+ return fmt.Errorf("go.mod/go.sum were modified:\n%s", buf)
+ }
+
+ return nil
+}
+
+func runGlyphcheck() error {
+ cmd := exec.Command("glyphcheck", "./cmd/...", "./internal/...")
+ cmd.Stderr = os.Stderr
+
+ buf, err := cmd.Output()
+ if err != nil {
+ return fmt.Errorf("error running glyphcheck: %v\noutput: %s", err, buf)
+ }
+
+ return nil
+}
+
+func run(command string, args ...string) error {
+ msg("run %v %v\n", command, strings.Join(args, " "))
+ return runWithEnv(nil, command, args...)
+}
+
+// runWithEnv calls a command with the current environment, except the entries
+// of the env map are set additionally.
+func runWithEnv(env map[string]string, command string, args ...string) error {
+ msg("runWithEnv %v %v\n", command, strings.Join(args, " "))
+ cmd := exec.Command(command, args...)
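+ // pass the child's output through so it shows up in the CI log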
+ cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + if env != nil { + cmd.Env = updateEnv(os.Environ(), env) + } + err := cmd.Run() + + if err != nil { + return fmt.Errorf("error running %v %v: %v", + command, strings.Join(args, " "), err) + } + return nil +} + +func isTravis() bool { + return os.Getenv("TRAVIS_BUILD_DIR") != "" +} + +func isAppveyor() bool { + return runtime.GOOS == "windows" +} + +func main() { + var env CIEnvironment + + switch { + case isTravis(): + env = &TravisEnvironment{} + case isAppveyor(): + env = &AppveyorEnvironment{} + default: + fmt.Fprintln(os.Stderr, "unknown CI environment") + os.Exit(1) + } + + err := env.Prepare() + if err != nil { + fmt.Fprintf(os.Stderr, "error preparing: %v\n", err) + os.Exit(1) + } + + err = env.RunTests() + if err != nil { + fmt.Fprintf(os.Stderr, "error running tests: %v\n", err) + os.Exit(2) + } + + err = env.Teardown() + if err != nil { + fmt.Fprintf(os.Stderr, "error during teardown: %v\n", err) + os.Exit(3) + } +} -- cgit v1.2.3 From 16a0eedfa086c5efbaf1841e541bac2ce71e532b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?F=C3=A9lix=20Sipma?= Date: Sat, 9 Jun 2018 10:31:31 +0200 Subject: privacy breach Gbp-Pq: Name 0001-privacy-breach.patch --- README.rst | 27 --------------------------- 1 file changed, 27 deletions(-) diff --git a/README.rst b/README.rst index 76110ea36..791329d9e 100644 --- a/README.rst +++ b/README.rst @@ -1,5 +1,3 @@ -|Documentation| |Build Status| |Build status| |Report Card| |Say Thanks| |TestCoverage| |Reviewed by Hound| - Introduction ------------ @@ -110,28 +108,3 @@ License Restic is licensed under `BSD 2-Clause License `__. You can find the complete text in ``LICENSE``. - -Sponsorship ------------ - -Backend integration tests for Google Cloud Storage and Microsoft Azure Blob -Storage are sponsored by `AppsCode `__! - -|AppsCode| - -.. |Documentation| image:: https://readthedocs.org/projects/restic/badge/?version=latest - :target: https://restic.readthedocs.io/en/latest/?badge=latest -.. |Build Status| image:: https://travis-ci.com/restic/restic.svg?branch=master - :target: https://travis-ci.com/restic/restic -.. |Build status| image:: https://ci.appveyor.com/api/projects/status/nuy4lfbgfbytw92q/branch/master?svg=true - :target: https://ci.appveyor.com/project/fd0/restic/branch/master -.. |Report Card| image:: https://goreportcard.com/badge/github.com/restic/restic - :target: https://goreportcard.com/report/github.com/restic/restic -.. |Say Thanks| image:: https://img.shields.io/badge/Say%20Thanks-!-1EAEDB.svg - :target: https://saythanks.io/to/restic -.. |TestCoverage| image:: https://codecov.io/gh/restic/restic/branch/master/graph/badge.svg - :target: https://codecov.io/gh/restic/restic -.. |AppsCode| image:: https://cdn.appscode.com/images/logo/appscode/ac-logo-color.png - :target: https://appscode.com -.. 
|Reviewed by Hound| image:: https://img.shields.io/badge/Reviewed_by-Hound-8E64B0.svg - :target: https://houndci.com -- cgit v1.2.3 From a565a36fc00b12217e4474e4a58668a793d3dde8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?F=C3=A9lix=20Sipma?= Date: Sat, 9 Jun 2018 10:31:31 +0200 Subject: privacy breach Gbp-Pq: Name 0001-privacy-breach.patch --- README.rst | 27 --------------------------- 1 file changed, 27 deletions(-) diff --git a/README.rst b/README.rst index 76110ea36..791329d9e 100644 --- a/README.rst +++ b/README.rst @@ -1,5 +1,3 @@ -|Documentation| |Build Status| |Build status| |Report Card| |Say Thanks| |TestCoverage| |Reviewed by Hound| - Introduction ------------ @@ -110,28 +108,3 @@ License Restic is licensed under `BSD 2-Clause License `__. You can find the complete text in ``LICENSE``. - -Sponsorship ------------ - -Backend integration tests for Google Cloud Storage and Microsoft Azure Blob -Storage are sponsored by `AppsCode `__! - -|AppsCode| - -.. |Documentation| image:: https://readthedocs.org/projects/restic/badge/?version=latest - :target: https://restic.readthedocs.io/en/latest/?badge=latest -.. |Build Status| image:: https://travis-ci.com/restic/restic.svg?branch=master - :target: https://travis-ci.com/restic/restic -.. |Build status| image:: https://ci.appveyor.com/api/projects/status/nuy4lfbgfbytw92q/branch/master?svg=true - :target: https://ci.appveyor.com/project/fd0/restic/branch/master -.. |Report Card| image:: https://goreportcard.com/badge/github.com/restic/restic - :target: https://goreportcard.com/report/github.com/restic/restic -.. |Say Thanks| image:: https://img.shields.io/badge/Say%20Thanks-!-1EAEDB.svg - :target: https://saythanks.io/to/restic -.. |TestCoverage| image:: https://codecov.io/gh/restic/restic/branch/master/graph/badge.svg - :target: https://codecov.io/gh/restic/restic -.. |AppsCode| image:: https://cdn.appscode.com/images/logo/appscode/ac-logo-color.png - :target: https://appscode.com -.. 
|Reviewed by Hound| image:: https://img.shields.io/badge/Reviewed_by-Hound-8E64B0.svg - :target: https://houndci.com -- cgit v1.2.3 From fce61b931d517beaa3457f298995012a9d70a3cf Mon Sep 17 00:00:00 2001 From: Debian Go Packaging Team Date: Sat, 9 Feb 2019 18:53:40 -0500 Subject: fix-message-in-quiet-mode commit 60c7020bcb3be64dc1131a8564a7818898a50c83 (upstream/fix-2140) Author: Alexander Neumann Date: Thu Jan 10 21:27:56 2019 +0100 Upstream: https://github.com/restic/restic/commit/60c7020bcb3be64dc1131a8564a7818898a50c83 Bug-Debian: http://bug.debian.org/921849 Print message in verbose mode only Closes #2140 Gbp-Pq: Name 0002-fix-message-in-quiet-mode.patch --- cmd/restic/cmd_backup.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/cmd/restic/cmd_backup.go b/cmd/restic/cmd_backup.go index fc24868a5..dff1d31db 100644 --- a/cmd/restic/cmd_backup.go +++ b/cmd/restic/cmd_backup.go @@ -395,7 +395,9 @@ func runBackup(opts BackupOptions, gopts GlobalOptions, term *termstatus.Termina var t tomb.Tomb - term.Print("open repository\n") + if gopts.verbosity >= 2 { + term.Print("open repository\n") + } repo, err := OpenRepository(gopts) if err != nil { return err -- cgit v1.2.3 From dfa4471409cd0b7cab8301d1c8b6d93703224a1c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?F=C3=A9lix=20Sipma?= Date: Sat, 9 Jun 2018 10:31:31 +0200 Subject: privacy breach Gbp-Pq: Name 0001-privacy-breach.patch --- README.rst | 27 --------------------------- 1 file changed, 27 deletions(-) diff --git a/README.rst b/README.rst index 76110ea36..791329d9e 100644 --- a/README.rst +++ b/README.rst @@ -1,5 +1,3 @@ -|Documentation| |Build Status| |Build status| |Report Card| |Say Thanks| |TestCoverage| |Reviewed by Hound| - Introduction ------------ @@ -110,28 +108,3 @@ License Restic is licensed under `BSD 2-Clause License `__. You can find the complete text in ``LICENSE``. - -Sponsorship ------------ - -Backend integration tests for Google Cloud Storage and Microsoft Azure Blob -Storage are sponsored by `AppsCode `__! - -|AppsCode| - -.. |Documentation| image:: https://readthedocs.org/projects/restic/badge/?version=latest - :target: https://restic.readthedocs.io/en/latest/?badge=latest -.. |Build Status| image:: https://travis-ci.com/restic/restic.svg?branch=master - :target: https://travis-ci.com/restic/restic -.. |Build status| image:: https://ci.appveyor.com/api/projects/status/nuy4lfbgfbytw92q/branch/master?svg=true - :target: https://ci.appveyor.com/project/fd0/restic/branch/master -.. |Report Card| image:: https://goreportcard.com/badge/github.com/restic/restic - :target: https://goreportcard.com/report/github.com/restic/restic -.. |Say Thanks| image:: https://img.shields.io/badge/Say%20Thanks-!-1EAEDB.svg - :target: https://saythanks.io/to/restic -.. |TestCoverage| image:: https://codecov.io/gh/restic/restic/branch/master/graph/badge.svg - :target: https://codecov.io/gh/restic/restic -.. |AppsCode| image:: https://cdn.appscode.com/images/logo/appscode/ac-logo-color.png - :target: https://appscode.com -.. 
|Reviewed by Hound| image:: https://img.shields.io/badge/Reviewed_by-Hound-8E64B0.svg - :target: https://houndci.com -- cgit v1.2.3 From b82b03076a731cc61322e5564121969e9f5e4679 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?F=C3=A9lix=20Sipma?= Date: Sat, 23 Nov 2019 20:24:26 +0100 Subject: privacy breach Gbp-Pq: Name 0001-privacy-breach.patch --- README.rst | 25 ------------------------- 1 file changed, 25 deletions(-) diff --git a/README.rst b/README.rst index 3dede3629..791329d9e 100644 --- a/README.rst +++ b/README.rst @@ -1,5 +1,3 @@ -|Documentation| |Build Status| |Build status| |Report Card| |Say Thanks| |TestCoverage| |Reviewed by Hound| - Introduction ------------ @@ -110,26 +108,3 @@ License Restic is licensed under `BSD 2-Clause License `__. You can find the complete text in ``LICENSE``. - -Sponsorship ------------ - -Backend integration tests for Google Cloud Storage and Microsoft Azure Blob -Storage are sponsored by `AppsCode `__! - -|AppsCode| - -.. |Documentation| image:: https://readthedocs.org/projects/restic/badge/?version=latest - :target: https://restic.readthedocs.io/en/latest/?badge=latest -.. |Build Status| image:: https://travis-ci.com/restic/restic.svg?branch=master - :target: https://travis-ci.com/restic/restic -.. |Build status| image:: https://ci.appveyor.com/api/projects/status/nuy4lfbgfbytw92q/branch/master?svg=true - :target: https://ci.appveyor.com/project/fd0/restic/branch/master -.. |Report Card| image:: https://goreportcard.com/badge/github.com/restic/restic - :target: https://goreportcard.com/report/github.com/restic/restic -.. |Say Thanks| image:: https://img.shields.io/badge/Say%20Thanks-!-1EAEDB.svg - :target: https://saythanks.io/to/restic -.. |AppsCode| image:: https://cdn.appscode.com/images/logo/appscode/ac-logo-color.png - :target: https://appscode.com -.. |Reviewed by Hound| image:: https://img.shields.io/badge/Reviewed_by-Hound-8E64B0.svg - :target: https://houndci.com -- cgit v1.2.3 From 9ea5b0b9b9eb9347b13cbbd96ebe4f5b6a8ae945 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?F=C3=A9lix=20Sipma?= Date: Sun, 20 Sep 2020 15:40:12 +0200 Subject: privacy-breach Gbp-Pq: Name 0001-privacy-breach.patch --- README.rst | 25 ------------------------- 1 file changed, 25 deletions(-) diff --git a/README.rst b/README.rst index 690ffecdd..791329d9e 100644 --- a/README.rst +++ b/README.rst @@ -1,5 +1,3 @@ -|Documentation| |Build Status| |Build status| |Report Card| |Say Thanks| |Reviewed by Hound| - Introduction ------------ @@ -110,26 +108,3 @@ License Restic is licensed under `BSD 2-Clause License `__. You can find the complete text in ``LICENSE``. - -Sponsorship ------------ - -Backend integration tests for Google Cloud Storage and Microsoft Azure Blob -Storage are sponsored by `AppsCode `__! - -|AppsCode| - -.. |Documentation| image:: https://readthedocs.org/projects/restic/badge/?version=latest - :target: https://restic.readthedocs.io/en/latest/?badge=latest -.. |Build Status| image:: https://travis-ci.com/restic/restic.svg?branch=master - :target: https://travis-ci.com/restic/restic -.. |Build status| image:: https://ci.appveyor.com/api/projects/status/nuy4lfbgfbytw92q/branch/master?svg=true - :target: https://ci.appveyor.com/project/fd0/restic/branch/master -.. |Report Card| image:: https://goreportcard.com/badge/github.com/restic/restic - :target: https://goreportcard.com/report/github.com/restic/restic -.. |Say Thanks| image:: https://img.shields.io/badge/Say%20Thanks-!-1EAEDB.svg - :target: https://saythanks.io/to/restic -.. 
|AppsCode| image:: https://cdn.appscode.com/images/logo/appscode/ac-logo-color.png - :target: https://appscode.com -.. |Reviewed by Hound| image:: https://img.shields.io/badge/Reviewed_by-Hound-8E64B0.svg - :target: https://houndci.com -- cgit v1.2.3 From 3f7abe5694c2993a8502c396f7f9394e041a1b5f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?F=C3=A9lix=20Sipma?= Date: Sun, 20 Sep 2020 15:40:12 +0200 Subject: privacy-breach Gbp-Pq: Name 0001-privacy-breach.patch --- README.rst | 25 ------------------------- 1 file changed, 25 deletions(-) diff --git a/README.rst b/README.rst index 690ffecdd..791329d9e 100644 --- a/README.rst +++ b/README.rst @@ -1,5 +1,3 @@ -|Documentation| |Build Status| |Build status| |Report Card| |Say Thanks| |Reviewed by Hound| - Introduction ------------ @@ -110,26 +108,3 @@ License Restic is licensed under `BSD 2-Clause License `__. You can find the complete text in ``LICENSE``. - -Sponsorship ------------ - -Backend integration tests for Google Cloud Storage and Microsoft Azure Blob -Storage are sponsored by `AppsCode `__! - -|AppsCode| - -.. |Documentation| image:: https://readthedocs.org/projects/restic/badge/?version=latest - :target: https://restic.readthedocs.io/en/latest/?badge=latest -.. |Build Status| image:: https://travis-ci.com/restic/restic.svg?branch=master - :target: https://travis-ci.com/restic/restic -.. |Build status| image:: https://ci.appveyor.com/api/projects/status/nuy4lfbgfbytw92q/branch/master?svg=true - :target: https://ci.appveyor.com/project/fd0/restic/branch/master -.. |Report Card| image:: https://goreportcard.com/badge/github.com/restic/restic - :target: https://goreportcard.com/report/github.com/restic/restic -.. |Say Thanks| image:: https://img.shields.io/badge/Say%20Thanks-!-1EAEDB.svg - :target: https://saythanks.io/to/restic -.. |AppsCode| image:: https://cdn.appscode.com/images/logo/appscode/ac-logo-color.png - :target: https://appscode.com -.. |Reviewed by Hound| image:: https://img.shields.io/badge/Reviewed_by-Hound-8E64B0.svg - :target: https://houndci.com -- cgit v1.2.3 From 519df549ea8ab0e834f1203759524a3984d1487a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?F=C3=A9lix=20Sipma?= Date: Sun, 15 Nov 2020 18:58:05 +0100 Subject: privacy breach Gbp-Pq: Name 0001-privacy-breach.patch --- README.md | 7 ------- 1 file changed, 7 deletions(-) diff --git a/README.md b/README.md index e46cc2b55..003f8235b 100644 --- a/README.md +++ b/README.md @@ -1,8 +1,3 @@ -[![Documentation](https://readthedocs.org/projects/restic/badge/?version=latest)](https://restic.readthedocs.io/en/latest/?badge=latest) -[![Build Status Travis](https://travis-ci.com/restic/restic.svg?branch=master)](https://travis-ci.com/restic/restic) -[![Build Status AppVeyor](https://ci.appveyor.com/api/projects/status/nuy4lfbgfbytw92q/branch/master?svg=true)](https://ci.appveyor.com/project/fd0/restic/branch/master) -[![Go Report Card](https://goreportcard.com/badge/github.com/restic/restic)](https://goreportcard.com/report/github.com/restic/restic) - # Introduction restic is a backup program that is fast, efficient and secure. It supports the three major operating systems (Linux, macOS, Windows) and a few smaller ones (FreeBSD, OpenBSD). @@ -110,5 +105,3 @@ Sponsorship Backend integration tests for Google Cloud Storage and Microsoft Azure Blob Storage are sponsored by [AppsCode](https://appscode.com)! 
- -[![Sponsored by AppsCode](https://cdn.appscode.com/images/logo/appscode/ac-logo-color.png)](https://appscode.com) -- cgit v1.2.3 From 230357f66293d0a4ab1ba28aba5cc737f640fa29 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?F=C3=A9lix=20Sipma?= Date: Fri, 5 Feb 2021 09:08:41 +0100 Subject: use golang-github-cenkalti-backoff v3 Gbp-Pq: Name 0002-use-golang-github-cenkalti-backoff-v3.patch --- internal/backend/backend_retry.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/backend/backend_retry.go b/internal/backend/backend_retry.go index d47be2396..372d0a98f 100644 --- a/internal/backend/backend_retry.go +++ b/internal/backend/backend_retry.go @@ -6,7 +6,7 @@ import ( "io" "time" - "github.com/cenkalti/backoff/v4" + "github.com/cenkalti/backoff" "github.com/restic/restic/internal/debug" "github.com/restic/restic/internal/restic" ) -- cgit v1.2.3 From 3f6bee7c066f3b944a5521ff57e80b66d6f2986d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?F=C3=A9lix=20Sipma?= Date: Sun, 15 Nov 2020 18:58:05 +0100 Subject: privacy breach Gbp-Pq: Name 0001-privacy-breach.patch --- README.md | 6 ------ 1 file changed, 6 deletions(-) diff --git a/README.md b/README.md index 558c743d4..003f8235b 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,3 @@ -[![Documentation](https://readthedocs.org/projects/restic/badge/?version=latest)](https://restic.readthedocs.io/en/latest/?badge=latest) -[![Build Status](https://github.com/restic/restic/workflows/test/badge.svg)](https://github.com/restic/restic/actions?query=workflow%3Atest) -[![Go Report Card](https://goreportcard.com/badge/github.com/restic/restic)](https://goreportcard.com/report/github.com/restic/restic) - # Introduction restic is a backup program that is fast, efficient and secure. It supports the three major operating systems (Linux, macOS, Windows) and a few smaller ones (FreeBSD, OpenBSD). @@ -109,5 +105,3 @@ Sponsorship Backend integration tests for Google Cloud Storage and Microsoft Azure Blob Storage are sponsored by [AppsCode](https://appscode.com)! - -[![Sponsored by AppsCode](https://cdn.appscode.com/images/logo/appscode/ac-logo-color.png)](https://appscode.com) -- cgit v1.2.3 From f92dd0f1ce66b6119e10937d3ec8e36d41765fe8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?F=C3=A9lix=20Sipma?= Date: Sun, 15 Nov 2020 18:58:05 +0100 Subject: privacy breach Gbp-Pq: Name 0001-privacy-breach.patch --- README.md | 6 ------ 1 file changed, 6 deletions(-) diff --git a/README.md b/README.md index 558c743d4..003f8235b 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,3 @@ -[![Documentation](https://readthedocs.org/projects/restic/badge/?version=latest)](https://restic.readthedocs.io/en/latest/?badge=latest) -[![Build Status](https://github.com/restic/restic/workflows/test/badge.svg)](https://github.com/restic/restic/actions?query=workflow%3Atest) -[![Go Report Card](https://goreportcard.com/badge/github.com/restic/restic)](https://goreportcard.com/report/github.com/restic/restic) - # Introduction restic is a backup program that is fast, efficient and secure. It supports the three major operating systems (Linux, macOS, Windows) and a few smaller ones (FreeBSD, OpenBSD). @@ -109,5 +105,3 @@ Sponsorship Backend integration tests for Google Cloud Storage and Microsoft Azure Blob Storage are sponsored by [AppsCode](https://appscode.com)! 
- -[![Sponsored by AppsCode](https://cdn.appscode.com/images/logo/appscode/ac-logo-color.png)](https://appscode.com) -- cgit v1.2.3 From aa21227bd36a7b2efaaaa3bb1bbdbd7e1959e253 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?F=C3=A9lix=20Sipma?= Date: Sun, 15 Nov 2020 18:58:05 +0100 Subject: privacy breach Gbp-Pq: Name 0001-privacy-breach.patch --- README.md | 6 ------ 1 file changed, 6 deletions(-) diff --git a/README.md b/README.md index 8f72a0200..dfbd58e62 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,3 @@ -[![Documentation](https://readthedocs.org/projects/restic/badge/?version=latest)](https://restic.readthedocs.io/en/latest/?badge=latest) -[![Build Status](https://github.com/restic/restic/workflows/test/badge.svg)](https://github.com/restic/restic/actions?query=workflow%3Atest) -[![Go Report Card](https://goreportcard.com/badge/github.com/restic/restic)](https://goreportcard.com/report/github.com/restic/restic) - # Introduction restic is a backup program that is fast, efficient and secure. It supports the three major operating systems (Linux, macOS, Windows) and a few smaller ones (FreeBSD, OpenBSD). @@ -109,5 +105,3 @@ Sponsorship Backend integration tests for Google Cloud Storage and Microsoft Azure Blob Storage are sponsored by [AppsCode](https://appscode.com)! - -[![Sponsored by AppsCode](https://cdn.appscode.com/images/logo/appscode/ac-logo-color.png)](https://appscode.com) -- cgit v1.2.3 From b08fc056ed281ab6703aa0d14029e8cd71d9055a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?F=C3=A9lix=20Sipma?= Date: Sun, 15 Nov 2020 18:58:05 +0100 Subject: privacy breach Gbp-Pq: Name 0001-privacy-breach.patch --- README.md | 6 ------ 1 file changed, 6 deletions(-) diff --git a/README.md b/README.md index 8f72a0200..dfbd58e62 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,3 @@ -[![Documentation](https://readthedocs.org/projects/restic/badge/?version=latest)](https://restic.readthedocs.io/en/latest/?badge=latest) -[![Build Status](https://github.com/restic/restic/workflows/test/badge.svg)](https://github.com/restic/restic/actions?query=workflow%3Atest) -[![Go Report Card](https://goreportcard.com/badge/github.com/restic/restic)](https://goreportcard.com/report/github.com/restic/restic) - # Introduction restic is a backup program that is fast, efficient and secure. It supports the three major operating systems (Linux, macOS, Windows) and a few smaller ones (FreeBSD, OpenBSD). @@ -109,5 +105,3 @@ Sponsorship Backend integration tests for Google Cloud Storage and Microsoft Azure Blob Storage are sponsored by [AppsCode](https://appscode.com)! 
- -[![Sponsored by AppsCode](https://cdn.appscode.com/images/logo/appscode/ac-logo-color.png)](https://appscode.com) -- cgit v1.2.3 From 21ad8c4750378d5746d0d2658492684e5820be36 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?F=C3=A9lix=20Sipma?= Date: Sun, 15 Nov 2020 18:58:05 +0100 Subject: privacy breach Gbp-Pq: Name 0001-privacy-breach.patch --- README.md | 6 ------ 1 file changed, 6 deletions(-) diff --git a/README.md b/README.md index 8f72a0200..dfbd58e62 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,3 @@ -[![Documentation](https://readthedocs.org/projects/restic/badge/?version=latest)](https://restic.readthedocs.io/en/latest/?badge=latest) -[![Build Status](https://github.com/restic/restic/workflows/test/badge.svg)](https://github.com/restic/restic/actions?query=workflow%3Atest) -[![Go Report Card](https://goreportcard.com/badge/github.com/restic/restic)](https://goreportcard.com/report/github.com/restic/restic) - # Introduction restic is a backup program that is fast, efficient and secure. It supports the three major operating systems (Linux, macOS, Windows) and a few smaller ones (FreeBSD, OpenBSD). @@ -109,5 +105,3 @@ Sponsorship Backend integration tests for Google Cloud Storage and Microsoft Azure Blob Storage are sponsored by [AppsCode](https://appscode.com)! - -[![Sponsored by AppsCode](https://cdn.appscode.com/images/logo/appscode/ac-logo-color.png)](https://appscode.com) -- cgit v1.2.3 From 222d94e1a04f5fd9fc8a6e0d23ee14e5061a7846 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?F=C3=A9lix=20Sipma?= Date: Mon, 23 Oct 2023 21:55:37 +0200 Subject: disable azure Gbp-Pq: Name 0002-disable-azure.patch --- cmd/restic/global.go | 2 -- go.mod | 5 ----- 2 files changed, 7 deletions(-) diff --git a/cmd/restic/global.go b/cmd/restic/global.go index 63e13c3ae..2301ad32b 100644 --- a/cmd/restic/global.go +++ b/cmd/restic/global.go @@ -13,7 +13,6 @@ import ( "time" "github.com/restic/restic/internal/backend" - "github.com/restic/restic/internal/backend/azure" "github.com/restic/restic/internal/backend/b2" "github.com/restic/restic/internal/backend/gs" "github.com/restic/restic/internal/backend/limiter" @@ -100,7 +99,6 @@ var internalGlobalCtx context.Context func init() { backends := location.NewRegistry() - backends.Register(azure.NewFactory()) backends.Register(b2.NewFactory()) backends.Register(gs.NewFactory()) backends.Register(local.NewFactory()) diff --git a/go.mod b/go.mod index e3bf39f27..d36a16667 100644 --- a/go.mod +++ b/go.mod @@ -2,9 +2,6 @@ module github.com/restic/restic require ( cloud.google.com/go/storage v1.31.0 - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.0 - github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0 - github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.1.0 github.com/anacrolix/fuse v0.2.0 github.com/cenkalti/backoff/v4 v4.2.1 github.com/cespare/xxhash/v2 v2.2.0 @@ -40,8 +37,6 @@ require ( cloud.google.com/go/compute v1.23.0 // indirect cloud.google.com/go/compute/metadata v0.2.3 // indirect cloud.google.com/go/iam v1.1.1 // indirect - github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 // indirect - github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect github.com/dustin/go-humanize v1.0.1 // indirect github.com/felixge/fgprof v0.9.3 // indirect -- cgit v1.2.3 From 70dae3bc3677024c9216e21d9704824888bcc850 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?F=C3=A9lix=20Sipma?= Date: Sun, 15 Nov 2020 18:58:05 +0100 Subject: privacy breach Gbp-Pq: Name 0001-privacy-breach.patch --- 
README.md | 6 ------ 1 file changed, 6 deletions(-) diff --git a/README.md b/README.md index ad6b13cef..061164d6a 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,3 @@ -[![Documentation](https://readthedocs.org/projects/restic/badge/?version=latest)](https://restic.readthedocs.io/en/latest/?badge=latest) -[![Build Status](https://github.com/restic/restic/workflows/test/badge.svg)](https://github.com/restic/restic/actions?query=workflow%3Atest) -[![Go Report Card](https://goreportcard.com/badge/github.com/restic/restic)](https://goreportcard.com/report/github.com/restic/restic) - # Introduction restic is a backup program that is fast, efficient and secure. It supports the three major operating systems (Linux, macOS, Windows) and a few smaller ones (FreeBSD, OpenBSD). @@ -109,5 +105,3 @@ Sponsorship Backend integration tests for Google Cloud Storage and Microsoft Azure Blob Storage are sponsored by [AppsCode](https://appscode.com)! - -[![Sponsored by AppsCode](https://cdn.appscode.com/images/logo/appscode/ac-logo-color.png)](https://appscode.com) -- cgit v1.2.3 From c92b72b59113a1f79525983bd61baa52c342526a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?F=C3=A9lix=20Sipma?= Date: Mon, 23 Oct 2023 21:55:37 +0200 Subject: disable azure Gbp-Pq: Name 0002-disable-azure.patch --- cmd/restic/global.go | 2 -- go.mod | 5 ----- 2 files changed, 7 deletions(-) diff --git a/cmd/restic/global.go b/cmd/restic/global.go index cfca60521..be41a6393 100644 --- a/cmd/restic/global.go +++ b/cmd/restic/global.go @@ -13,7 +13,6 @@ import ( "time" "github.com/restic/restic/internal/backend" - "github.com/restic/restic/internal/backend/azure" "github.com/restic/restic/internal/backend/b2" "github.com/restic/restic/internal/backend/gs" "github.com/restic/restic/internal/backend/limiter" @@ -100,7 +99,6 @@ var internalGlobalCtx context.Context func init() { backends := location.NewRegistry() - backends.Register(azure.NewFactory()) backends.Register(b2.NewFactory()) backends.Register(gs.NewFactory()) backends.Register(local.NewFactory()) diff --git a/go.mod b/go.mod index 71927c7b7..2475b8069 100644 --- a/go.mod +++ b/go.mod @@ -2,9 +2,6 @@ module github.com/restic/restic require ( cloud.google.com/go/storage v1.33.0 - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.8.0 - github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0 - github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.0 github.com/Backblaze/blazer v0.6.1 github.com/anacrolix/fuse v0.2.0 github.com/cenkalti/backoff/v4 v4.2.1 @@ -41,8 +38,6 @@ require ( cloud.google.com/go/compute v1.23.1 // indirect cloud.google.com/go/compute/metadata v0.2.3 // indirect cloud.google.com/go/iam v1.1.3 // indirect - github.com/Azure/azure-sdk-for-go/sdk/internal v1.4.0 // indirect - github.com/AzureAD/microsoft-authentication-library-for-go v1.2.0 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect github.com/dustin/go-humanize v1.0.1 // indirect github.com/felixge/fgprof v0.9.3 // indirect -- cgit v1.2.3 From aeb04a4b7be5b2ecad2f250d4348c57d604a3933 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?F=C3=A9lix=20Sipma?= Date: Tue, 16 Jan 2024 15:00:34 +0100 Subject: privacy breach Last-Update: 2020-11-15 Forwarded: not-needed Gbp-Pq: Name 0001-privacy-breach.patch --- README.md | 6 ------ 1 file changed, 6 deletions(-) diff --git a/README.md b/README.md index ad6b13cef..061164d6a 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,3 @@ -[![Documentation](https://readthedocs.org/projects/restic/badge/?version=latest)](https://restic.readthedocs.io/en/latest/?badge=latest) 
-[![Build Status](https://github.com/restic/restic/workflows/test/badge.svg)](https://github.com/restic/restic/actions?query=workflow%3Atest) -[![Go Report Card](https://goreportcard.com/badge/github.com/restic/restic)](https://goreportcard.com/report/github.com/restic/restic) - # Introduction restic is a backup program that is fast, efficient and secure. It supports the three major operating systems (Linux, macOS, Windows) and a few smaller ones (FreeBSD, OpenBSD). @@ -109,5 +105,3 @@ Sponsorship Backend integration tests for Google Cloud Storage and Microsoft Azure Blob Storage are sponsored by [AppsCode](https://appscode.com)! - -[![Sponsored by AppsCode](https://cdn.appscode.com/images/logo/appscode/ac-logo-color.png)](https://appscode.com) -- cgit v1.2.3 From c8a47eacd3a25c4a81dfa67ea60db07a61aaec0b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?F=C3=A9lix=20Sipma?= Date: Tue, 16 Jan 2024 15:00:34 +0100 Subject: disable azure Last-Update: 2023-10-23 Forwarded: not-needed Gbp-Pq: Name 0002-disable-azure.patch --- cmd/restic/global.go | 2 -- go.mod | 5 ----- 2 files changed, 7 deletions(-) diff --git a/cmd/restic/global.go b/cmd/restic/global.go index c11aca615..c48c00482 100644 --- a/cmd/restic/global.go +++ b/cmd/restic/global.go @@ -13,7 +13,6 @@ import ( "time" "github.com/restic/restic/internal/backend" - "github.com/restic/restic/internal/backend/azure" "github.com/restic/restic/internal/backend/b2" "github.com/restic/restic/internal/backend/gs" "github.com/restic/restic/internal/backend/limiter" @@ -100,7 +99,6 @@ var internalGlobalCtx context.Context func init() { backends := location.NewRegistry() - backends.Register(azure.NewFactory()) backends.Register(b2.NewFactory()) backends.Register(gs.NewFactory()) backends.Register(local.NewFactory()) diff --git a/go.mod b/go.mod index f07fc10ee..3f4e19808 100644 --- a/go.mod +++ b/go.mod @@ -2,9 +2,6 @@ module github.com/restic/restic require ( cloud.google.com/go/storage v1.34.0 - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1 - github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0 - github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.0 github.com/Backblaze/blazer v0.6.1 github.com/anacrolix/fuse v0.2.0 github.com/cenkalti/backoff/v4 v4.2.1 @@ -41,8 +38,6 @@ require ( cloud.google.com/go/compute v1.23.1 // indirect cloud.google.com/go/compute/metadata v0.2.3 // indirect cloud.google.com/go/iam v1.1.3 // indirect - github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1 // indirect - github.com/AzureAD/microsoft-authentication-library-for-go v1.2.0 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect github.com/dustin/go-humanize v1.0.1 // indirect github.com/felixge/fgprof v0.9.3 // indirect -- cgit v1.2.3 From bce5700e9cd44e1c510efd94f8feb7d5db63a4e5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?F=C3=A9lix=20Sipma?= Date: Thu, 8 Feb 2024 19:30:36 +0100 Subject: privacy breach Last-Update: 2020-11-15 Forwarded: not-needed Gbp-Pq: Name 0001-privacy-breach.patch --- README.md | 6 ------ 1 file changed, 6 deletions(-) diff --git a/README.md b/README.md index ad6b13cef..061164d6a 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,3 @@ -[![Documentation](https://readthedocs.org/projects/restic/badge/?version=latest)](https://restic.readthedocs.io/en/latest/?badge=latest) -[![Build Status](https://github.com/restic/restic/workflows/test/badge.svg)](https://github.com/restic/restic/actions?query=workflow%3Atest) -[![Go Report 
Card](https://goreportcard.com/badge/github.com/restic/restic)](https://goreportcard.com/report/github.com/restic/restic) - # Introduction restic is a backup program that is fast, efficient and secure. It supports the three major operating systems (Linux, macOS, Windows) and a few smaller ones (FreeBSD, OpenBSD). @@ -109,5 +105,3 @@ Sponsorship Backend integration tests for Google Cloud Storage and Microsoft Azure Blob Storage are sponsored by [AppsCode](https://appscode.com)! - -[![Sponsored by AppsCode](https://cdn.appscode.com/images/logo/appscode/ac-logo-color.png)](https://appscode.com) -- cgit v1.2.3 From 85867d64a335a021eadda0f3057ab7c9df222628 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?F=C3=A9lix=20Sipma?= Date: Thu, 8 Feb 2024 19:30:36 +0100 Subject: disable azure Last-Update: 2023-10-23 Forwarded: not-needed Gbp-Pq: Name 0002-disable-azure.patch --- cmd/restic/global.go | 2 -- go.mod | 5 ----- 2 files changed, 7 deletions(-) diff --git a/cmd/restic/global.go b/cmd/restic/global.go index e979dcc2b..0017cf80f 100644 --- a/cmd/restic/global.go +++ b/cmd/restic/global.go @@ -13,7 +13,6 @@ import ( "time" "github.com/restic/restic/internal/backend" - "github.com/restic/restic/internal/backend/azure" "github.com/restic/restic/internal/backend/b2" "github.com/restic/restic/internal/backend/gs" "github.com/restic/restic/internal/backend/limiter" @@ -101,7 +100,6 @@ var internalGlobalCtx context.Context func init() { backends := location.NewRegistry() - backends.Register(azure.NewFactory()) backends.Register(b2.NewFactory()) backends.Register(gs.NewFactory()) backends.Register(local.NewFactory()) diff --git a/go.mod b/go.mod index 970770a90..5cd89a51b 100644 --- a/go.mod +++ b/go.mod @@ -2,9 +2,6 @@ module github.com/restic/restic require ( cloud.google.com/go/storage v1.34.0 - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1 - github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0 - github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.0 github.com/Backblaze/blazer v0.6.1 github.com/anacrolix/fuse v0.2.0 github.com/cenkalti/backoff/v4 v4.2.1 @@ -43,8 +40,6 @@ require ( cloud.google.com/go/compute v1.23.1 // indirect cloud.google.com/go/compute/metadata v0.2.3 // indirect cloud.google.com/go/iam v1.1.3 // indirect - github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1 // indirect - github.com/AzureAD/microsoft-authentication-library-for-go v1.2.0 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect github.com/dustin/go-humanize v1.0.1 // indirect github.com/felixge/fgprof v0.9.3 // indirect -- cgit v1.2.3
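
For context on the 0002-use-golang-github-cenkalti-backoff-v3.patch hunk above: Debian's golang-github-cenkalti-backoff-dev package shipped the library under the unversioned import path, so the patch rewrites restic's github.com/cenkalti/backoff/v4 import. What follows is a minimal sketch, under the assumption of that unversioned import, of the retry-with-exponential-backoff pattern that internal/backend/backend_retry.go builds on. The flaky operation is invented for illustration; backoff.Retry, backoff.NewExponentialBackOff, and MaxElapsedTime are the library's real API.

    package main

    import (
        "errors"
        "fmt"
        "time"

        "github.com/cenkalti/backoff"
    )

    func main() {
        attempts := 0

        // An operation that fails twice before succeeding, standing in
        // for a flaky storage-backend call of the kind the retry
        // backend wraps.
        op := func() error {
            attempts++
            if attempts < 3 {
                return errors.New("transient backend error")
            }
            return nil
        }

        // Exponential backoff: retry with growing delays until the
        // operation succeeds or MaxElapsedTime is exceeded.
        policy := backoff.NewExponentialBackOff()
        policy.MaxElapsedTime = 10 * time.Second

        if err := backoff.Retry(op, policy); err != nil {
            fmt.Println("giving up:", err)
            return
        }
        fmt.Printf("succeeded after %d attempts\n", attempts)
    }

backoff.Retry has the same signature in the v4 module, which is why the patch can be a one-line import change.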
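Likewise, the repeated 0002-disable-azure.patch hunks stay small because restic registers each storage backend through a factory registry in cmd/restic/global.go: dropping the backends.Register(azure.NewFactory()) call, plus the Azure requirements in go.mod, removes the backend without touching anything else. The sketch below is a simplified stand-in for that registry pattern, not restic's actual internal/backend/location API; the Factory and Registry types here are assumptions made for illustration.

    package main

    import "fmt"

    // Factory stands in for a backend constructor keyed by the URI
    // scheme of a repository location.
    type Factory interface {
        Scheme() string
    }

    // Registry maps a location scheme ("b2", "gs", ...) to its factory.
    type Registry struct {
        factories map[string]Factory
    }

    func NewRegistry() *Registry {
        return &Registry{factories: make(map[string]Factory)}
    }

    func (r *Registry) Register(f Factory) {
        r.factories[f.Scheme()] = f
    }

    func (r *Registry) Lookup(scheme string) (Factory, bool) {
        f, ok := r.factories[scheme]
        return f, ok
    }

    type b2Factory struct{}

    func (b2Factory) Scheme() string { return "b2" }

    func main() {
        backends := NewRegistry()
        backends.Register(b2Factory{})
        // The Debian patch simply omits the equivalent of
        // backends.Register(azure.NewFactory()), so "azure:" repository
        // locations fail at lookup time rather than at build time.
        if _, ok := backends.Lookup("azure"); !ok {
            fmt.Println("unknown backend scheme: azure")
        }
    }

This registry design is what lets the packaging carry the patch forward across upstream releases with only index-line churn, as the series above shows.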