summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorNikolaus Rath <Nikolaus@rath.org>2016-10-28 14:07:23 -0700
committerNikolaus Rath <Nikolaus@rath.org>2016-10-28 14:07:23 -0700
commit0ff390756d6be25ba06c24bcfa0589d646e0cec6 (patch)
treee2c069395ffcdd0aaf49c7e2c004a8c5387f59d8
Import s3ql_2.21+dfsg-1.debian.tar.xz
[dgit import tarball s3ql 2.21+dfsg-1 s3ql_2.21+dfsg-1.debian.tar.xz]
-rw-r--r--.git-dpm11
-rw-r--r--NEWS78
-rw-r--r--README.Debian22
-rw-r--r--README.source20
-rw-r--r--changelog416
-rw-r--r--compat1
-rw-r--r--control106
-rw-r--r--copyright24
-rw-r--r--patches/clock-granularity.diff32
-rw-r--r--patches/ignore_cython_warnings.diff34
-rw-r--r--patches/proc_mount.diff32
-rw-r--r--patches/series5
-rw-r--r--patches/show_pdflatex_output.diff30
-rw-r--r--patches/support_jessie_upgrade.diff876
-rw-r--r--py3dist-overrides3
-rwxr-xr-xrules112
-rw-r--r--s3ql-dbg.links1
-rw-r--r--s3ql.doc-base11
-rw-r--r--s3ql.docs2
-rw-r--r--s3ql.examples1
-rw-r--r--s3ql.install4
-rw-r--r--s3ql.links3
-rw-r--r--source/format1
-rw-r--r--source/include-binaries2
-rw-r--r--source/options2
-rw-r--r--tests/control10
-rwxr-xr-xtests/upstream-standard7
-rwxr-xr-xtests/upstream-with-fuse4
-rw-r--r--upstream-signing-key.pgpbin0 -> 2722 bytes
-rw-r--r--watch9
30 files changed, 1859 insertions, 0 deletions
diff --git a/.git-dpm b/.git-dpm
new file mode 100644
index 0000000..cec922a
--- /dev/null
+++ b/.git-dpm
@@ -0,0 +1,11 @@
+# see git-dpm(1) from git-dpm package
+f2e46743e3b446d31cdda56846e7d2e7c384e2a0
+f2e46743e3b446d31cdda56846e7d2e7c384e2a0
+7b20150ca23d8ce6e325799dbf735d26dfd0f03e
+7b20150ca23d8ce6e325799dbf735d26dfd0f03e
+s3ql_2.21+dfsg.orig.tar.xz
+8e720bc48e67a202d85af5e60812c5bceda687fd
+530132
+debianTag="debian/%e%v"
+patchedTag="patched/%e%v"
+upstreamTag="upstream/%e%u"
diff --git a/NEWS b/NEWS
new file mode 100644
index 0000000..f79ba3f
--- /dev/null
+++ b/NEWS
@@ -0,0 +1,78 @@
+s3ql (2.17+dfsg-1) unstable; urgency=medium
+
+ The internal file system revision has changed. File systems created
+ with S3QL 2.17 or newer are not compatible with prior S3QL
+ versions. To update an existing file system to the newest revision,
+ use the `s3qladm upgrade` command.
+
+ -- Nikolaus Rath <Nikolaus@rath.org> Thu, 10 Mar 2016 08:13:37 -0800
+
+s3ql (2.14+dfsg-1) unstable; urgency=medium
+
+ The internal file system revision has changed. File systems created
+ with S3QL 2.14 or newer are not compatible with prior S3QL
+ versions. To update an existing file system to the newest revision,
+ use the 's3qladm upgrade' command.
+
+ -- Nikolaus Rath <Nikolaus@rath.org> Wed, 12 Aug 2015 14:57:22 -0700
+
+s3ql (2.10.1+dfsg-1) unstable; urgency=medium
+
+ The internal file system revision has changed. File systems created
+ with S3QL 2.10 or newer are not compatible with prior S3QL
+ versions. To update an existing file system to the newest revision,
+ use the 's3qladm upgrade' command.
+
+ It is strongly recommended to run the (new) s3ql_verify command with
+ the --data option shortly after the upgrade. This is necessary to
+ ensure that the upgrade to the next (2.11) S3QL release will run
+ smoothly.
+
+ -- Nikolaus Rath <Nikolaus@rath.org> Sun, 03 Aug 2014 20:49:06 -0700
+
+s3ql (2.5-1) unstable; urgency=low
+
+ * The file system structure has changed, existing file systems
+ need to be upgraded using "s3qladm upgrade" before they can
+ be used.
+
+ -- Nikolaus Rath <Nikolaus@rath.org> Sat, 19 Oct 2013 16:34:46 -0700
+
+s3ql (1.11-1) unstable; urgency=low
+
+ * The file system structure has changed, existing file systems
+ need to be upgraded using "s3qladm upgrade" before they can
+ be used.
+
+ -- Nikolaus Rath <Nikolaus@rath.org> Sun, 29 Apr 2012 15:23:20 -0400
+
+s3ql (1.9-1) unstable; urgency=low
+
+ * The file system structure has changed, existing file systems
+ need to be upgraded using "s3qladm upgrade" before they can
+ be used.
+
+ -- Nikolaus Rath <Nikolaus@rath.org> Sat, 21 Jan 2012 13:49:59 -0500
+
+s3ql (1.7-1) unstable; urgency=low
+
+ * The file system structure has changed, existing file systems
+ need to be upgraded using "s3qladm upgrade" before they can
+ be used.
+
+ -- Nikolaus Rath <Nikolaus@rath.org> Sun, 27 Nov 2011 16:43:06 -0500
+
+s3ql (1.4-1) unstable; urgency=low
+
+ Notes for upgrading from version 1.0 or earlier:
+ ------------------------------------------------
+
+ * The file system structure has changed, existing file systems
+ need to be upgraded using "s3qladm upgrade" before they can
+ be used.
+
+ * The format and default name of the authentication file has
+ changed. The default location is now ~/.s3ql/authinfo2.
+ Please refer to the documentation for the file format.
+
+ -- Nikolaus Rath <Nikolaus@rath.org> Thu, 06 Oct 2011 11:25:44 -0400
diff --git a/README.Debian b/README.Debian
new file mode 100644
index 0000000..3558364
--- /dev/null
+++ b/README.Debian
@@ -0,0 +1,22 @@
+S3QL for Debian
+---------------
+
+
+Q: Where do I find the contents of the contrib/ directory in the
+ upstream tarball?
+
+A: The Python scripts are installed into /usr/lib/s3ql. Manpages have
+ been placed in the proper system directory (/usr/man/man1) and can
+ be viewed with the standard `man` command. The sample
+ backup script (s3ql_backup.sh) is installed in
+ /usr/share/doc/s3ql/examples.
+
+ Additionally, some scripts have been linked into /usr/bin under
+ different names (in these cases the manpages have been renamed
+ accordingly):
+
+ Installed Name: Original name:
+ -------------------------------------------------------------
+ expire_backups expire_backups.py
+ parallel-cp pcp.py
+ s3ql_remove_objects remove_objects.py
diff --git a/README.source b/README.source
new file mode 100644
index 0000000..e2a53fa
--- /dev/null
+++ b/README.source
@@ -0,0 +1,20 @@
+
+* As requested by ftpmaster, the upstream tarball is repacked to
+ remove minified javascript files.
+
+* Since removing just the minified javascript files would leave the
+ rest of them broken, we are removing the entire pre-rendered HTML
+ documentation. There is no loss of information, since the entire
+ documentation (including the problematic javascript files) is
+ regenerated during build.
+
+* Repackaging of the upstream tarball is done with uscan. The list of
+ excluded files is specified in the Files-Excluded paragraph in
+ debian/copyright. To retrieve and repack the latest upstream
+ tarball, run
+
+ # PERL_LWP_SSL_VERIFY_HOSTNAME=0 uscan --repack
+
+ (Deactivating hostname verification isn't a problem because the
+ upstream tarball is GPG signed, but necessary to work around
+ bug #749225.)
diff --git a/changelog b/changelog
new file mode 100644
index 0000000..72e236a
--- /dev/null
+++ b/changelog
@@ -0,0 +1,416 @@
+s3ql (2.21+dfsg-1) unstable; urgency=medium
+
+ * New upstream version
+
+ -- Nikolaus Rath <Nikolaus@rath.org> Fri, 28 Oct 2016 14:07:23 -0700
+
+s3ql (2.20+dfsg-1) unstable; urgency=medium
+
+ * new upstream version
+
+ -- Nikolaus Rath <Nikolaus@rath.org> Wed, 24 Aug 2016 12:10:58 -0700
+
+s3ql (2.19+dfsg-1) unstable; urgency=medium
+
+ * new upstream version
+ * Show full pdflatex output during build
+ * Added missing texlive-generic-extra dependency.
+
+ -- Nikolaus Rath <Nikolaus@rath.org> Mon, 27 Jun 2016 12:00:39 -0700
+
+s3ql (2.18+dfsg-1) unstable; urgency=medium
+
+ * Enable debug logging for s3ql.verify when running unit tests during
+ build.
+ * Moved packaging to git and collab-maint on alioth.
+ * New upstream release.
+
+ -- Nikolaus Rath <Nikolaus@rath.org> Fri, 29 Apr 2016 19:08:53 -0700
+
+s3ql (2.17.1+hg2+dfsg-3) unstable; urgency=medium
+
+ * Adding python3-pytest-catchlog dependency for unit tests.
+
+ -- Nikolaus Rath <Nikolaus@rath.org> Sun, 13 Mar 2016 14:39:48 -0700
+
+s3ql (2.17.1+hg2+dfsg-2) unstable; urgency=medium
+
+ * Fixed up broken rebase of patches/support_jessie_upgrade.diff
+
+ -- Nikolaus Rath <Nikolaus@rath.org> Fri, 11 Mar 2016 19:19:12 -0800
+
+s3ql (2.17.1+hg2+dfsg-1) unstable; urgency=medium
+
+ * Added forgotten NEWS entry for 2.17+dfsg-1
+ * New release from upstream repository snapshot.
+ * Run tests with (some) debug logging enabled to debug sporadic test
+ failure of tests/t3_verify.py.
+
+ -- Nikolaus Rath <Nikolaus@rath.org> Fri, 11 Mar 2016 16:18:25 -0800
+
+s3ql (2.17.1+dfsg-1) unstable; urgency=medium
+
+ * New upstream release.
+
+ -- Nikolaus Rath <Nikolaus@rath.org> Wed, 09 Mar 2016 14:33:57 -0800
+
+s3ql (2.17+dfsg-1) unstable; urgency=medium
+
+ * New upstream version.
+
+ -- Nikolaus Rath <Nikolaus@rath.org> Wed, 09 Mar 2016 11:13:23 -0800
+
+s3ql (2.16+dfsg-1) unstable; urgency=medium
+
+ * Added dependency on python3-systemd.
+ * New upstream release.
+
+ -- Nikolaus Rath <Nikolaus@rath.org> Tue, 23 Feb 2016 15:44:51 -0800
+
+s3ql (2.15+dfsg-1) unstable; urgency=medium
+
+ * Add Breaks and Replaces to prevent upgrade problems due to the
+ extension for the debug interpreter having moved from s3ql-dbg to s3ql in
+ 2.14+dfsg-1. Closes: #799261.
+ * New upstream release.
+ * Dropped fix_setattr_test.diff (fixed upstream).
+ * Added use_cython3.diff and ignore_cython_warnings.diff to handle
+ Debian-specific Cython peculiarities.
+
+ -- Nikolaus Rath <Nikolaus@rath.org> Fri, 25 Sep 2015 13:50:51 -0700
+
+s3ql (2.14+dfsg-1) unstable; urgency=medium
+
+ * New upstream release.
+ * Dropped patches/check_dev_fuse_perms.diff (integrated upstream).
+ * Added /usr/bin/s3ql_remove_objects
+ * Added build-depends on fuse, rsync and psmisc (required for unit tests).
+ * Python extensions compiled for the debugging interpreter are now
+ actually shipped in the s3ql-dbg package (rather than the s3ql
+ package).
+ * Added patches/support_jessie_upgrade.diff (upstream dropped
+ compatibility with jessie's S3QL version).
+ * Added patches/fix_setattr_test.diff to un-break a unit test
+ that depended on undocumented behavior in python3-llfuse (which changed
+ recently). Closes: #798596.
+ * Added lost changelog entry for 2.11.1+dfsg-2. Closes: #798595.
+
+ -- Nikolaus Rath <Nikolaus@rath.org> Thu, 10 Sep 2015 13:40:00 -0700
+
+s3ql (2.13+dfsg-2) unstable; urgency=medium
+
+ * Tighten dependency on python3-dugong, python3-apsw, and
+ python3-llfuse. Closes: #778487.
+
+ * Added missing build-dependency on python3-llfuse-dbg.
+ Closes: #790525.
+
+ -- Nikolaus Rath <Nikolaus@rath.org> Wed, 01 Jul 2015 02:33:26 -0800
+
+s3ql (2.13+dfsg-1) unstable; urgency=medium
+
+ * New upstream release.
+ * Dropped patches/fix_test_check_objects_temp.diff (fixed upstream).
+ * Dropped patches/debug_test_thread_hang.diff (fixed upstream).
+ * Fixed a bug that caused fsck.s3ql to either abort with a
+ "apsw.ConstraintError" or to incorrectly consider storage objects
+ as missing when the connection to remote server is interrupted
+ while retrieving the object list. Closes: #771452.
+ * Fixed a problem where mount.s3ql would crash when unmounting the
+ file system because it could not delete the cache directory. This
+ could happen when the file system was not unmounted cleanly, but
+ fsck.s3ql was then run on a different system (or using a different
+ cache directory). Closes: #772052.
+
+ -- Nikolaus Rath <Nikolaus@rath.org> Sat, 31 Jan 2015 20:16:12 -0800
+
+s3ql (2.12+dfsg-2) experimental; urgency=medium
+
+ * Added patches/debug_test_thread_hang.diff to debug occasional test
+ failures on buildds.
+ * Added patches/fix_test_check_objects_temp.diff (cherry-picked
+ upstream) to fix occasional test failures during build.
+ * Upload sponsored by Petter Reinholdtsen.
+
+ -- Nikolaus Rath <Nikolaus@rath.org> Mon, 10 Nov 2014 18:58:07 -0800
+
+s3ql (2.12+dfsg-1) experimental; urgency=medium
+
+ * New upstream release.
+ * Update Standards-Version to 3.9.6 (no changes needed).
+ * Upload sponsored by Petter Reinholdtsen.
+
+ -- Nikolaus Rath <Nikolaus@rath.org> Sun, 09 Nov 2014 15:05:23 -0800
+
+s3ql (2.11.1+dfsg-2) unstable; urgency=medium
+
+ * Fixed a problem with fsck.s3ql aborting with an
+ "apsw.ConstraintError" or incorrectly considering storage
+ objects as missing when the connection to remote server is
+ interrupted. Closes: #771452.
+ * Upload sponsored by Petter Reinholdtsen.
+
+ -- Nikolaus Rath <Nikolaus@rath.org> Tue, 02 Dec 2014 21:44:27 -0800
+
+s3ql (2.11.1+dfsg-1) unstable; urgency=medium
+
+ * New upstream release.
+ * Dropped patches/cve_2014_0485.diff, integrated upstream.
+ * Dropped patches/fix_failsafe_test_race.diff, integrated upstream.
+ * Upload sponsored by Petter Reinholdtsen.
+
+ -- Nikolaus Rath <Nikolaus@rath.org> Thu, 04 Sep 2014 18:28:41 -0700
+
+s3ql (2.10.1+dfsg-4) unstable; urgency=high
+
+ * SECURITY UPDATE for CVE-2014-0485.
+
+ A remote code execution vulnerability was fixed.
+
+ An attacker with control over the communication with the storage
+ backend or the ability to manipulate the data stored in the
+ backend was able to trigger execution of arbitrary code by
+ mount.s3ql, fsck.s3ql, mkfs.s3ql, s3qladm and s3ql_verify. Both
+ encrypted and unencrypted file systems were vulnerable.
+
+ * Upload sponsored by Petter Reinholdtsen.
+
+ -- Nikolaus Rath <Nikolaus@rath.org> Mon, 25 Aug 2014 17:21:09 -0700
+
+s3ql (2.10.1+dfsg-3) unstable; urgency=medium
+
+ * Added fix_failsafe_test_race.diff. Closes: #758013.
+
+ -- Nikolaus Rath <Nikolaus@rath.org> Wed, 20 Aug 2014 17:30:03 -0700
+
+s3ql (2.10.1+dfsg-2) unstable; urgency=medium
+
+ * Bumped python3-dugong dependency to 3.3 to fix build failures
+ on kFreeBSD (and presumably also similar problems when running
+ the software on both kFreeBSD and Linux).
+
+ -- Nikolaus Rath <Nikolaus@rath.org> Wed, 06 Aug 2014 18:55:19 -0700
+
+s3ql (2.10.1+dfsg-1) unstable; urgency=medium
+
+ * Made system clock resolution heuristic in unittests even more
+ conservative, there are still occasional build failures on i386.
+ * New upstream release.
+ * Dropped patches (integrated upstream):
+ - C_locale_compat.diff
+ - kfreebsd_compat.diff
+ - sphinx_build_locale_fix.diff
+ * Bumped python3-dugong dependency to 3.2
+ * Upload sponsored by Petter Reinholdtsen.
+
+ -- Nikolaus Rath <Nikolaus@rath.org> Sun, 03 Aug 2014 20:49:06 -0700
+
+s3ql (2.9+dfsg-2) unstable; urgency=medium
+
+ * Cherry-picked kFreeBSD compatibility patch from upstream
+ (Closes: #755358)
+ * Cherry-picked upstream patch to support testing with C
+ locale.
+ * Upload sponsored by Petter Reinholdtsen.
+
+ -- Nikolaus Rath <Nikolaus@rath.org> Sat, 19 Jul 2014 20:24:55 -0700
+
+s3ql (2.9+dfsg-1) unstable; urgency=medium
+
+ * New upstream release.
+ - Fix automatic self test when running as root (Closes: #745476)
+ * Dropped patches/sphinx-1.1.1-compat.diff (no longer required now
+ that Sphinx 1.2 is in unstable).
+ * Added patches/sphinx_build_locale_fix.diff (cherry-picked from upstream)
+ to make documentation build work independent of system locale.
+ * Added patches/check_dev_fuse_perms.diff to avoid running FUSE
+ tests on continuous integration servers where /dev/fuse is
+ not accessible.
+ * Upload sponsored by Petter Reinholdtsen.
+
+ -- Nikolaus Rath <Nikolaus@rath.org> Sat, 28 Jun 2014 12:30:45 -0700
+
+s3ql (2.8.1+dfsg-1) unstable; urgency=low
+
+ * Updated upstream project URL.
+ * Added upstream PGP key and configured watch file for automatic
+ verification.
+ * New upstream release.
+ * Do not crash when building with snapshot Cython versions
+ (Closes: #742760)
+ * Update Standards-Version to 3.9.5 (no changes needed).
+ * Repackaged to exclude minified javascript files.
+ * Upload sponsored by Petter Reinholdtsen.
+
+ -- Nikolaus Rath <Nikolaus@rath.org> Fri, 14 Mar 2014 19:11:06 -0700
+
+s3ql (2.7-1) unstable; urgency=low
+
+ * New upstream release.
+ * Estimate clock resolution more conservatively when running unit
+ tests.
+
+ -- Nikolaus Rath <Nikolaus@rath.org> Mon, 16 Dec 2013 20:21:41 -0800
+
+s3ql (2.5-2) unstable; urgency=low
+
+ * Do not try to write logfiles into $HOME when running self tests.
+
+ -- Nikolaus Rath <Nikolaus@rath.org> Sun, 20 Oct 2013 11:38:32 -0700
+
+s3ql (2.5-1) unstable; urgency=low
+
+ * Added patch for Sphinx 1.1.3 compatibility.
+ * New upstream release.
+ * Fixed autopkgtests (this time a missing PYTHONPATH, LP: #1223514).
+
+ -- Nikolaus Rath <Nikolaus@rath.org> Tue, 27 Aug 2013 20:17:06 -0700
+
+s3ql (1.16-2) unstable; urgency=low
+
+ * Updated uploader email address.
+ * Removed obsolete lintian override.
+ * Extended clean target, re-build should now work.
+ * Updated package description.
+ * Bump build dependency on Cython to 0.17
+ * Fixed autopkgtests to use installed S3QL.
+
+ -- Nikolaus Rath <Nikolaus@rath.org> Thu, 18 Jul 2013 21:41:29 -0700
+
+s3ql (1.16-1) unstable; urgency=low
+
+ * New upstream release.
+
+ -- Nikolaus Rath <Nikolaus@rath.org> Fri, 12 Jul 2013 21:11:01 -0700
+
+s3ql (1.15-1) unstable; urgency=low
+
+ [ Jakub Wilk ]
+ * Use canonical URIs for Vcs-* fields.
+
+ [ Nikolaus Rath ]
+ * New upstream release.
+ * Added autopkgtest data.
+
+ -- Nikolaus Rath <Nikolaus@rath.org> Sat, 15 Jun 2013 15:06:37 -0700
+
+s3ql (1.14-1) unstable; urgency=low
+
+ * Update standards version to 3.9.4, no other changes
+ required.
+ * New upstream release.
+
+ -- Nikolaus Rath <Nikolaus@rath.org> Tue, 09 Apr 2013 19:08:15 -0700
+
+s3ql (1.13.1-1) unstable; urgency=low
+
+ * New upstream version.
+ * Fixed metadata corruption with recent eglibc (triggered test
+ failure on build). Closes: #701350
+
+ -- Nikolaus Rath <Nikolaus@rath.org> Sat, 23 Feb 2013 17:46:56 -0800
+
+s3ql (1.12-1) unstable; urgency=low
+
+ * New upstream release.
+ * Bumped debhelper compatibility level to 9 to get support for
+ dpkg-buildflags.
+ * Added lintian override for hardening-no-fortify-functions,
+ this is a false positive.
+ * Added patches/buildd-fix.diff to prevent errors when trying
+ to write logs during autobuilding.
+
+ -- Nikolaus Rath <Nikolaus@rath.org> Wed, 05 Sep 2012 20:22:18 -0400
+
+s3ql (1.11.1-2) unstable; urgency=low
+
+ * Add dependency on python-pkg-resources. Closes: 672916.
+
+ -- Nikolaus Rath <Nikolaus@rath.org> Tue, 15 May 2012 18:41:38 -0400
+
+s3ql (1.11.1-1) unstable; urgency=low
+
+ * New upstream release.
+
+ -- Nikolaus Rath <Nikolaus@rath.org> Fri, 04 May 2012 11:12:29 -0400
+
+s3ql (1.11-1) unstable; urgency=low
+
+ * New upstream release.
+
+ -- Nikolaus Rath <Nikolaus@rath.org> Sun, 29 Apr 2012 15:22:56 -0400
+
+s3ql (1.10-1) unstable; urgency=low
+
+ * Bumped standards to 3.9.3, no changes required.
+ * New upstream release.
+
+ -- Nikolaus Rath <Nikolaus@rath.org> Fri, 24 Feb 2012 21:51:01 -0500
+
+s3ql (1.9-1) unstable; urgency=low
+
+ * Depend on python >= 2.7 (now required by upstream). Closes: #653641.
+ * New upstream release.
+ * Dropped obsolete argparse.diff patch.
+
+ -- Nikolaus Rath <Nikolaus@rath.org> Sat, 21 Jan 2012 13:38:25 -0500
+
+s3ql (1.8.1-1) unstable; urgency=low
+
+ * Add versioned depends on python-argparse. Closes: #652751.
+ * New upstream release.
+
+ -- Nikolaus Rath <Nikolaus@rath.org> Mon, 05 Dec 2011 20:36:44 -0500
+
+s3ql (1.7-1) unstable; urgency=low
+
+ * New upstream release.
+
+ -- Nikolaus Rath <Nikolaus@rath.org> Sun, 27 Nov 2011 14:40:58 -0500
+
+s3ql (1.6-1) unstable; urgency=low
+
+ * New upstream release.
+
+ -- Nikolaus Rath <Nikolaus@rath.org> Tue, 22 Nov 2011 11:04:35 -0500
+
+s3ql (1.5-1) unstable; urgency=low
+
+ * New upstream release.
+ * Don't crash when using Google Storage backend. Closes: #646232.
+ * Use dh_sphinxdoc.
+
+ -- Nikolaus Rath <Nikolaus@rath.org> Mon, 24 Oct 2011 19:16:12 -0400
+
+s3ql (1.4-1) unstable; urgency=low
+
+ * New upstream release.
+
+ * Depend on fuse | fusebsd. Closes: #634337.
+
+ -- Nikolaus Rath <Nikolaus@rath.org> Thu, 06 Oct 2011 11:20:25 -0400
+
+s3ql (1.2-1) unstable; urgency=low
+
+ * New upstream release.
+ * Depend on python-llfuse >= 0.36, API was changed in non-backwards
+ compatible way.
+ * Depend on Python >= 2.6.6, previous versions unusable due to
+ Python bug 6312.
+ * Don't ship our own version of underscore JavaScript library, depend
+ on libjs-underscore package instead.
+
+ -- Nikolaus Rath <Nikolaus@rath.org> Wed, 28 Sep 2011 21:00:04 -0400
+
+s3ql (1.0.1-2) unstable; urgency=low
+
+ * Install pcp as parallel-cp to avoid file name conflict with
+ pcp ("Performance Co-Pilot") package. Closes: #632473.
+
+ -- Nikolaus Rath <Nikolaus@rath.org> Sun, 03 Jul 2011 10:44:00 -0400
+
+s3ql (1.0.1-1) unstable; urgency=low
+
+ * First official debian release. Closes: #626651
+
+ -- Nikolaus Rath <Nikolaus@rath.org> Fri, 01 Jul 2011 14:02:17 -0400
diff --git a/compat b/compat
new file mode 100644
index 0000000..ec63514
--- /dev/null
+++ b/compat
@@ -0,0 +1 @@
+9
diff --git a/control b/control
new file mode 100644
index 0000000..21b6fd3
--- /dev/null
+++ b/control
@@ -0,0 +1,106 @@
+Source: s3ql
+Section: misc
+Priority: optional
+X-Python-Version: >= 3.3
+Maintainer: Nikolaus Rath <Nikolaus@rath.org>
+Uploaders: Debian Python Apps Team <python-apps-team@lists.alioth.debian.org>
+Build-Depends: debhelper (>= 9),
+ python3 (>= 3.3),
+ python3-dev (>= 3.3),
+ python3-dbg,
+ python3-setuptools (>= 0.6.14),
+ python3-apsw (>= 3.7.0),
+ python3-sphinx (>= 1.2),
+ python3-llfuse (>= 1.0), python3-llfuse (<< 2.0),
+ python3-llfuse-dbg,
+ python3-crypto,
+ python3-requests,
+ python3-dugong (>= 3.4),
+ python3-pytest (>= 2.3.3),
+ python3-pytest-catchlog,
+ python3-defusedxml,
+ cython3 (>= 0.23),
+ texlive-latex-base,
+ texlive-latex-recommended,
+ texlive-latex-extra,
+ texlive-generic-extra,
+ texlive-fonts-recommended,
+ libsqlite3-dev (>= 3.7.0),
+ rsync,
+ fuse [linux-any], fuse4bsd [kfreebsd-any],
+ psmisc
+Standards-Version: 3.9.6
+Homepage: https://bitbucket.org/nikratio/s3ql/
+Vcs-Git: git://anonscm.debian.org/collab-maint/s3ql.git
+Vcs-Browser: https://anonscm.debian.org/gitweb/?p=collab-maint/s3ql.git
+
+Package: s3ql
+Architecture: any
+Depends: ${misc:Depends},
+ ${python3:Depends},
+ ${shlibs:Depends},
+ ${sphinxdoc:Depends},
+ fuse [linux-any] | fuse4bsd [kfreebsd-any],
+ psmisc,
+ python3-pkg-resources
+Recommends: python3-systemd
+Description: Full-featured file system for online data storage
+ S3QL is a file system that stores all its data online using storage
+ services like Google Storage, Amazon S3 or OpenStack. S3QL
+ effectively provides a hard disk of dynamic, infinite capacity that
+ can be accessed from any computer with internet access.
+ .
+ S3QL is a standard conforming, full featured UNIX file system that is
+ conceptually indistinguishable from any local file
+ system. Furthermore, S3QL has additional features like compression,
+ encryption, data de-duplication, immutable trees and snapshotting
+ which make it especially suitable for online backup and archival.
+ .
+ In addition to accessing online storage services directly, S3QL can
+ also store its data underneath a regular mount point. This enables
+ the use of S3QL with e.g. NFS, CIFS or sftp servers.
+ .
+ S3QL is designed to favor simplicity and elegance over performance
+ and feature-creep. Care has been taken to make the source code as
+ readable and serviceable as possible. Solid error detection and error
+ handling have been included from the very first line, and S3QL comes
+ with extensive automated test cases.
+
+Package: s3ql-dbg
+Architecture: any
+Section: debug
+Priority: extra
+Recommends: python3-dbg,
+ python3-llfuse-dbg,
+ python3-apsw-dbg,
+ python3-crypto-dbg,
+Depends: s3ql (= ${binary:Version}),
+ ${python3:Depends},
+ ${shlibs:Depends},
+ ${misc:Depends}
+Replaces: s3ql (<< 2.14+dfsg-1)
+Breaks: s3ql (<< 2.14+dfsg-1)
+Description: Full-featured file system for online data storage (debugging symbols)
+ S3QL is a file system that stores all its data online using storage
+ services like Google Storage, Amazon S3 or OpenStack. S3QL
+ effectively provides a hard disk of dynamic, infinite capacity that
+ can be accessed from any computer with internet access.
+ .
+ S3QL is a standard conforming, full featured UNIX file system that is
+ conceptually indistinguishable from any local file
+ system. Furthermore, S3QL has additional features like compression,
+ encryption, data de-duplication, immutable trees and snapshotting
+ which make it especially suitable for online backup and archival.
+ .
+ In addition to accessing online storage services directly, S3QL can
+ also store its data underneath a regular mount point. This enables
+ the use of S3QL with e.g. NFS, CIFS or sftp servers.
+ .
+ S3QL is designed to favor simplicity and elegance over performance
+ and feature-creep. Care has been taken to make the source code as
+ readable and serviceable as possible. Solid error detection and error
+ handling have been included from the very first line, and S3QL comes
+ with extensive automated test cases.
+ .
+ This package contains the debugging symbols as well as the extension
+ built for the Python 3 debug interpreter.
diff --git a/copyright b/copyright
new file mode 100644
index 0000000..8e1c4f7
--- /dev/null
+++ b/copyright
@@ -0,0 +1,24 @@
+Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
+Upstream-Name: s3ql
+Upstream-Contact: s3ql@googlegroups.com
+Source: https://bitbucket.org/nikratio/s3ql/
+Files-Excluded: doc/html
+
+Files: *
+Copyright: Copyright (c) 1998 - 2012 Nikolaus Rath <Nikolaus@rath.org>
+License: GPL-3
+
+License: GPL-3
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License Version 3 as
+ published by the Free Software Foundation.
+ .
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+ .
+ On Debian systems the full text of the GNU General Public License
+ Version 3 can be found in the `/usr/share/common-licenses/GPL-3'
+ file.
+
diff --git a/patches/clock-granularity.diff b/patches/clock-granularity.diff
new file mode 100644
index 0000000..64a1d53
--- /dev/null
+++ b/patches/clock-granularity.diff
@@ -0,0 +1,32 @@
+From 324ac9933b9b4d2f477942c8b1f1c54241933a76 Mon Sep 17 00:00:00 2001
+From: Nikolaus Rath <Nikolaus@rath.org>
+Date: Tue, 23 Feb 2016 15:44:51 -0800
+Subject: Estimate system clock granularity more conservatively
+
+Origin: debian
+Forwarded: no
+Patch-Name: clock-granularity.diff
+
+Some unit tests check that file access time stamps are updated
+correctly. For such a test to succeed, it's necessary that the time
+between two subsequent test-accesses can actually be resolved. The
+heuristic to determine this granularity doesn't seem to work well on
+e.g. the i386 buildd, so we make it much more conservative. This means
+the test suite runs longer, but otherwise has no ill effects.
+---
+ tests/common.py | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/tests/common.py b/tests/common.py
+index 93736..af5ed 100644
+--- a/tests/common.py
++++ b/tests/common.py
+@@ -31,7 +31,7 @@ def get_clock_granularity():
+ stamp2 = time.time()
+ resolution = min(resolution, stamp2 - stamp1)
+ time.sleep(0.01)
+- return resolution
++ return max(1, 10 * resolution)
+ CLOCK_GRANULARITY = get_clock_granularity()
+
+ # When testing, we want to make sure that we don't sleep for too short a time
diff --git a/patches/ignore_cython_warnings.diff b/patches/ignore_cython_warnings.diff
new file mode 100644
index 0000000..48188f7
--- /dev/null
+++ b/patches/ignore_cython_warnings.diff
@@ -0,0 +1,34 @@
+From c1a178861185e4f94feb36e7a4f7ff6da644b66f Mon Sep 17 00:00:00 2001
+From: Nikolaus Rath <Nikolaus@rath.org>
+Date: Tue, 23 Feb 2016 15:44:51 -0800
+Subject: Ignore compiler warnings due to old Cython version
+
+Origin: debian
+Forwarded: not-needed
+Patch-Name: ignore_cython_warnings.diff
+
+Cython versions prior to 0.24 generate Gcc compiler warnings.
+Since Cython 0.24 isn't in Debian yet, we ignore these warnings.
+---
+ setup.py | 8 --------
+ 1 file changed, 8 deletions(-)
+
+diff --git a/setup.py b/setup.py
+index ae1c0..e2c9a 100755
+--- a/setup.py
++++ b/setup.py
+@@ -111,14 +111,6 @@ def main():
+
+ compile_args = ['-Wall', '-Wextra', '-Wconversion', '-Wsign-compare']
+
+- # Value-changing conversions should always be explicit.
+- compile_args.append('-Werror=conversion')
+-
+- # Note that (i > -1) is false if i is unsigned (-1 will be converted to
+- # a large positive value). We certainly don't want to do this by
+- # accident.
+- compile_args.append('-Werror=sign-compare')
+-
+ # Enable all fatal warnings only when compiling from Mercurial tip.
+ # (otherwise we break forward compatibility because compilation with newer
+ # compiler may fail if additional warnings are added)
diff --git a/patches/proc_mount.diff b/patches/proc_mount.diff
new file mode 100644
index 0000000..5299ec8
--- /dev/null
+++ b/patches/proc_mount.diff
@@ -0,0 +1,32 @@
+From c9ba5b59e80b5468e17c900a93cfa549b234a9ff Mon Sep 17 00:00:00 2001
+From: Nikolaus Rath <Nikolaus@rath.org>
+Date: Tue, 23 Feb 2016 15:44:51 -0800
+Subject: Skip tests requiring /proc
+
+Origin: debian
+Forwarded: no
+Patch-Name: proc_mount.diff
+---
+ tests/t4_adm.py | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/tests/t4_adm.py b/tests/t4_adm.py
+index a1726..36e7f 100755
+--- a/tests/t4_adm.py
++++ b/tests/t4_adm.py
+@@ -19,6 +19,7 @@ import tempfile
+ import unittest
+ import subprocess
+ import pytest
++import os
+
+ @pytest.mark.usefixtures('s3ql_cmd_argv', 'pass_reg_output')
+ class AdmTests(unittest.TestCase):
+@@ -49,6 +50,7 @@ class AdmTests(unittest.TestCase):
+ self.reg_output(r'^WARNING: Maximum object sizes less than '
+ '1 MiB will degrade performance\.$', count=1)
+
++ @unittest.skipUnless(os.path.exists('/proc/mounts'), '/proc/mounts not available')
+ def test_passphrase(self):
+ self.mkfs()
+
diff --git a/patches/series b/patches/series
new file mode 100644
index 0000000..38dc7f4
--- /dev/null
+++ b/patches/series
@@ -0,0 +1,5 @@
+proc_mount.diff
+clock-granularity.diff
+ignore_cython_warnings.diff
+support_jessie_upgrade.diff
+show_pdflatex_output.diff
diff --git a/patches/show_pdflatex_output.diff b/patches/show_pdflatex_output.diff
new file mode 100644
index 0000000..cbb9601
--- /dev/null
+++ b/patches/show_pdflatex_output.diff
@@ -0,0 +1,30 @@
+From f2e46743e3b446d31cdda56846e7d2e7c384e2a0 Mon Sep 17 00:00:00 2001
+From: Nikolaus Rath <Nikolaus@rath.org>
+Date: Mon, 27 Jun 2016 11:25:34 -0700
+Subject: Don't hide pdflatex output.
+
+Origin: debian
+Forwarded: not-needed
+Patch-Name: show_pdflatex_output.diff
+
+Upstream is not interested in this patch.
+---
+ setup.py | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+diff --git a/setup.py b/setup.py
+index e2c9a..375c7 100755
+--- a/setup.py
++++ b/setup.py
+@@ -97,9 +97,8 @@ class build_docs(setuptools.Command):
+
+ print('Running pdflatex...')
+ for _ in range(3):
+- with open('/dev/null', 'wb') as null:
+- subprocess.check_call(['pdflatex', '-interaction', 'batchmode', 'manual.tex'],
+- cwd=os.path.join(dest_dir, 'latex'), stdout=null)
++ subprocess.check_call(['pdflatex', '-interaction', 'nonstopmode', 'manual.tex'],
++ cwd=os.path.join(dest_dir, 'latex'))
+ os.rename(os.path.join(dest_dir, 'latex', 'manual.pdf'),
+ os.path.join(dest_dir, 'manual.pdf'))
+
diff --git a/patches/support_jessie_upgrade.diff b/patches/support_jessie_upgrade.diff
new file mode 100644
index 0000000..4929879
--- /dev/null
+++ b/patches/support_jessie_upgrade.diff
@@ -0,0 +1,876 @@
+From fb58feac206d5a1987b8c7f74e5ee89b18f87dc6 Mon Sep 17 00:00:00 2001
+From: Nikolaus Rath <Nikolaus@rath.org>
+Date: Tue, 23 Feb 2016 15:44:51 -0800
+Subject: Allow upgrade of file systems created with jessie's s3ql
+
+Origin: debian
+Forwarded: not-needed
+Patch-Name: support_jessie_upgrade.diff
+
+Upstream has dropped support for upgrading file systems created with
+the S3QL version in jessie. This patch forward-ports this capability.
+It is based on upstream's Mercurial commit 773931c43368.
+---
+ src/s3ql/adm.py | 205 +++++++++++++++++++++--
+ src/s3ql/backends/comprenc.py | 371 ++++++++++++++++++++++++++++++++++++++----
+ src/s3ql/backends/local.py | 12 +-
+ src/s3ql/backends/s3c.py | 29 +++-
+ src/s3ql/upgrade_support.py | 75 +++++++++
+ 5 files changed, 647 insertions(+), 45 deletions(-)
+ create mode 100644 src/s3ql/upgrade_support.py
+
+diff --git a/src/s3ql/adm.py b/src/s3ql/adm.py
+index d0e67..3b5d9 100644
+--- a/src/s3ql/adm.py
++++ b/src/s3ql/adm.py
+@@ -7,19 +7,23 @@ This work can be distributed under the terms of the GNU GPLv3.
+ '''
+
+ from .logging import logging, QuietError, setup_logging
+-from . import CURRENT_FS_REV, REV_VER_MAP
++from . import CURRENT_FS_REV, REV_VER_MAP, BUFSIZE
+ from .backends.comprenc import ComprencBackend
+ from .database import Connection
+ from .deltadump import TIME, INTEGER
+ from .common import (get_backend_cachedir, get_seq_no, is_mounted, get_backend,
+- load_params, save_params)
++ load_params, save_params, AsyncFn, get_backend_factory,
++ pretty_print_size, split_by_n, handle_on_return)
+ from .metadata import dump_and_upload_metadata, download_metadata
+ from . import metadata
+ from .parse_args import ArgumentParser
+ from datetime import datetime as Datetime
+ from getpass import getpass
+ from contextlib import contextmanager
++from base64 import b64encode
++from queue import Queue, Full as QueueFull
+ import os
++import tempfile
+ import shutil
+ import functools
+ import sys
+@@ -86,12 +90,11 @@ def main(args=None):
+ if options.action == 'clear':
+ with get_backend(options, raw=True) as backend:
+ return clear(backend, options)
++ elif options.action == 'upgrade':
++ return upgrade(options)
+
+ with get_backend(options) as backend:
+- if options.action == 'upgrade':
+- return upgrade(backend, get_backend_cachedir(options.storage_url,
+- options.cachedir))
+- elif options.action == 'passphrase':
++ if options.action == 'passphrase':
+ return change_passphrase(backend)
+
+ elif options.action == 'download-metadata':
+@@ -206,11 +209,24 @@ def get_old_rev_msg(rev, prog):
+ ''' % { 'version': REV_VER_MAP[rev],
+ 'prog': prog })
+
+-def upgrade(backend, cachepath):
++
++@handle_on_return
++def upgrade(options, on_return):
+ '''Upgrade file system to newest revision'''
+
+ log.info('Getting file system parameters..')
+
++ from . import backends
++ backends.local.SUPPORT_LEGACY_FORMAT = True
++ backends.s3c.SUPPORT_LEGACY_FORMAT = True
++ backends.comprenc.SUPPORT_LEGACY_FORMAT = True
++
++ cachepath = get_backend_cachedir(options.storage_url, options.cachedir)
++
++ backend_factory = get_backend_factory(options.storage_url, options.backend_options,
++ options.authfile)
++ backend = on_return.enter_context(backend_factory())
++
+ # Check for cached metadata
+ db = None
+ seq_no = get_seq_no(backend)
+@@ -249,7 +265,7 @@ def upgrade(backend, cachepath):
+ raise QuietError()
+
+ # Check revision
+- if param['revision'] < CURRENT_FS_REV-1:
++ if param['revision'] < 21:
+ print(textwrap.dedent('''
+ File system revision too old to upgrade!
+
+@@ -285,13 +301,31 @@ def upgrade(backend, cachepath):
+ with monkeypatch_metadata_retrieval():
+ db = download_metadata(backend, cachepath + '.db')
+
+- log.info('Upgrading from revision %d to %d...', param['revision'], CURRENT_FS_REV)
+-
+- param['revision'] = CURRENT_FS_REV
+ param['last-modified'] = time.time()
+ param['seq_no'] += 1
++
++ if param['revision'] == 21:
++ log.info('Upgrading from revision %d to %d...', 21, 22)
++ param['revision'] = 22
++
++ # Ensure that there are backups of the master key
++ if backend.passphrase is not None:
++ data_pw = backend.passphrase
++ backend.passphrase = backend.fs_passphrase
++ for i in range(1,4):
++ obj_id = 's3ql_passphrase_bak%d' % i
++ if obj_id not in backend:
++ backend[obj_id] = data_pw
++ backend.passphrase = data_pw
++
++ # Upgrade to revision 22
++ update_obj_metadata(backend, backend_factory, db, options.threads)
++ updated_from_21 = True
++ else:
++ updated_from_21 = False
+
+- # Upgrade
++ log.info('Upgrading from revision %d to %d...', 22, 23)
++ param['revision'] = 23
+ for name in ('atime', 'mtime', 'ctime'):
+ db.execute('ALTER TABLE inodes ADD COLUMN {time}_ns '
+ 'INT NOT NULL DEFAULT 0'.format(time=name))
+@@ -308,6 +342,14 @@ def upgrade(backend, cachepath):
+
+ print('File system upgrade complete.')
+
++ if updated_from_21 and backend.passphrase is not None:
++ print('\nPlease store the following master key in a safe location. It allows ',
++ 'decryption of the S3QL file system in case the storage objects holding ',
++ 'this information get corrupted:',
++ '---BEGIN MASTER KEY---',
++ ' '.join(split_by_n(b64encode(backend.passphrase).decode(), 4)),
++ '---END MASTER KEY---',
++ sep='\n')
+
+ @contextmanager
+ def monkeypatch_metadata_retrieval():
+@@ -351,5 +393,144 @@ def monkeypatch_metadata_retrieval():
+ metadata.DUMP_SPEC[2] = DUMP_SPEC_bak
+ metadata.create_tables = create_tables_bak
+
++def update_obj_metadata(backend, backend_factory, db, thread_count):
++ '''Upgrade metadata of storage objects'''
++
++ plain_backend = backend.backend
++
++ # No need to update sequence number, since we are going to
++ # write out a new one after the upgrade.
++ if backend.passphrase is None:
++ extra_objects = { 's3ql_metadata' }
++ else:
++ extra_objects = { 's3ql_metadata',
++ 's3ql_passphrase', 's3ql_passphrase_bak1',
++ 's3ql_passphrase_bak2', 's3ql_passphrase_bak3' }
++
++ for i in range(30):
++ obj_id = 's3ql_metadata_bak_%d' % i
++ if obj_id in plain_backend:
++ extra_objects.add(obj_id)
++
++ def yield_objects():
++ for (id_,) in db.query('SELECT id FROM objects'):
++ yield 's3ql_data_%d' % id_
++ for obj_id in extra_objects:
++ yield obj_id
++ total = db.get_val('SELECT COUNT(id) FROM objects') + len(extra_objects)
++
++ queue = Queue(maxsize=thread_count)
++ threads = []
++ for _ in range(thread_count):
++ t = AsyncFn(upgrade_loop, queue, backend_factory)
++ # Don't wait for worker threads, gives deadlock if main thread
++ # terminates with exception
++ t.daemon = True
++ t.start()
++ threads.append(t)
++
++ # Updating this value is prone to race conditions. However,
++ # we don't care because this is for an approximate progress
++ # output only.
++ queue.rewrote_size = 0
++ stamp = 0
++ for (i, obj_id) in enumerate(yield_objects()):
++ stamp2 = time.time()
++ if stamp2 - stamp > 1:
++ sys.stdout.write('\r..processed %d/%d objects (%.1f%%, %s rewritten)..'
++ % (i, total, i/total*100,
++ pretty_print_size(queue.rewrote_size)))
++ sys.stdout.flush()
++ stamp = stamp2
++
++ # Terminate early if any thread failed with an exception
++ for t in threads:
++ if not t.is_alive():
++ t.join_and_raise()
++
++ # Avoid blocking if all threads terminated
++ while True:
++ try:
++ queue.put(obj_id, timeout=1)
++ except QueueFull:
++ pass
++ else:
++ break
++ for t in threads:
++ if not t.is_alive():
++ t.join_and_raise()
++
++ queue.maxsize += len(threads)
++ for t in threads:
++ queue.put(None)
++
++ for t in threads:
++ t.join_and_raise()
++
++ sys.stdout.write('\n')
++
++def upgrade_loop(queue, backend_factory):
++
++ with backend_factory() as backend:
++ plain_backend = backend.backend
++ while True:
++ obj_id = queue.get()
++ if obj_id is None:
++ break
++
++ meta = plain_backend.lookup(obj_id)
++ if meta.get('format_version', 0) == 2:
++ continue
++
++ # For important objects, we make a copy first (just to be safe)
++ if not obj_id.startswith('s3ql_data'):
++ plain_backend.copy(obj_id, 's3ql_pre2.13' + obj_id[4:])
++
++ # When reading passphrase objects, we have to use the
++ # "outer" password
++ if obj_id.startswith('s3ql_passphrase'):
++ data_pw = backend.passphrase
++ backend.passphrase = backend.fs_passphrase
++
++ meta = backend._convert_legacy_metadata(meta)
++ if meta['encryption'] == 'AES':
++ # Two statements to reduce likelihood of update races
++ size = rewrite_legacy_object(backend, obj_id)
++ queue.rewrote_size += size
++ else:
++ plain_backend.update_meta(obj_id, meta)
++
++ if obj_id.startswith('s3ql_passphrase'):
++ backend.passphrase = data_pw
++
++def rewrite_legacy_object(backend, obj_id):
++ with tempfile.TemporaryFile() as tmpfh:
++
++ # Read object
++ def do_read(fh):
++ tmpfh.seek(0)
++ tmpfh.truncate()
++ while True:
++ buf = fh.read(BUFSIZE)
++ if not buf:
++ break
++ tmpfh.write(buf)
++ return fh.metadata
++
++ meta = backend.perform_read(do_read, obj_id)
++
++ # Write object
++ def do_write(fh):
++ tmpfh.seek(0)
++ while True:
++ buf = tmpfh.read(BUFSIZE)
++ if not buf:
++ break
++ fh.write(buf)
++ return fh
++ out_fh = backend.perform_write(do_write, obj_id, meta)
++
++ return out_fh.get_obj_size()
++
+ if __name__ == '__main__':
+ main(sys.argv[1:])
+diff --git a/src/s3ql/backends/comprenc.py b/src/s3ql/backends/comprenc.py
+index 58e90..285f5 100644
+--- a/src/s3ql/backends/comprenc.py
++++ b/src/s3ql/backends/comprenc.py
+@@ -25,6 +25,11 @@ import zlib
+
+ log = logging.getLogger(__name__)
+
++from ..upgrade_support import safe_unpickle, pickle
++from base64 import b64decode, b64encode
++import binascii
++SUPPORT_LEGACY_FORMAT=False
++
+ HMAC_SIZE = 32
+
+ def sha256(s):
+@@ -67,6 +72,8 @@ class ComprencBackend(AbstractBackend, metaclass=ABCDocstMeta):
+ @copy_ancestor_docstring
+ def lookup(self, key):
+ meta_raw = self.backend.lookup(key)
++ if SUPPORT_LEGACY_FORMAT and meta_raw.get('format_version', 0) < 2:
++ meta_raw = self._convert_legacy_metadata(meta_raw)
+ return self._verify_meta(key, meta_raw)[1]
+
+ @prepend_ancestor_docstring
+@@ -147,43 +154,80 @@ class ComprencBackend(AbstractBackend, metaclass=ABCDocstMeta):
+ """
+
+ fh = self.backend.open_read(key)
++ checksum_warning = False
+ try:
+- meta_raw = fh.metadata
+- (nonce, meta) = self._verify_meta(key, meta_raw)
+- if nonce:
+- data_key = sha256(self.passphrase + nonce)
+-
+- # The `payload_offset` key only exists if the storage object was
+- # created with on old S3QL version. In order to avoid having to
+- # download and re-upload the entire object during the upgrade, the
+- # upgrade procedure adds this header to tell us how many bytes at
+- # the beginning of the object we have to skip to get to the payload.
+- if 'payload_offset' in meta_raw:
+- to_skip = meta_raw['payload_offset']
+- while to_skip:
+- to_skip -= len(fh.read(to_skip))
+-
+- encr_alg = meta_raw['encryption']
+- if encr_alg == 'AES_v2':
+- fh = DecryptFilter(fh, data_key)
+- elif encr_alg != 'None':
+- raise RuntimeError('Unsupported encryption: %s' % encr_alg)
+-
+- compr_alg = meta_raw['compression']
+- if compr_alg == 'BZIP2':
+- fh = DecompressFilter(fh, bz2.BZ2Decompressor())
+- elif compr_alg == 'LZMA':
+- fh = DecompressFilter(fh, lzma.LZMADecompressor())
+- elif compr_alg == 'ZLIB':
+- fh = DecompressFilter(fh,zlib.decompressobj())
+- elif compr_alg != 'None':
+- raise RuntimeError('Unsupported compression: %s' % compr_alg)
++ if SUPPORT_LEGACY_FORMAT:
++ if fh.metadata.get('format_version', 0) < 2:
++ meta_raw = self._convert_legacy_metadata(fh.metadata)
++ else:
++ meta_raw = fh.metadata
++ (nonce, meta) = self._verify_meta(key, meta_raw)
++ if nonce:
++ data_key = sha256(self.passphrase + nonce)
++ compr_alg = meta_raw['compression']
++ encr_alg = meta_raw['encryption']
++ if compr_alg == 'BZIP2':
++ decompressor = bz2.BZ2Decompressor()
++ elif compr_alg == 'LZMA':
++ decompressor = lzma.LZMADecompressor()
++ elif compr_alg == 'ZLIB':
++ decompressor = zlib.decompressobj()
++ elif compr_alg == 'None':
++ decompressor = None
++ else:
++ raise RuntimeError('Unsupported compression: %s' % compr_alg)
++ if 'payload_offset' in meta_raw:
++ to_skip = meta_raw['payload_offset']
++ while to_skip:
++ to_skip -= len(fh.read(to_skip))
++ checksum_warning = True
++ if encr_alg == 'AES':
++ fh = LegacyDecryptDecompressFilter(fh, data_key, decompressor)
++ decompressor = None
++ elif encr_alg == 'AES_v2':
++ fh = DecryptFilter(fh, data_key)
++ elif encr_alg != 'None':
++ raise RuntimeError('Unsupported encryption: %s' % encr_alg)
++ if decompressor:
++ fh = DecompressFilter(fh, decompressor)
++ else:
++ meta_raw = fh.metadata
++
++ (nonce, meta) = self._verify_meta(key, meta_raw)
++ if nonce:
++ data_key = sha256(self.passphrase + nonce)
++
++ # The `payload_offset` key only exists if the storage object was
++ # created with an old S3QL version. In order to avoid having to
++ # download and re-upload the entire object during the upgrade, the
++ # upgrade procedure adds this header to tell us how many bytes at
++ # the beginning of the object we have to skip to get to the payload.
++ if 'payload_offset' in meta_raw:
++ to_skip = meta_raw['payload_offset']
++ while to_skip:
++ to_skip -= len(fh.read(to_skip))
++
++ encr_alg = meta_raw['encryption']
++ if encr_alg == 'AES_v2':
++ fh = DecryptFilter(fh, data_key)
++ elif encr_alg != 'None':
++ raise RuntimeError('Unsupported encryption: %s' % encr_alg)
++
++ compr_alg = meta_raw['compression']
++ if compr_alg == 'BZIP2':
++ fh = DecompressFilter(fh, bz2.BZ2Decompressor())
++ elif compr_alg == 'LZMA':
++ fh = DecompressFilter(fh, lzma.LZMADecompressor())
++ elif compr_alg == 'ZLIB':
++ fh = DecompressFilter(fh,zlib.decompressobj())
++ elif compr_alg != 'None':
++ raise RuntimeError('Unsupported compression: %s' % compr_alg)
+
+ fh.metadata = meta
+ except:
+ # Don't emit checksum warning, caller hasn't even
+ # started reading anything.
+- fh.close(checksum_warning=False)
++ fh.close(checksum_warning=checksum_warning)
+ raise
+
+ return fh
+@@ -275,6 +319,8 @@ class ComprencBackend(AbstractBackend, metaclass=ABCDocstMeta):
+
+ def _copy_or_rename(self, src, dest, rename, metadata=None):
+ meta_raw = self.backend.lookup(src)
++ if SUPPORT_LEGACY_FORMAT and meta_raw.get('format_version', 0) < 2:
++ meta_raw = self._convert_legacy_metadata(meta_raw)
+ (nonce, meta_old) = self._verify_meta(src, meta_raw)
+
+ if nonce:
+@@ -303,6 +349,165 @@ class ComprencBackend(AbstractBackend, metaclass=ABCDocstMeta):
+ def close(self):
+ self.backend.close()
+
++ def _convert_legacy_metadata(self, meta):
++ '''Convert metadata to newest format
++
++ This method ensures that we can read objects written
++ by older S3QL versions.
++ '''
++
++ format_version = meta.get('format_version', 0)
++ assert format_version in (0,1)
++ if format_version == 0:
++ meta = self._convert_legacy_metadata0(meta)
++ return self._convert_legacy_metadata1(meta)
++
++ def _convert_legacy_metadata0(self, meta,
++ LEN_BYTES = struct.calcsize(b'<B'),
++ TIME_BYTES = struct.calcsize(b'<f')):
++ meta_new = dict(format_version=1)
++
++ if ('encryption' in meta and
++ 'compression' in meta):
++ meta_new['encryption'] = meta['encryption']
++ meta_new['compression'] = meta['compression']
++
++ elif 'encrypted' in meta:
++ s = meta['encrypted']
++ if s == 'True':
++ meta_new['encryption'] = 'AES'
++ meta_new['compression'] = 'BZIP2'
++
++ elif s == 'False':
++ meta_new['encryption'] = 'None'
++ meta_new['compression'] = 'None'
++
++ elif s.startswith('AES/'):
++ meta_new['encryption'] = 'AES'
++ meta_new['compression'] = s[4:]
++
++ elif s.startswith('PLAIN/'):
++ meta_new['encryption'] = 'None'
++ meta_new['compression'] = s[6:]
++ else:
++ raise RuntimeError('Unsupported encryption')
++
++ if meta_new['compression'] == 'BZ2':
++ meta_new['compression'] = 'BZIP2'
++
++ if meta_new['compression'] == 'NONE':
++ meta_new['compression'] = 'None'
++ else:
++ meta_new['encryption'] = 'None'
++ meta_new['compression'] = 'None'
++
++ # Extract metadata (pre 2.x versions use multiple headers)
++ if any(k.startswith('meta') for k in meta):
++ parts = [ meta[k] for k in sorted(meta.keys())
++ if k.startswith('meta') ]
++ meta_new['data'] = ''.join(parts)
++ else:
++ try:
++ meta_new['data'] = meta['data']
++ except KeyError:
++ raise CorruptedObjectError('meta key data is missing')
++
++ if not self.passphrase:
++ return meta_new
++
++ meta_buf = b64decode(meta_new['data'])
++ off = 0
++ def read(len_):
++ nonlocal off
++ tmp = meta_buf[off:off+len_]
++ off += len_
++ return tmp
++
++ len_ = struct.unpack(b'<B', read(LEN_BYTES))[0]
++ nonce = read(len_)
++ key = sha256(self.passphrase + nonce)
++ cipher = aes_cipher(key)
++ hmac_ = hmac.new(key, digestmod=hashlib.sha256)
++ hash_ = read(HMAC_SIZE)
++ meta_buf = meta_buf[off:]
++ meta_buf_plain = cipher.decrypt(meta_buf)
++ hmac_.update(meta_buf_plain)
++ hash_ = cipher.decrypt(hash_)
++
++ if not hmac.compare_digest(hash_, hmac_.digest()):
++ raise CorruptedObjectError('HMAC mismatch')
++
++ obj_id = nonce[TIME_BYTES:].decode('utf-8')
++ meta_key = sha256(self.passphrase + nonce + b'meta')
++ meta_new['nonce'] = b64encode(nonce)
++ meta_new['payload_offset'] = LEN_BYTES + len(nonce)
++ meta_new['data'] = b64encode(aes_cipher(meta_key).encrypt(meta_buf_plain))
++ meta_new['object_id'] = b64encode(obj_id.encode('utf-8'))
++ meta_new['signature'] = calc_legacy_meta_checksum(meta_new, meta_key)
++
++ return meta_new
++
++ def _convert_legacy_metadata1(self, metadata):
++ if not isinstance(metadata, dict):
++ raise CorruptedObjectError('metadata should be dict, not %s' % type(metadata))
++
++ for mkey in ('encryption', 'compression', 'data'):
++ if mkey not in metadata:
++ raise CorruptedObjectError('meta key %s is missing' % mkey)
++
++ encr_alg = metadata['encryption']
++ encrypted = (encr_alg != 'None')
++
++ if encrypted and self.passphrase is None:
++ raise CorruptedObjectError('Encrypted object and no passphrase supplied')
++
++ elif not encrypted and self.passphrase is not None:
++ raise ObjectNotEncrypted()
++
++ try:
++ meta_buf = b64decode(metadata['data'])
++ except binascii.Error:
++ raise CorruptedObjectError('Invalid metadata, b64decode failed')
++
++ if not encrypted:
++ try:
++ meta2 = safe_unpickle(meta_buf, encoding='latin1')
++ except pickle.UnpicklingError as exc:
++ raise CorruptedObjectError('Invalid metadata, pickle says: %s' % exc)
++ if meta2 is None:
++ meta2 = dict()
++ metadata['data'] = freeze_basic_mapping(meta2)
++ metadata['format_version'] = 2
++ return metadata
++
++ # Encrypted
++ for mkey in ('nonce', 'signature', 'object_id'):
++ if mkey not in metadata:
++ raise CorruptedObjectError('meta key %s is missing' % mkey)
++
++ nonce = b64decode(metadata['nonce'])
++ meta_key = sha256(self.passphrase + nonce + b'meta')
++ meta_sig = calc_legacy_meta_checksum(metadata, meta_key)
++ if not hmac.compare_digest(metadata['signature'], meta_sig):
++ raise CorruptedObjectError('HMAC mismatch')
++
++ buf = aes_cipher(meta_key).decrypt(meta_buf)
++ try:
++ meta2 = safe_unpickle(buf, encoding='latin1')
++ except pickle.UnpicklingError as exc:
++ raise CorruptedObjectError('Invalid metadata, pickle says: %s' % exc)
++ if meta2 is None:
++ meta2 = dict()
++
++ meta_buf = freeze_basic_mapping(meta2)
++ metadata['nonce'] = nonce
++ metadata['object_id'] = b64decode(metadata['object_id']).decode('utf-8')
++ metadata['data'] = aes_cipher(meta_key).encrypt(meta_buf)
++ metadata['format_version'] = 2
++ metadata['signature'] = checksum_basic_mapping(metadata, meta_key)
++
++ return metadata
++
+ class CompressFilter(object):
+ '''Compress data while writing'''
+
+@@ -675,3 +880,107 @@ class ObjectNotEncrypted(Exception):
+ '''
+
+ pass
++
++
++def calc_legacy_meta_checksum(metadata, key):
++ # This works most of the time, so we still try to validate the
++ # signature. But in general, the pickle output is not unique so this is
++ # not a good way to compute a checksum.
++ chk = hmac.new(key, digestmod=hashlib.sha256)
++ for mkey in sorted(metadata.keys()):
++ assert isinstance(mkey, str)
++ if mkey == 'signature':
++ continue
++ val = metadata[mkey]
++ if isinstance(val, str):
++ val = val.encode('utf-8')
++ elif not isinstance(val, (bytes, bytearray)):
++ val = pickle.dumps(val, 2)
++ chk.update(mkey.encode('utf-8') + val)
++ return b64encode(chk.digest())
++
++class LegacyDecryptDecompressFilter(io.RawIOBase):
++ '''Decrypt and Decompress data while reading
++
++ Reader has to read the entire stream in order for HMAC
++ checking to work.
++ '''
++
++ def __init__(self, fh, key, decomp):
++ '''Initialize
++
++ *fh* should be a file-like object and may be unbuffered.
++ '''
++ super().__init__()
++
++ self.fh = fh
++ self.decomp = decomp
++ self.hmac_checked = False
++ self.cipher = aes_cipher(key)
++ self.hmac = hmac.new(key, digestmod=hashlib.sha256)
++ self.hash = fh.read(HMAC_SIZE)
++
++ def discard_input(self):
++ while True:
++ buf = self.fh.read(BUFSIZE)
++ if not buf:
++ break
++
++ def _decrypt(self, buf):
++ # Work around https://bugs.launchpad.net/pycrypto/+bug/1256172
++ # cipher.decrypt refuses to work with anything but bytes
++ if not isinstance(buf, bytes):
++ buf = bytes(buf)
++
++ len_ = len(buf)
++ buf = self.cipher.decrypt(buf)
++ assert len(buf) == len_
++ return buf
++
++ def read(self, size=-1):
++ '''Read up to *size* bytes
++
++ This method is currently buggy and may also return *more*
++ than *size* bytes. Callers should be prepared to handle
++ that. This is because some of the used (de)compression modules
++ don't support output limiting.
++ '''
++
++ if size == -1:
++ return self.readall()
++ elif size == 0:
++ return b''
++
++ buf = None
++ while not buf:
++ buf = self.fh.read(size)
++ if not buf and not self.hmac_checked:
++ if not hmac.compare_digest(self._decrypt(self.hash),
++ self.hmac.digest()):
++ raise CorruptedObjectError('HMAC mismatch')
++ elif self.decomp and self.decomp.unused_data:
++ raise CorruptedObjectError('Data after end of compressed stream')
++ else:
++ self.hmac_checked = True
++ return b''
++ elif not buf:
++ return b''
++
++ buf = self._decrypt(buf)
++ if not self.decomp:
++ break
++
++ buf = decompress(self.decomp, buf)
++
++ self.hmac.update(buf)
++ return buf
++
++ def close(self, *a, **kw):
++ self.fh.close(*a, **kw)
++
++ def __enter__(self):
++ return self
++
++ def __exit__(self, *a):
++ self.close()
++ return False
+diff --git a/src/s3ql/backends/local.py b/src/s3ql/backends/local.py
+index a8ebf..03cfd 100644
+--- a/src/s3ql/backends/local.py
++++ b/src/s3ql/backends/local.py
+@@ -18,6 +18,9 @@ import io
+ import os
+ import shutil
+
++from ..upgrade_support import safe_unpickle_fh, pickle
++SUPPORT_LEGACY_FORMAT=False
++
+ log = logging.getLogger(__name__)
+
+ class Backend(AbstractBackend, metaclass=ABCDocstMeta):
+@@ -241,7 +244,14 @@ class Backend(AbstractBackend, metaclass=ABCDocstMeta):
+ def _read_meta(fh):
+ buf = fh.read(9)
+ if not buf.startswith(b's3ql_1\n'):
+- raise CorruptedObjectError('Invalid object header: %r' % buf)
++ if SUPPORT_LEGACY_FORMAT:
++ fh.seek(0)
++ try:
++ return safe_unpickle_fh(fh, encoding='latin1')
++ except pickle.UnpicklingError as exc:
++ raise CorruptedObjectError('Invalid metadata, pickle says: %s' % exc)
++ else:
++ raise CorruptedObjectError('Invalid object header: %r' % buf)
+
+ len_ = struct.unpack('<H', buf[-2:])[0]
+ try:
+diff --git a/src/s3ql/backends/s3c.py b/src/s3ql/backends/s3c.py
+index 0a5a4..dc049 100644
+--- a/src/s3ql/backends/s3c.py
++++ b/src/s3ql/backends/s3c.py
+@@ -34,6 +34,9 @@ import time
+ import ssl
+ import urllib.parse
+
++from ..upgrade_support import safe_unpickle, pickle
++SUPPORT_LEGACY_FORMAT=False
++
+ C_DAY_NAMES = [ 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun' ]
+ C_MONTH_NAMES = [ 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec' ]
+
+@@ -742,7 +745,31 @@ class Backend(AbstractBackend, metaclass=ABCDocstMeta):
+
+ format_ = resp.headers.get('%smeta-format' % self.hdr_prefix, 'raw')
+ if format_ != 'raw2': # Current
+- raise CorruptedObjectError('Invalid metadata format: %s' % format_)
++ if SUPPORT_LEGACY_FORMAT:
++ meta = CaseInsensitiveDict()
++ pattern = re.compile(r'^%smeta-(.+)$' % re.escape(self.hdr_prefix),
++ re.IGNORECASE)
++ for fname in resp.headers:
++ hit = pattern.search(fname)
++ if hit:
++ meta[hit.group(1)] = resp.headers[fname]
++
++ if format_ == 'raw':
++ return meta
++
++ # format_ == pickle
++ buf = ''.join(meta[x] for x in sorted(meta) if x.lower().startswith('data-'))
++ if 'md5' in meta and md5sum_b64(buf.encode('us-ascii')) != meta['md5']:
++ log.warning('MD5 mismatch in metadata for %s', obj_key)
++ raise BadDigestError('BadDigest', 'Meta MD5 for %s does not match' % obj_key)
++ try:
++ return safe_unpickle(b64decode(buf), encoding='latin1')
++ except binascii.Error:
++ raise CorruptedObjectError('Corrupted metadata, b64decode failed')
++ except pickle.UnpicklingError as exc:
++ raise CorruptedObjectError('Corrupted metadata, pickle says: %s' % exc)
++ else:
++ raise CorruptedObjectError('Invalid metadata format: %s' % format_)
+
+ parts = []
+ for i in count():
+diff --git a/src/s3ql/upgrade_support.py b/src/s3ql/upgrade_support.py
+new file mode 100644
+index 00000..17079
+--- /dev/null
++++ b/src/s3ql/upgrade_support.py
+@@ -0,0 +1,75 @@
++'''
++Routines for reading old metadata to allow upgrade.
++Forward-ported from Mercurial commit 773931c43368.
++'''
++
++from .logging import logging # Ensure use of custom logger class
++import pickletools
++import pickle
++import codecs
++import io
++
++log = logging.getLogger(__name__)
++
++SAFE_UNPICKLE_OPCODES = {'BININT', 'BININT1', 'BININT2', 'LONG1', 'LONG4',
++ 'BINSTRING', 'SHORT_BINSTRING', 'GLOBAL',
++ 'NONE', 'NEWTRUE', 'NEWFALSE', 'BINUNICODE',
++ 'BINFLOAT', 'EMPTY_LIST', 'APPEND', 'APPENDS',
++ 'LIST', 'EMPTY_TUPLE', 'TUPLE', 'TUPLE1', 'TUPLE2',
++ 'TUPLE3', 'EMPTY_DICT', 'DICT', 'SETITEM',
++ 'SETITEMS', 'POP', 'DUP', 'MARK', 'POP_MARK',
++ 'BINGET', 'LONG_BINGET', 'BINPUT', 'LONG_BINPUT',
++ 'PROTO', 'STOP', 'REDUCE'}
++
++SAFE_UNPICKLE_GLOBAL_NAMES = { ('__builtin__', 'bytearray'),
++ ('__builtin__', 'set'),
++ ('__builtin__', 'frozenset'),
++ ('_codecs', 'encode') }
++SAFE_UNPICKLE_GLOBAL_OBJS = { bytearray, set, frozenset, codecs.encode }
++
++class SafeUnpickler(pickle.Unpickler):
++ def find_class(self, module, name):
++ if (module, name) not in SAFE_UNPICKLE_GLOBAL_NAMES:
++ raise pickle.UnpicklingError("global '%s.%s' is unsafe" %
++ (module, name))
++ ret = super().find_class(module, name)
++ if ret not in SAFE_UNPICKLE_GLOBAL_OBJS:
++ raise pickle.UnpicklingError("global '%s.%s' is unsafe" %
++ (module, name))
++ return ret
++
++
++def safe_unpickle_fh(fh, fix_imports=True, encoding="ASCII",
++ errors="strict"):
++ '''Safely unpickle untrusted data from *fh*
++
++ *fh* must be seekable.
++ '''
++
++ if not fh.seekable():
++ raise TypeError('*fh* must be seekable')
++ pos = fh.tell()
++
++ # First make sure that we know all used opcodes
++ try:
++ for (opcode, arg, _) in pickletools.genops(fh):
++ if opcode.proto > 2 or opcode.name not in SAFE_UNPICKLE_OPCODES:
++ raise pickle.UnpicklingError('opcode %s is unsafe' % opcode.name)
++ except (ValueError, EOFError):
++ raise pickle.UnpicklingError('corrupted data')
++
++ fh.seek(pos)
++
++ # Then use a custom Unpickler to ensure that we only give access to
++ # specific, whitelisted globals. Note that with the above opcodes, there is
++ # no way to trigger attribute access, so "brachiating" from a white listed
++ # object to __builtins__ is not possible.
++ return SafeUnpickler(fh, fix_imports=fix_imports,
++ encoding=encoding, errors=errors).load()
++
++def safe_unpickle(buf, fix_imports=True, encoding="ASCII",
++ errors="strict"):
++ '''Safely unpickle untrusted data in *buf*'''
++
++ return safe_unpickle_fh(io.BytesIO(buf), fix_imports=fix_imports,
++ encoding=encoding, errors=errors)
diff --git a/py3dist-overrides b/py3dist-overrides
new file mode 100644
index 0000000..e386e0b
--- /dev/null
+++ b/py3dist-overrides
@@ -0,0 +1,3 @@
+dugong python3-dugong; PEP386
+apsw python3-apsw; PEP386
+llfuse python3-llfuse; PEP386
diff --git a/rules b/rules
new file mode 100755
index 0000000..00354e3
--- /dev/null
+++ b/rules
@@ -0,0 +1,112 @@
+#!/usr/bin/make -f
+# -*- makefile -*-
+
+#export DH_VERBOSE=1
+DEB_HOST_MULTIARCH ?= $(shell dpkg-architecture -qDEB_HOST_MULTIARCH)
+
+%:
+ dh $@ --with python3,sphinxdoc
+
+override_dh_auto_build: build_cython build_sphinx build_python
+
+build_cython:
+ dh_testdir
+ python3 setup.py build_cython
+ touch $@
+
+build_sphinx: build_inplace
+ dh_testdir
+ python3 setup.py build_sphinx
+ touch $@
+
+build_inplace: build_cython
+ python3 setup.py build_ext --inplace
+ touch $@
+
+build_python: build_cython
+ dh_testdir
+ python3-dbg setup.py build -g
+ python3 setup.py build -g
+ touch $@
+
+override_dh_auto_install:
+ dh_testdir
+ dh_testroot
+ dh_prep
+ dh_installdirs
+
+ # Note: Install non -dbg last, so that scripts don't get a -dbg interpreter
+ python3-dbg setup.py install --force --root=debian/tmp \
+ --install-lib=/usr/lib/s3ql \
+ --install-scripts=/usr/lib/s3ql \
+ --no-compile -O0 --install-layout=deb
+
+ python3 setup.py install --force --root=debian/tmp \
+ --install-lib=/usr/lib/s3ql \
+ --install-scripts=/usr/lib/s3ql \
+ --no-compile -O0 --install-layout=deb
+
+override_dh_auto_test:
+ifeq (,$(findstring nocheck,$(DEB_BUILD_OPTIONS)))
+ python3 setup.py build_ext --inplace
+ # Some tests will try to create log files in $HOME
+ mkdir -p debian/tmphome
+ HOME=$(CURDIR)/debian/tmphome py.test-3 --logdebug=s3ql.verify tests/
+endif
+
+override_dh_install:
+ dh_install
+
+ # Rename to avoid conflict with pcp package
+ (cd debian/s3ql/usr/share/man/man1/; mv pcp.1 parallel-cp.1)
+
+ # Install debugging extension and remove from regular package
+ (cd debian/s3ql; \
+ find . -regextype posix-egrep -regex ".+\\.[a-z]+-[0-9]{2,}[a-z]*d[a-z]*-$(DEB_HOST_MULTIARCH)\\.so" \
+ -printf "%P\0" -delete) \
+ | xargs -0 dh_install --autodest -ps3ql-dbg
+
+ # Link all executable files not ending in .py into /usr/bin
+ mkdir -p debian/s3ql/usr/bin/
+ for entry in debian/s3ql/usr/lib/s3ql/*; do \
+ if [ -d "$$entry" ] || [ ! -x "$$entry" ] \
+ || [ "$${entry%.py}" != "$$entry" ]; then \
+ continue; \
+ fi; \
+ ln -s ../lib/s3ql/`basename "$$entry"` \
+ "debian/s3ql/usr/bin/`basename \"$$entry\"`"; \
+ done
+
+override_dh_link:
+ rm -rf debian/s3ql-dbg/usr/share/doc/s3ql-dbg
+ dh_link
+
+
+# We don't want to call setup.py clean, because this
+# requires a number of (potentially uninstalled) build-dependencies.
+override_dh_auto_clean:
+ rm -rf build doc tests/test.log src/s3ql/deltadump.c contrib/*.1
+ find \( \( -name '*.egg-info' -type d \) \
+ -o \( -name __pycache__ -type d \) \
+ -o \( -name '*.so' -type f \) \
+ \) -prune -exec rm -rf '{}' +
+ rm -rf build-python build_cython build_sphinx build-stamp
+ rm -rf debian/tmphome
+
+override_dh_strip:
+ifeq (,$(filter nostrip,$(DEB_BUILD_OPTIONS)))
+ dh_strip -ps3ql --dbg-package=s3ql-dbg
+endif
+
+.PHONY: get-orig-source
+get-orig-source:
+ # Work around bug #749225
+ PERL_LWP_SSL_VERIFY_HOSTNAME=0 \
+ uscan --rename --destdir=$(CURDIR)/.. --repack --force-download \
+ --download-current-version --compression xz
+
+.PHONY: uscan
+uscan:
+ # Work around bug #749225
+ PERL_LWP_SSL_VERIFY_HOSTNAME=0 \
+ uscan --rename --destdir=$(CURDIR)/.. --repack --compression xz
diff --git a/s3ql-dbg.links b/s3ql-dbg.links
new file mode 100644
index 0000000..6c91288
--- /dev/null
+++ b/s3ql-dbg.links
@@ -0,0 +1 @@
+/usr/share/doc/s3ql /usr/share/doc/s3ql-dbg
diff --git a/s3ql.doc-base b/s3ql.doc-base
new file mode 100644
index 0000000..fe8fa4f
--- /dev/null
+++ b/s3ql.doc-base
@@ -0,0 +1,11 @@
+Document: s3ql
+Title: S3QL User's Guide
+Author: Nikolaus Rath <Nikolaus@rath.org>
+Section: File Management
+
+Format: HTML
+Index: /usr/share/doc/s3ql/html/index.html
+Files: /usr/share/doc/s3ql/html/*.html
+
+Format: PDF
+Files: /usr/share/doc/s3ql/manual.pdf.gz
diff --git a/s3ql.docs b/s3ql.docs
new file mode 100644
index 0000000..5214ec8
--- /dev/null
+++ b/s3ql.docs
@@ -0,0 +1,2 @@
+doc/html/
+doc/manual.pdf
diff --git a/s3ql.examples b/s3ql.examples
new file mode 100644
index 0000000..59dbaa2
--- /dev/null
+++ b/s3ql.examples
@@ -0,0 +1 @@
+contrib/s3ql_backup.sh
diff --git a/s3ql.install b/s3ql.install
new file mode 100644
index 0000000..9ef8edd
--- /dev/null
+++ b/s3ql.install
@@ -0,0 +1,4 @@
+usr/
+contrib/*.py /usr/lib/s3ql/
+contrib/*.1 /usr/share/man/man1/
+
diff --git a/s3ql.links b/s3ql.links
new file mode 100644
index 0000000..fdf4300
--- /dev/null
+++ b/s3ql.links
@@ -0,0 +1,3 @@
+/usr/lib/s3ql/pcp.py /usr/bin/parallel-cp
+/usr/lib/s3ql/expire_backups.py /usr/bin/expire_backups
+/usr/lib/s3ql/remove_objects.py /usr/bin/s3ql_remove_objects
diff --git a/source/format b/source/format
new file mode 100644
index 0000000..163aaf8
--- /dev/null
+++ b/source/format
@@ -0,0 +1 @@
+3.0 (quilt)
diff --git a/source/include-binaries b/source/include-binaries
new file mode 100644
index 0000000..fb217db
--- /dev/null
+++ b/source/include-binaries
@@ -0,0 +1,2 @@
+debian/python.inv
+debian/upstream-signing-key.pgp
diff --git a/source/options b/source/options
new file mode 100644
index 0000000..b306286
--- /dev/null
+++ b/source/options
@@ -0,0 +1,2 @@
+compression = "xz"
+compression-level = 6
diff --git a/tests/control b/tests/control
new file mode 100644
index 0000000..b4774c2
--- /dev/null
+++ b/tests/control
@@ -0,0 +1,10 @@
+Tests: upstream-standard
+Depends: python3-pytest, python3-pytest-catchlog, rsync, s3ql, fuse, psmisc
+
+# This is really the same test suite, but when run with root
+# access (or permission to mount fuse file systems), additional
+# tests will be run.
+Tests: upstream-with-fuse
+Depends: python3-pytest, python3-pytest-catchlog, rsync, s3ql, fuse, psmisc
+Restrictions: needs-root
+
diff --git a/tests/upstream-standard b/tests/upstream-standard
new file mode 100755
index 0000000..19ceda8
--- /dev/null
+++ b/tests/upstream-standard
@@ -0,0 +1,7 @@
+#!/bin/sh
+
+# Test suite automatically skips over tests that cannot be
+# executed because of insufficient privileges.
+
+export PYTHONPATH="/usr/lib/s3ql:${PYTHONPATH}"
+exec py.test-3 --installed --logdebug=s3ql.verify tests/
diff --git a/tests/upstream-with-fuse b/tests/upstream-with-fuse
new file mode 100755
index 0000000..21dbbc4
--- /dev/null
+++ b/tests/upstream-with-fuse
@@ -0,0 +1,4 @@
+#!/bin/sh
+
+export PYTHONPATH="/usr/lib/s3ql:${PYTHONPATH}"
+exec py.test-3 --installed --logdebug=s3ql.verify tests/
diff --git a/upstream-signing-key.pgp b/upstream-signing-key.pgp
new file mode 100644
index 0000000..d2ea347
--- /dev/null
+++ b/upstream-signing-key.pgp
Binary files differ
diff --git a/watch b/watch
new file mode 100644
index 0000000..d7817fa
--- /dev/null
+++ b/watch
@@ -0,0 +1,9 @@
+# watch control file for uscan
+version=3
+opts=\
+pgpsigurlmangle=s/$/.asc/,\
+uversionmangle=s/$/+dfsg/ \
+ https://bitbucket.org/nikratio/s3ql/downloads \
+ .*/s3ql-(\d\S*)\.tar\.bz2 \
+ debian
+